source
stringlengths
3
86
python
stringlengths
75
1.04M
topology_test.py
"""Cluster-topology integration tests (join/move/decommission/removenode).

Reconstructed into valid Python from a whitespace-stripped dump: the original
content had all newlines removed, so this body restores conventional
formatting without altering any token or runtime string.
"""
import re
import time
import pytest
import logging

from threading import Thread

from cassandra import ConsistencyLevel
from ccmlib.node import TimeoutError, ToolError

from dtest import Tester, create_ks, create_cf, mk_bman_path
from tools.assertions import assert_almost_equal, assert_all, assert_none
from tools.data import insert_c1c2, query_c1c2

since = pytest.mark.since
logger = logging.getLogger(__name__)


class TestTopology(Tester):

    def test_do_not_join_ring(self):
        """
        @jira_ticket CASSANDRA-9034
        Check that AssertionError is not thrown on SizeEstimatesRecorder before node joins ring
        """
        cluster = self.cluster.populate(1)
        node1, = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False,
                    jvm_args=["-Dcassandra.size_recorder_interval=1"])

        # initial delay is 30s
        time.sleep(40)

        node1.stop(gently=False)

    @since('3.0.11')
    def test_size_estimates_multidc(self):
        """
        Test that primary ranges are correctly generated on
        system.size_estimates for multi-dc, multi-ks scenario
        @jira_ticket CASSANDRA-9639
        """
        logger.debug("Creating cluster")
        cluster = self.cluster
        cluster.set_configuration_options(values={'num_tokens': 2})
        cluster.populate([2, 1])
        node1_1, node1_2, node2_1 = cluster.nodelist()

        # Fixed tokens so the expected primary ranges below are deterministic.
        logger.debug("Setting tokens")
        node1_tokens, node2_tokens, node3_tokens = ['-6639341390736545756,-2688160409776496397',
                                                    '-2506475074448728501,8473270337963525440',
                                                    '-3736333188524231709,8673615181726552074']
        node1_1.set_configuration_options(values={'initial_token': node1_tokens})
        node1_2.set_configuration_options(values={'initial_token': node2_tokens})
        node2_1.set_configuration_options(values={'initial_token': node3_tokens})
        cluster.set_configuration_options(values={'num_tokens': 2})

        logger.debug("Starting cluster")
        cluster.start()

        out, _, _ = node1_1.nodetool('ring')
        logger.debug("Nodetool ring output {}".format(out))

        logger.debug("Creating keyspaces")
        session = self.patient_cql_connection(node1_1)
        create_ks(session, 'ks1', 3)
        create_ks(session, 'ks2', {'dc1': 2})
        create_cf(session, 'ks1.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf2', columns={'c1': 'text', 'c2': 'text'})

        logger.debug("Refreshing size estimates")
        node1_1.nodetool('refreshsizeestimates')
        node1_2.nodetool('refreshsizeestimates')
        node2_1.nodetool('refreshsizeestimates')

        # Ring layout reference for the assertions below (no-op string kept
        # as in the original source):
        """
        CREATE KEYSPACE ks1 WITH replication =
            {'class': 'SimpleStrategy', 'replication_factor': '3'}
        CREATE KEYSPACE ks2 WITH replication =
            {'class': 'NetworkTopologyStrategy', 'dc1': '2'} AND durable_writes = true;

        Datacenter: dc1
        ==========
        Address     Token
                    8473270337963525440
        127.0.0.1   -6639341390736545756
        127.0.0.1   -2688160409776496397
        127.0.0.2   -2506475074448728501
        127.0.0.2   8473270337963525440

        Datacenter: dc2
        ==========
        Address     Token
                    8673615181726552074
        127.0.0.3   -3736333188524231709
        127.0.0.3   8673615181726552074
        """

        logger.debug("Checking node1_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'",
                   [['-3736333188524231709', '-2688160409776496397'],
                    ['-9223372036854775808', '-6639341390736545756'],
                    ['8673615181726552074', '-9223372036854775808']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'",
                   [['-3736333188524231709', '-2688160409776496397'],
                    ['-6639341390736545756', '-3736333188524231709'],
                    ['-9223372036854775808', '-6639341390736545756'],
                    ['8473270337963525440', '8673615181726552074'],
                    ['8673615181726552074', '-9223372036854775808']])

        logger.debug("Checking node1_2 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_2)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'",
                   [['-2506475074448728501', '8473270337963525440'],
                    ['-2688160409776496397', '-2506475074448728501']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'",
                   [['-2506475074448728501', '8473270337963525440'],
                    ['-2688160409776496397', '-2506475074448728501']])

        logger.debug("Checking node2_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node2_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'",
                   [['-6639341390736545756', '-3736333188524231709'],
                    ['8473270337963525440', '8673615181726552074']])
        # node2_1 is in dc2 and ks2 only replicates to dc1, so it owns no
        # primary ranges for ks2.
        assert_none(session, "SELECT range_start, range_end FROM system.size_estimates "
                             "WHERE keyspace_name = 'ks2'")

    def test_simple_removenode(self):
        """ test removenode """
        cluster = self.cluster
        cluster.populate(3)
        cluster.start(jvm_args=["-Dcassandra.size_recorder_interval=1"])
        node1, node2, node3 = cluster.nodelist()
        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8', '-schema', 'replication(factor=2)'])
        # Extract the host id of node3 from the fixed-position slice of
        # `nodetool info` output, then remove the stopped node by id.
        node3_id = node3.nodetool('info').stdout[25:61]
        node3.stop(wait_other_notice=True)
        node1.nodetool('removenode ' + node3_id)
        node1.stress(['read', 'n=10K', 'no-warmup', '-rate', 'threads=8'])

    def test_simple_decommission(self):
        """
        @jira_ticket CASSANDRA-9912
        Check that AssertionError is not thrown on SizeEstimatesRecorder after node is decommissioned
        """
        cluster = self.cluster
        cluster.populate(3)
        cluster.start(jvm_args=["-Dcassandra.size_recorder_interval=1"])
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        if cluster.version() >= '2.2':
            # reduce system_distributed RF to 2 so we don't require forceful decommission
            session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")

        # write some data
        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])

        # Decommission node and wipe its data
        node2.decommission()
        node2.stop()

        # This sleep is here to give the cluster time to hit the AssertionError
        # described in 9912. Do not remove it.
        time.sleep(10)

    @pytest.mark.skip(reason='Hangs on CI for 2.1')
    def test_concurrent_decommission_not_allowed(self):
        """
        Test concurrent decommission is not allowed
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(2).start()
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        mark = node2.mark_log()

        def decommission():
            node2.nodetool('decommission')

        # Launch first decommission in a external thread
        t = Thread(target=decommission)
        t.start()

        # Make sure first decommission is initialized before second decommission
        node2.watch_log_for('DECOMMISSIONING', filename='debug.log')

        # Launch a second decommission, should fail
        with pytest.raises(ToolError):
            node2.nodetool('decommission')

        # Check data is correctly forwarded to node1 after node2 is decommissioned
        t.join()
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        session = self.patient_cql_connection(node1)
        session.execute('USE ks')
        for n in range(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

    @since('3.10')
    def test_resumable_decommission(self):
        """
        @jira_ticket CASSANDRA-12008

        Test decommission operation is resumable
        """
        self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred',
                                                        r'Error while decommissioning node',
                                                        r'Remote peer 127.0.0.2 failed stream session',
                                                        r'Remote peer 127.0.0.2:7000 failed stream session']
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(3, install_byteman=True).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        # reduce system_distributed RF to 2 so we don't require forceful decommission
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        # Execute first rebuild, should fail
        with pytest.raises(ToolError):
            # Byteman script injects a streaming failure partway through the
            # decommission so the second attempt can resume.
            if cluster.version() >= '4.0':
                script = [mk_bman_path('4.0/decommission_failure_inject.btm')]
            else:
                script = [mk_bman_path('pre4.0/decommission_failure_inject.btm')]
            node2.byteman_submit(script)
            node2.nodetool('decommission')

        # Make sure previous ToolError is due to decommission
        node2.watch_log_for('Error while decommissioning node')

        # Decommission again
        mark = node2.mark_log()
        node2.nodetool('decommission')

        # Check decommision is done and we skipped transfereed ranges
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint {}".format(node2.address_for_current_version_slashy()), filename='debug.log')

        # Check data is correctly forwarded to node1 and node3
        cluster.remove(node2)
        node3.stop(gently=False)
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        for i in range(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)
        node1.stop(gently=False)
        node3.start()
        session.shutdown()
        mark = node3.mark_log()
        node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
        session = self.patient_exclusive_cql_connection(node3)
        session.execute('USE ks')
        for i in range(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)

    @pytest.mark.no_vnodes
    def test_movement(self):
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)

        cluster.flush()

        # Move nodes to balance the cluster
        def move_node(node, token):
            mark = node.mark_log()
            node.move(token)  # can't assume 0 is balanced with m3p
            node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
            time.sleep(3)

        balancing_tokens = cluster.balanced_tokens(3)

        move_node(node1, balancing_tokens[0])
        move_node(node2, balancing_tokens[1])
        move_node(node3, balancing_tokens[2])

        time.sleep(1)
        cluster.cleanup()
        for node in cluster.nodelist():
            # after moving nodes we need to relocate any tokens in the wrong places, and after doing that
            # we might have overlapping tokens on the disks, so run a major compaction to get balance even
            if cluster.version() >= '3.2':
                node.nodetool("relocatesstables")
            node.nodetool("compact")

        # Check we can get all the keys
        for n in range(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

        # Now the load should be basically even
        sizes = [node.data_size() for node in [node1, node2, node3]]

        assert_almost_equal(sizes[0], sizes[1], error=0.05)
        assert_almost_equal(sizes[0], sizes[2], error=0.05)
        assert_almost_equal(sizes[1], sizes[2], error=0.05)

    @pytest.mark.no_vnodes
    def test_decommission(self):
        cluster = self.cluster

        tokens = cluster.balanced_tokens(4)
        cluster.populate(4, tokens=tokens).start()
        node1, node2, node3, node4 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.QUORUM)

        cluster.flush()
        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        init_size = sizes[0]
        assert_almost_equal(*sizes)

        time.sleep(.5)
        node4.decommission()
        node4.stop()
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for n in range(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.QUORUM)

        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        logger.debug(sizes)
        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
        assert_almost_equal(sizes[2], init_size)

    @pytest.mark.no_vnodes
    def test_move_single_node(self):
        """ Test moving a node in a single-node cluster (#4200) """
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(1, tokens=[0]).start()
        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)

        cluster.flush()

        node1.move(2**25)
        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in range(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

    @since('3.0')
    def test_decommissioned_node_cant_rejoin(self):
        """
        @jira_ticket CASSANDRA-8801

        Test that a decommissioned node can't rejoin the cluster by:

        - creating a cluster,
        - decommissioning a node, and
        - asserting that the "decommissioned node won't rejoin" error is in the
          logs for that node and
        - asserting that the node is not running.
        """
        rejoin_err = 'This node was decommissioned and will not rejoin the ring'
        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
            rejoin_err]

        self.cluster.populate(3).start()
        node1, node2, node3 = self.cluster.nodelist()

        logger.debug('decommissioning...')
        node3.decommission(force=self.cluster.version() >= '4.0')
        logger.debug('stopping...')
        node3.stop()
        logger.debug('attempting restart...')
        node3.start(wait_other_notice=False)
        try:
            # usually takes 3 seconds, so give it a generous 15
            node3.watch_log_for(rejoin_err, timeout=15)
        except TimeoutError:
            # TimeoutError is not very helpful to the reader of the test output;
            # let that pass and move on to string assertion below
            pass

        assert re.search(rejoin_err,
                         '\n'.join(['\n'.join(err_list)
                                    for err_list in node3.grep_log_for_errors()]), re.MULTILINE)

        # Give the node some time to shut down once it has detected
        # its invalid state. If it doesn't shut down in the 30 seconds,
        # consider filing a bug. It shouldn't take more than 10, in most cases.
        start = time.time()
        while start + 30 > time.time() and node3.is_running():
            time.sleep(1)

        assert not node3.is_running()

    @since('3.0')
    def test_crash_during_decommission(self):
        """
        If a node crashes whilst another node is being decommissioned,
        upon restarting the crashed node should not have invalid entries
        for the decommissioned node
        @jira_ticket CASSANDRA-10231
        """
        cluster = self.cluster
        self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred', 'Stream failed']
        cluster.populate(3).start()

        node1, node2 = cluster.nodelist()[0:2]

        t = DecommissionInParallel(node1)
        t.start()

        node1.watch_log_for("DECOMMISSIONING", filename='debug.log')
        null_status_pattern = re.compile(r".N(?:\s*)127\.0\.0\.1(?:.*)null(?:\s*)rack1")
        while t.is_alive():
            out = self.show_status(node2)
            if null_status_pattern.search(out):
                logger.debug("Matched null status entry")
                break
            logger.debug("Restarting node2")
            node2.stop(gently=False)
            node2.start(wait_for_binary_proto=True, wait_other_notice=False)

        logger.debug("Waiting for decommission to complete")
        t.join()
        self.show_status(node2)

        logger.debug("Sleeping for 30 seconds to allow gossip updates")
        time.sleep(30)
        out = self.show_status(node2)
        assert not null_status_pattern.search(out)

    @since('3.12')
    @pytest.mark.resource_intensive
    def test_stop_decommission_too_few_replicas_multi_dc(self):
        """
        Decommission should fail when it would result in the number of live replicas being less than
        the replication factor. --force should bypass this requirement.
        @jira_ticket CASSANDRA-12510
        @expected_errors ToolError when # nodes will drop below configured replicas in NTS/SimpleStrategy
        """
        cluster = self.cluster
        cluster.populate([2, 2]).start()
        node1, node2, node3, node4 = self.cluster.nodelist()
        session = self.patient_cql_connection(node2)
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
        create_ks(session, 'ks', {'dc1': 2, 'dc2': 2})
        with pytest.raises(ToolError):
            node4.nodetool('decommission')

        session.execute('DROP KEYSPACE ks')
        create_ks(session, 'ks2', 4)
        with pytest.raises(ToolError):
            node4.nodetool('decommission')

        node4.nodetool('decommission --force')
        decommissioned = node4.watch_log_for("DECOMMISSIONED", timeout=120)
        assert decommissioned, "Node failed to decommission when passed --force"

    def show_status(self, node):
        out, _, _ = node.nodetool('status')
        logger.debug("Status as reported by node {}".format(node.address()))
        logger.debug(out)
        return out


class DecommissionInParallel(Thread):
    """Runs `nodetool decommission` on a node in a background thread."""

    def __init__(self, node):
        Thread.__init__(self)
        self.node = node

    def run(self):
        node = self.node
        mark = node.mark_log()
        try:
            out, err, _ = node.nodetool("decommission")
            node.watch_log_for("DECOMMISSIONED", from_mark=mark)
            logger.debug(out)
            logger.debug(err)
        except ToolError as e:
            # Best-effort: failures are expected in crash tests and are
            # asserted on by the caller via the node logs.
            logger.debug("Decommission failed with exception: " + str(e))
            pass
SampleService_test.py
# These tests cover the integration of the entire system and do not go into details - that's # what unit tests are for. As such, typically each method will get a single happy path test and # a single unhappy path test unless otherwise warranted. # Tests of the auth user lookup and workspace wrapper code are at the bottom of the file. import datetime import json import os import tempfile import requests import time import uuid import yaml from configparser import ConfigParser from pytest import fixture, raises from threading import Thread from kafka import KafkaConsumer from kafka.errors import NoBrokersAvailable from SampleService.SampleServiceImpl import SampleService from SampleService.core.errors import ( MissingParameterError, NoSuchWorkspaceDataError, IllegalParameterError) from SampleService.core.notification import KafkaNotifier from SampleService.core.user_lookup import KBaseUserLookup, AdminPermission from SampleService.core.user_lookup import InvalidTokenError, InvalidUserError from SampleService.core.workspace import WS, WorkspaceAccessType, UPA from SampleService.core.errors import UnauthorizedError, NoSuchUserError from SampleService.core.user import UserID from installed_clients.WorkspaceClient import Workspace as Workspace from core import test_utils from core.test_utils import ( assert_ms_epoch_close_to_now, assert_exception_correct, find_free_port ) from arango_controller import ArangoController from mongo_controller import MongoController from workspace_controller import WorkspaceController from auth_controller import AuthController from kafka_controller import KafkaController # TODO should really test a start up for the case where the metadata validation config is not # supplied, but that's almost never going to be the case and the code is trivial, so YAGNI VER = '0.1.0-alpha25' _AUTH_DB = 'test_auth_db' _WS_DB = 'test_ws_db' _WS_TYPE_DB = 'test_ws_type_db' TEST_DB_NAME = 'test_sample_service' TEST_COL_SAMPLE = 'samples' TEST_COL_VERSION = 
'versions' TEST_COL_VER_EDGE = 'ver_to_sample' TEST_COL_NODES = 'nodes' TEST_COL_NODE_EDGE = 'node_edges' TEST_COL_DATA_LINK = 'data_link' TEST_COL_WS_OBJ_VER = 'ws_obj_ver_shadow' TEST_COL_SCHEMA = 'schema' TEST_USER = 'user1' TEST_PWD = 'password1' USER_WS_READ_ADMIN = 'wsreadadmin' TOKEN_WS_READ_ADMIN = None USER_WS_FULL_ADMIN = 'wsfulladmin' TOKEN_WS_FULL_ADMIN = None WS_READ_ADMIN = 'WS_READ_ADMIN' WS_FULL_ADMIN = 'WS_FULL_ADMIN' USER_SERVICE = 'serviceuser' TOKEN_SERVICE = None USER1 = 'user1' TOKEN1 = None USER2 = 'user2' TOKEN2 = None USER3 = 'user3' TOKEN3 = None USER4 = 'user4' TOKEN4 = None USER5 = 'user5' TOKEN5 = None USER_NO_TOKEN1 = 'usernt1' USER_NO_TOKEN2 = 'usernt2' USER_NO_TOKEN3 = 'usernt3' KAFKA_TOPIC = 'sampleservice' def create_deploy_cfg(auth_port, arango_port, workspace_port, kafka_port): cfg = ConfigParser() ss = 'SampleService' cfg.add_section(ss) cfg[ss]['auth-service-url'] = (f'http://localhost:{auth_port}/testmode/' + 'api/legacy/KBase/Sessions/Login') cfg[ss]['auth-service-url-allow-insecure'] = 'true' cfg[ss]['auth-root-url'] = f'http://localhost:{auth_port}/testmode' cfg[ss]['auth-token'] = TOKEN_SERVICE cfg[ss]['auth-read-admin-roles'] = 'readadmin1' cfg[ss]['auth-full-admin-roles'] = 'fulladmin2' cfg[ss]['arango-url'] = f'http://localhost:{arango_port}' cfg[ss]['arango-db'] = TEST_DB_NAME cfg[ss]['arango-user'] = TEST_USER cfg[ss]['arango-pwd'] = TEST_PWD cfg[ss]['workspace-url'] = f'http://localhost:{workspace_port}' cfg[ss]['workspace-read-admin-token'] = TOKEN_WS_READ_ADMIN cfg[ss]['kafka-bootstrap-servers'] = f'localhost:{kafka_port}' cfg[ss]['kafka-topic'] = KAFKA_TOPIC cfg[ss]['sample-collection'] = TEST_COL_SAMPLE cfg[ss]['version-collection'] = TEST_COL_VERSION cfg[ss]['version-edge-collection'] = TEST_COL_VER_EDGE cfg[ss]['node-collection'] = TEST_COL_NODES cfg[ss]['node-edge-collection'] = TEST_COL_NODE_EDGE cfg[ss]['data-link-collection'] = TEST_COL_DATA_LINK cfg[ss]['workspace-object-version-shadow-collection'] = 
TEST_COL_WS_OBJ_VER cfg[ss]['schema-collection'] = TEST_COL_SCHEMA metacfg = { 'validators': { 'foo': {'validators': [{'module': 'SampleService.core.validator.builtin', 'callable_builder': 'noop' }], 'key_metadata': {'a': 'b', 'c': 'd'} }, 'stringlentest': {'validators': [{'module': 'SampleService.core.validator.builtin', 'callable_builder': 'string', 'parameters': {'max-len': 5} }, {'module': 'SampleService.core.validator.builtin', 'callable_builder': 'string', 'parameters': {'keys': 'spcky', 'max-len': 2} }], 'key_metadata': {'h': 'i', 'j': 'k'} } }, 'prefix_validators': { 'pre': {'validators': [{'module': 'core.config_test_vals', 'callable_builder': 'prefix_validator_test_builder', 'parameters': {'fail_on_arg': 'fail_plz'} }], 'key_metadata': {'1': '2'} } } } metaval = tempfile.mkstemp('.cfg', 'metaval-', dir=test_utils.get_temp_dir(), text=True) os.close(metaval[0]) with open(metaval[1], 'w') as handle: yaml.dump(metacfg, handle) cfg[ss]['metadata-validator-config-url'] = f'file://{metaval[1]}' deploy = tempfile.mkstemp('.cfg', 'deploy-', dir=test_utils.get_temp_dir(), text=True) os.close(deploy[0]) with open(deploy[1], 'w') as handle: cfg.write(handle) return deploy[1] @fixture(scope='module') def mongo(): mongoexe = test_utils.get_mongo_exe() tempdir = test_utils.get_temp_dir() wt = test_utils.get_use_wired_tiger() mongo = MongoController(mongoexe, tempdir, wt) wttext = ' with WiredTiger' if wt else '' print(f'running mongo {mongo.db_version}{wttext} on port {mongo.port} in dir {mongo.temp_dir}') yield mongo del_temp = test_utils.get_delete_temp_files() print(f'shutting down mongo, delete_temp_files={del_temp}') mongo.destroy(del_temp) @fixture(scope='module') def auth(mongo): global TOKEN_SERVICE global TOKEN_WS_FULL_ADMIN global TOKEN_WS_READ_ADMIN global TOKEN1 global TOKEN2 global TOKEN3 global TOKEN4 global TOKEN5 jd = test_utils.get_jars_dir() tempdir = test_utils.get_temp_dir() auth = AuthController(jd, f'localhost:{mongo.port}', _AUTH_DB, tempdir) 
print(f'Started KBase Auth2 {auth.version} on port {auth.port} ' + f'in dir {auth.temp_dir} in {auth.startup_count}s') url = f'http://localhost:{auth.port}' test_utils.create_auth_role(url, 'fulladmin1', 'fa1') test_utils.create_auth_role(url, 'fulladmin2', 'fa2') test_utils.create_auth_role(url, 'readadmin1', 'ra1') test_utils.create_auth_role(url, 'readadmin2', 'ra2') test_utils.create_auth_role(url, WS_READ_ADMIN, 'wsr') test_utils.create_auth_role(url, WS_FULL_ADMIN, 'wsf') test_utils.create_auth_user(url, USER_SERVICE, 'serv') TOKEN_SERVICE = test_utils.create_auth_login_token(url, USER_SERVICE) test_utils.create_auth_user(url, USER_WS_READ_ADMIN, 'wsra') TOKEN_WS_READ_ADMIN = test_utils.create_auth_login_token(url, USER_WS_READ_ADMIN) test_utils.set_custom_roles(url, USER_WS_READ_ADMIN, [WS_READ_ADMIN]) test_utils.create_auth_user(url, USER_WS_FULL_ADMIN, 'wsrf') TOKEN_WS_FULL_ADMIN = test_utils.create_auth_login_token(url, USER_WS_FULL_ADMIN) test_utils.set_custom_roles(url, USER_WS_FULL_ADMIN, [WS_FULL_ADMIN]) test_utils.create_auth_user(url, USER1, 'display1') TOKEN1 = test_utils.create_auth_login_token(url, USER1) test_utils.set_custom_roles(url, USER1, ['fulladmin1']) test_utils.create_auth_user(url, USER2, 'display2') TOKEN2 = test_utils.create_auth_login_token(url, USER2) test_utils.set_custom_roles(url, USER2, ['fulladmin1', 'fulladmin2', 'readadmin2']) test_utils.create_auth_user(url, USER3, 'display3') TOKEN3 = test_utils.create_auth_login_token(url, USER3) test_utils.set_custom_roles(url, USER3, ['readadmin1']) test_utils.create_auth_user(url, USER4, 'display4') TOKEN4 = test_utils.create_auth_login_token(url, USER4) test_utils.create_auth_user(url, USER5, 'display5') TOKEN5 = test_utils.create_auth_login_token(url, USER5) test_utils.set_custom_roles(url, USER5, ['fulladmin2']) test_utils.create_auth_user(url, USER_NO_TOKEN1, 'displaynt1') test_utils.create_auth_user(url, USER_NO_TOKEN2, 'displaynt2') test_utils.create_auth_user(url, 
USER_NO_TOKEN3, 'displaynt3') yield auth del_temp = test_utils.get_delete_temp_files() print(f'shutting down auth, delete_temp_files={del_temp}') auth.destroy(del_temp) @fixture(scope='module') def workspace(auth, mongo): jd = test_utils.get_jars_dir() tempdir = test_utils.get_temp_dir() ws = WorkspaceController( jd, mongo, _WS_DB, _WS_TYPE_DB, f'http://localhost:{auth.port}/testmode', tempdir) print(f'Started KBase Workspace {ws.version} on port {ws.port} ' + f'in dir {ws.temp_dir} in {ws.startup_count}s') wsc = Workspace(f'http://localhost:{ws.port}', token=TOKEN_WS_FULL_ADMIN) wsc.request_module_ownership('Trivial') wsc.administer({'command': 'approveModRequest', 'module': 'Trivial'}) wsc.register_typespec({ 'spec': ''' module Trivial { /* @optional dontusethisfieldorifyoudomakesureitsastring */ typedef structure { string dontusethisfieldorifyoudomakesureitsastring; } Object; }; ''', 'dryrun': 0, 'new_types': ['Object'] }) wsc.release_module('Trivial') yield ws del_temp = test_utils.get_delete_temp_files() print(f'shutting down workspace, delete_temp_files={del_temp}') ws.destroy(del_temp, False) @fixture(scope='module') def arango(): arangoexe = test_utils.get_arango_exe() arangojs = test_utils.get_arango_js() tempdir = test_utils.get_temp_dir() arango = ArangoController(arangoexe, arangojs, tempdir) create_test_db(arango) print('running arango on port {} in dir {}'.format(arango.port, arango.temp_dir)) yield arango del_temp = test_utils.get_delete_temp_files() print('shutting down arango, delete_temp_files={}'.format(del_temp)) arango.destroy(del_temp) def create_test_db(arango): systemdb = arango.client.db(verify=True) # default access to _system db systemdb.create_database(TEST_DB_NAME, [{'username': TEST_USER, 'password': TEST_PWD}]) return arango.client.db(TEST_DB_NAME, TEST_USER, TEST_PWD) def clear_db_and_recreate(arango): arango.clear_database(TEST_DB_NAME, drop_indexes=True) db = create_test_db(arango) db.create_collection(TEST_COL_SAMPLE) 
db.create_collection(TEST_COL_VERSION) db.create_collection(TEST_COL_VER_EDGE, edge=True) db.create_collection(TEST_COL_NODES) db.create_collection(TEST_COL_NODE_EDGE, edge=True) db.create_collection(TEST_COL_DATA_LINK, edge=True) db.create_collection(TEST_COL_WS_OBJ_VER) db.create_collection(TEST_COL_SCHEMA) return db @fixture(scope='module') def kafka(): kafka_bin_dir = test_utils.get_kafka_bin_dir() tempdir = test_utils.get_temp_dir() kc = KafkaController(kafka_bin_dir, tempdir) print('running kafka on port {} in dir {}'.format(kc.port, kc.temp_dir)) yield kc del_temp = test_utils.get_delete_temp_files() print('shutting down kafka, delete_temp_files={}'.format(del_temp)) kc.destroy(del_temp, dump_logs_to_stdout=False) @fixture(scope='module') def service(auth, arango, workspace, kafka): portint = test_utils.find_free_port() clear_db_and_recreate(arango) # this is completely stupid. The state is calculated on import so there's no way to # test the state creation normally. cfgpath = create_deploy_cfg(auth.port, arango.port, workspace.port, kafka.port) os.environ['KB_DEPLOYMENT_CONFIG'] = cfgpath from SampleService import SampleServiceServer Thread(target=SampleServiceServer.start_server, kwargs={'port': portint}, daemon=True).start() time.sleep(0.05) port = str(portint) print('running sample service at localhost:' + port) yield port # shutdown the server # SampleServiceServer.stop_server() <-- this causes an error. the start & stop methods are # bugged. 
_proc is only set if newprocess=True


@fixture
def sample_port(service, arango, workspace, kafka):
    # Per-test fixture: reset the Arango and workspace DBs, then yield the
    # running service's port. Kafka topics are deliberately NOT cleared here.
    clear_db_and_recreate(arango)
    workspace.clear_db()
    # _clear_kafka_messages(kafka)  # too expensive to run after every test
    # kafka.clear_all_topics()  # too expensive to run after every test
    yield service


def test_init_fail():
    """Check SampleService construction fails with the expected error as each config key is added."""
    # init success is tested via starting the server
    init_fail(None, ValueError('config is empty, cannot start service'))
    cfg = {}
    init_fail(cfg, ValueError('config is empty, cannot start service'))
    cfg['arango-url'] = None
    init_fail(cfg, MissingParameterError('config param arango-url'))
    cfg['arango-url'] = 'crap'
    init_fail(cfg, MissingParameterError('config param arango-db'))
    cfg['arango-db'] = 'crap'
    init_fail(cfg, MissingParameterError('config param arango-user'))
    cfg['arango-user'] = 'crap'
    init_fail(cfg, MissingParameterError('config param arango-pwd'))
    cfg['arango-pwd'] = 'crap'
    init_fail(cfg, MissingParameterError('config param sample-collection'))
    cfg['sample-collection'] = 'crap'
    init_fail(cfg, MissingParameterError('config param version-collection'))
    cfg['version-collection'] = 'crap'
    init_fail(cfg, MissingParameterError('config param version-edge-collection'))
    cfg['version-edge-collection'] = 'crap'
    init_fail(cfg, MissingParameterError('config param node-collection'))
    cfg['node-collection'] = 'crap'
    init_fail(cfg, MissingParameterError('config param node-edge-collection'))
    cfg['node-edge-collection'] = 'crap'
    init_fail(cfg, MissingParameterError('config param data-link-collection'))
    cfg['data-link-collection'] = 'crap'
    init_fail(cfg, MissingParameterError(
        'config param workspace-object-version-shadow-collection'))
    cfg['workspace-object-version-shadow-collection'] = 'crap'
    init_fail(cfg, MissingParameterError('config param schema-collection'))
    cfg['schema-collection'] = 'crap'
    init_fail(cfg, MissingParameterError('config param auth-root-url'))
    cfg['auth-root-url'] = 'crap'
    init_fail(cfg, MissingParameterError('config param auth-token'))
    cfg['auth-token'] = 'crap'
    init_fail(cfg, MissingParameterError('config param workspace-url'))
    cfg['workspace-url'] = 'crap'
    init_fail(cfg, MissingParameterError('config param workspace-read-admin-token'))
    cfg['workspace-read-admin-token'] = 'crap'
    cfg['kafka-bootstrap-servers'] = 'crap'
    init_fail(cfg, MissingParameterError('config param kafka-topic'))
    cfg['kafka-topic'] = 'crap'
    # get_validators is tested elsewhere, just make sure it'll error out
    cfg['metadata-validator-config-url'] = 'https://kbase.us/services'
    init_fail(cfg, ValueError(
        'Failed to open validator configuration file at https://kbase.us/services: Not Found'))


def init_fail(config, expected):
    # Construct the service with `config` and check the raised exception matches.
    with raises(Exception) as got:
        SampleService(config)
    assert_exception_correct(got.value, expected)


def test_status(sample_port):
    """Smoke test the JSON-RPC status method."""
    res = requests.post('http://localhost:' + sample_port, json={
        'method': 'SampleService.status',
        'params': [],
        'version': 1.1,
        'id': 1   # don't do this. This is bad practice
    })
    assert res.status_code == 200
    s = res.json()
    # print(s)
    assert len(s['result']) == 1  # results are always in a list
    assert_ms_epoch_close_to_now(s['result'][0]['servertime'])
    assert s['result'][0]['state'] == 'OK'
    assert s['result'][0]['message'] == ""
    assert s['result'][0]['version'] == VER
    # ignore git url and hash, can change


def get_authorized_headers(token):
    # Build request headers; the authorization header is set only when a token is given.
    headers = {'accept': 'application/json'}
    if token is not None:
        headers['authorization'] = token
    return headers


def _check_kafka_messages(kafka, expected_msgs, topic=KAFKA_TOPIC, print_res=False):
    # Assert that exactly `expected_msgs`, in order, are on the Kafka topic.
    kc = KafkaConsumer(
        topic,
        bootstrap_servers=f'localhost:{kafka.port}',
        auto_offset_reset='earliest',
        group_id='foo')  # quiets warnings
    try:
        res = kc.poll(timeout_ms=2000)  # 1s not enough? Seems like a lot
        if print_res:
            print(res)
        assert len(res) == 1
        assert next(iter(res.keys())).topic == topic
        records = next(iter(res.values()))
        assert len(records) == len(expected_msgs)
        for i, r in enumerate(records):
            assert json.loads(r.value) == expected_msgs[i]
        # Need to commit here? doesn't seem like it
    finally:
        kc.close()


def _clear_kafka_messages(kafka, topic=KAFKA_TOPIC):
    # Consume (and drop) any messages currently on the topic.
    kc = KafkaConsumer(
        topic,
        bootstrap_servers=f'localhost:{kafka.port}',
        auto_offset_reset='earliest',
        group_id='foo')  # quiets warnings
    try:
        kc.poll(timeout_ms=2000)  # 1s not enough? Seems like a lot
        # Need to commit here? doesn't seem like it
    finally:
        kc.close()


def test_create_and_get_sample_with_version(sample_port, kafka):
    """Save two versions of a sample and retrieve each, checking Kafka events."""
    _clear_kafka_messages(kafka)
    url = f'http://localhost:{sample_port}'

    # version 1
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {
                                          'foo': {'bar': 'baz'},
                                          'stringlentest': {'foooo': 'barrr', 'spcky': 'fa'},
                                          'prefixed': {'safe': 'args'}
                                      },
                                      'meta_user': {'a': {'b': 'c'}},
                                      'source_meta': [
                                          {'key': 'foo', 'skey': 'bar',
                                           'svalue': {'whee': 'whoo'}},
                                          {'key': 'stringlentest', 'skey': 'ya fer sure',
                                           'svalue': {'just': 'some', 'data': 42}}
                                      ]
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # version 2
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '68',
        'params': [{
            'sample': {'name': 'mysample2',
                       'id': id_,
                       'node_tree': [{'id': 'root2',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {'foo': {'bar': 'bat'}},
                                      'meta_user': {'a': {'b': 'd'}}
                                      }
                                     ]
                       },
            'prior_version': 1
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 2

    # get version 1
    ret = requests.post(url,
                        headers=get_authorized_headers(TOKEN1),
                        json={
                            'method': 'SampleService.get_sample',
                            'version': '1.1',
                            'id': '42',
                            'params': [{'id': id_, 'version': 1}]
                        })
    # print(ret.text)
    assert ret.ok is True
    j = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(j['save_date'])
    del j['save_date']
    assert j == {
        'id': id_,
        'version': 1,
        'user': USER1,
        'name': 'mysample',
        'node_tree': [{'id': 'root',
                       'parent': None,
                       'type': 'BioReplicate',
                       'meta_controlled': {
                           'foo': {'bar': 'baz'},
                           'stringlentest': {'foooo': 'barrr', 'spcky': 'fa'},
                           'prefixed': {'safe': 'args'}
                       },
                       'meta_user': {'a': {'b': 'c'}},
                       'source_meta': [
                           {'key': 'foo', 'skey': 'bar', 'svalue': {'whee': 'whoo'}},
                           {'key': 'stringlentest', 'skey': 'ya fer sure',
                            'svalue': {'just': 'some', 'data': 42}}
                       ],
                       }]
    }

    # get version 2
    ret = requests.post(url,
                        headers=get_authorized_headers(TOKEN1),
                        json={
                            'method': 'SampleService.get_sample',
                            'version': '1.1',
                            'id': '43',
                            'params': [{'id': id_}]
                        })
    # print(ret.text)
    assert ret.ok is True
    j = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(j['save_date'])
    del j['save_date']
    assert j == {
        'id': id_,
        'version': 2,
        'user': USER1,
        'name': 'mysample2',
        'node_tree': [{'id': 'root2',
                       'parent': None,
                       'type': 'BioReplicate',
                       'meta_controlled': {'foo': {'bar': 'bat'}},
                       'meta_user': {'a': {'b': 'd'}},
                       'source_meta': [],
                       }]
    }

    _check_kafka_messages(
        kafka,
        [
            {'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
            {'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 2}
        ])


def test_create_and_get_samples(sample_port, kafka):
    """Save two independent samples and fetch both via get_samples."""
    _clear_kafka_messages(kafka)
    url = f'http://localhost:{sample_port}'

    # first sample
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {
                                          'foo': {'bar': 'baz'},
                                          'stringlentest': {'foooo': 'barrr', 'spcky': 'fa'},
                                          'prefixed': {'safe': 'args'}
                                      },
                                      'meta_user': {'a': {'b': 'c'}},
                                      'source_meta': [
                                          {'key': 'foo', 'skey': 'bar',
                                           'svalue': {'whee': 'whoo'}},
                                          {'key': 'stringlentest', 'skey': 'ya fer sure',
                                           'svalue': {'just': 'some', 'data': 42}}
                                      ]
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id1_ = ret.json()['result'][0]['id']

    # second sample
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '68',
        'params': [{
            'sample': {'name': 'mysample2',
                       'node_tree': [{'id': 'root2',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {'foo': {'bar': 'bat'}},
                                      'meta_user': {'a': {'b': 'd'}}
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id2_ = ret.json()['result'][0]['id']

    # get both samples
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.get_samples',
        'version': '1.1',
        'id': '42',
        'params': [{'samples': [{'id': id1_, 'version': 1}, {'id': id2_, 'version': 1}]}]
    })
    # print(ret.text)
    assert ret.ok is True
    j = ret.json()['result'][0]
    for s in j:
        assert_ms_epoch_close_to_now(s['save_date'])
        del s['save_date']
    # NOTE(review): leftover debug output below (incl. a redundant local
    # `import json`) — consider removing.
    print('-'*80)
    import json
    print(json.dumps(j))
    print('-'*80)
    assert j == [{
        'id': id1_,
        'version': 1,
        'user': USER1,
        'name': 'mysample',
        'node_tree': [{
            'id': 'root',
            'parent': None,
            'type': 'BioReplicate',
            'meta_controlled': {
                'foo': {'bar': 'baz'},
                'stringlentest': {'foooo': 'barrr', 'spcky': 'fa'},
                'prefixed': {'safe': 'args'}
            },
            'meta_user': {'a': {'b': 'c'}},
            'source_meta': [
                {'key': 'foo', 'skey': 'bar', 'svalue': {'whee': 'whoo'}},
                {'key': 'stringlentest', 'skey': 'ya fer sure',
                 'svalue': {'just': 'some', 'data': 42}}
            ],
        }]
    }, {
        'id': id2_,
        'version': 1,
        'user': USER1,
        'name': 'mysample2',
        'node_tree': [{'id': 'root2',
                       'parent': None,
                       'type': 'BioReplicate',
                       'meta_controlled': {'foo': {'bar': 'bat'}},
                       'meta_user': {'a': {'b': 'd'}},
                       'source_meta': []
                       }]
    }]

    _check_kafka_messages(
        kafka,
        [
            {'event_type': 'NEW_SAMPLE', 'sample_id': id1_, 'sample_ver': 1},
            {'event_type': 'NEW_SAMPLE', 'sample_id': id2_, 'sample_ver': 1}
        ])


def test_create_sample_as_admin(sample_port):
    _create_sample_as_admin(sample_port, None, TOKEN2, USER2)


def test_create_sample_as_admin_impersonate_user(sample_port):
    # user name is whitespace-padded; the service is expected to trim it
    _create_sample_as_admin(sample_port, ' ' + USER4 + ' ', TOKEN4, USER4)


def _create_sample_as_admin(sample_port, as_user, get_token, expected_user):
    # Create a sample as an admin (optionally impersonating `as_user`) and
    # check it reads back attributed to `expected_user`.
    url = f'http://localhost:{sample_port}'

    # version 1
    ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {'foo': {'bar': 'baz'}
                                                          },
                                      'meta_user': {'a': {'b': 'c'}}
                                      }
                                     ]
                       },
            'as_admin': 1,
            'as_user': as_user
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # get
    ret = requests.post(url, headers=get_authorized_headers(get_token), json={
        'method': 'SampleService.get_sample',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'version': 1}]
    })
    # print(ret.text)
    assert ret.ok is True
    j = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(j['save_date'])
    del j['save_date']
    assert j == {
        'id': id_,
        'version': 1,
        'user': expected_user,
        'name': 'mysample',
        'node_tree': [{'id': 'root',
                       'parent': None,
                       'type': 'BioReplicate',
                       'meta_controlled': {'foo': {'bar': 'baz'}
                                           },
                       'meta_user': {'a': {'b': 'c'}},
                       'source_meta': [],
                       }]
    }


def test_create_sample_version_as_admin(sample_port):
    _create_sample_version_as_admin(sample_port, None, USER2)


def test_create_sample_version_as_admin_impersonate_user(sample_port):
    _create_sample_version_as_admin(sample_port, USER3, USER3)


def _create_sample_version_as_admin(sample_port, as_user, expected_user):
    # Create v1 as a regular user, then v2 as an admin (optionally
    # impersonating `as_user`), and check v2's attribution.
    url = f'http://localhost:{sample_port}'

    # version 1
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {
                                          'foo': {'bar': 'baz'},
                                          'stringlentest': {'foooo': 'barrr', 'spcky': 'fa'},
                                          'prefixed': {'safe': 'args'}
                                      },
                                      'meta_user': {'a': {'b': 'c'}}
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # version 2
    ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '68',
        'params': [{
            'sample': {'name': 'mysample2',
                       'id': id_,
                       'node_tree': [{'id': 'root2',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {'foo': {'bar': 'bat'}},
                                      'meta_user': {'a': {'b': 'd'}}
                                      }
                                     ]
                       },
            'as_admin': 1,
            'as_user': as_user
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 2

    # get version 2
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.get_sample',
        'version': '1.1',
        'id': '43',
        'params': [{'id': id_}]
    })
    # print(ret.text)
    assert ret.ok is True
    j = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(j['save_date'])
    del j['save_date']
    assert j == {
        'id': id_,
        'version': 2,
        'user': expected_user,
        'name': 'mysample2',
        'node_tree': [{'id': 'root2',
                       'parent': None,
                       'type': 'BioReplicate',
                       'meta_controlled': {'foo': {'bar': 'bat'}},
                       'meta_user': {'a': {'b': 'd'}},
                       'source_meta': [],
                       }]
    }


def test_get_sample_public_read(sample_port):
    """Publicly readable samples are visible to unauthorized and anonymous users."""
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _replace_acls(url, id_, TOKEN1, {'public_read': 1})
    for token in [TOKEN4, None]:  # unauthed user and anonymous user
        s = _get_sample(url, token, id_)
        assert_ms_epoch_close_to_now(s['save_date'])
        del s['save_date']
        assert s == {
            'id': id_,
            'version': 1,
            'user': 'user1',
            'name': 'mysample',
            'node_tree': [{'id': 'root',
                           'parent': None,
                           'type': 'BioReplicate',
                           'meta_controlled': {},
                           'meta_user': {},
                           'source_meta': [],
                           },
                          {'id': 'foo',
                           'parent': 'root',
                           'type': 'TechReplicate',
                           'meta_controlled': {},
                           'meta_user': {},
                           'source_meta': [],
                           }
                          ]
        }


def _get_sample(url, token, id_):
    # Fetch a sample via the API and return the result structure.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_sample',
        'version': '1.1',
        'id': '43',
        'params': [{'id': str(id_)}]
    })
    # print(ret.text)
    assert ret.ok is True
    return ret.json()['result'][0]


def test_get_sample_as_admin(sample_port):
    """A read-admin can fetch a sample they have no explicit ACL on."""
    url = f'http://localhost:{sample_port}'

    # version 1
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      'meta_controlled': {'foo': {'bar': 'baz'}
                                                          },
                                      'meta_user': {'a': {'b': 'c'}}
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # token3 has read admin but not full admin
    ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
        'method': 'SampleService.get_sample',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'version': 1, 'as_admin': 1}]
    })
    print(ret.text)
    assert ret.ok is True
    j = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(j['save_date'])
    del j['save_date']
    assert j == {
        'id': id_,
        'version': 1,
        'user': USER1,
        'name': 'mysample',
        'node_tree': [{'id': 'root',
                       'parent': None,
                       'type': 'BioReplicate',
                       'meta_controlled': {'foo': {'bar': 'baz'},
                                           },
                       'meta_user': {'a': {'b': 'c'}},
                       'source_meta': [],
                       }]
    }


def test_create_sample_fail_no_nodes(sample_port):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': None
                       }
        }]
    })
    # print(ret.text)
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 30001 Illegal input parameter: sample node tree ' +
        'must be present and a list')


def test_create_sample_fail_bad_metadata(sample_port):
    """Each bad controlled-metadata payload produces the matching validation error."""
    _create_sample_fail_bad_metadata(
        sample_port, {'stringlentest': {}},
        'Sample service error code 30001 Illegal input parameter: Error for node at index 0: ' +
        'Controlled metadata value associated with metadata key stringlentest is null or empty')
    _create_sample_fail_bad_metadata(
        sample_port, {'stringlentest': {'foooo': 'barrrr'}},
        'Sample service error code 30010 Metadata validation failed: Node at index 0: ' +
        'Key stringlentest: Metadata value at key foooo is longer than max length of 5')
    _create_sample_fail_bad_metadata(
        sample_port, {'stringlentest': {'foooo': 'barrr', 'spcky': 'baz'}},
        'Sample service error code 30010 Metadata validation failed: Node at index 0: Key ' +
        'stringlentest: Metadata value at key spcky is longer than max length of 2')
    _create_sample_fail_bad_metadata(
        sample_port, {'prefix': {'fail_plz': 'yes, or principal sayof'}},
        "Sample service error code 30010 Metadata validation failed: Node at index 0: " +
        "Prefix validator pre, key prefix: pre, prefix, {'fail_plz': 'yes, or principal sayof'}")
    _create_sample_fail_bad_metadata(
        sample_port, {'prefix': {'foo': 'bar'}},
        'Sample service error code 30001 Illegal input parameter: Error for node at ' +
        'index 0: Duplicate source metadata key: prefix',
        sourcemeta=[
            {'key': 'prefix', 'skey': 'a', 'svalue': {'a': 'b'}},
            {'key': 'prefix', 'skey': 'b', 'svalue': {'c': 'd'}}
        ])


def _create_sample_fail_bad_metadata(sample_port, meta, expected, sourcemeta=None):
    # Attempt a create with the given controlled metadata / source metadata
    # and check the error message.
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      'meta_controlled': meta,
                                      'source_meta': sourcemeta
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_create_sample_fail_permissions(sample_port):
    """A read-only user cannot save a new version of a sample."""
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    _replace_acls(url, id_, TOKEN1, {'read': [USER2]})

    ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'id': id_,
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        f'Sample service error code 20000 Unauthorized: User user2 cannot write to sample {id_}')


def test_create_sample_fail_admin_bad_user_name(sample_port):
    _create_sample_fail_admin_as_user(
        sample_port, 'bad\tuser',
        'Sample service error code 30001 Illegal input parameter: userid contains ' +
        'control characters')


def test_create_sample_fail_admin_no_such_user(sample_port):
    _create_sample_fail_admin_as_user(
        sample_port, USER4 + 'impostor',
        'Sample service error code 50000 No such user: user4impostor')


def _create_sample_fail_admin_as_user(sample_port, user, expected):
    # Attempt an admin create impersonating `user` and check the error.
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       },
            'as_admin': 'true',  # note: a truthy string, not an int
            'as_user': user
        }]
    })
    # print(ret.text)
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_create_sample_fail_admin_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    # token 3 only has read permissions
    ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       },
            'as_admin': 1,
            'as_user': USER4
        }]
    })
    # print(ret.text)
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 20000 Unauthorized: User user3 does not have the ' +
        'necessary administration privileges to run method create_sample')


def test_get_sample_fail_bad_id(sample_port):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # truncate the UUID so it no longer parses
    ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
        'method': 'SampleService.get_sample',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_[:-1]}]
    })
    # print(ret.text)
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 30001 Illegal input parameter: ' +
        f'id {id_[:-1]} must be a UUID string')


def test_get_sample_fail_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    _get_sample_fail(
        url, TOKEN2, {'id': id_},
        f'Sample service error code 20000 Unauthorized: User user2 cannot read sample {id_}')
    _get_sample_fail(
        url, None, {'id': id_},
        f'Sample service error code 20000 Unauthorized: Anonymous users cannot read sample {id_}')
    _get_sample_fail(
        url, None, {'id': id_, 'as_admin': 1},
        'Sample service error code 20000 Unauthorized: Anonymous users ' +
        'may not act as service administrators.')


def test_get_sample_fail_admin_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # user 4 has no admin permissions
    _get_sample_fail(
        url, TOKEN4, {'id': id_, 'as_admin': 1},
        'Sample service error code 20000 Unauthorized: User user4 does not have the ' +
        'necessary administration privileges to run method get_sample')


def _get_sample_fail(url, token, params, expected):
    # Call get_sample with `params` and check the error message.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_sample',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    # print(ret.text)
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_get_and_replace_acls(sample_port, kafka):
    """Full ACL lifecycle: defaults, replace, read/write rights, admin replace, Kafka events."""
    _clear_kafka_messages(kafka)
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    _assert_acl_contents(url, id_, TOKEN1, {
        'owner': USER1,
        'admin': [],
        'write': [],
        'read': [],
        'public_read': 0
    })

    _replace_acls(url, id_, TOKEN1, {
        'admin': [USER2],
        'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
        'read': [USER_NO_TOKEN3, USER4]
    })

    # test that people in the acls can read
    for token in [TOKEN2, TOKEN3, TOKEN4]:
        _assert_acl_contents(url, id_, token, {
            'owner': USER1,
            'admin': [USER2],
            'write': [USER3, USER_NO_TOKEN1, USER_NO_TOKEN2],
            'read': [USER4, USER_NO_TOKEN3],
            'public_read': 0
        })

        ret = requests.post(url, headers=get_authorized_headers(token), json={
            'method': 'SampleService.get_sample',
            'version': '1.1',
            'id': '42',
            'params': [{'id': id_}]
        })
        # print(ret.text)
        assert ret.ok is True
        j = ret.json()['result'][0]
        del j['save_date']
        assert j == {
            'id': id_,
            'version': 1,
            'user': USER1,
            'name': 'mysample',
            'node_tree': [{
                'id': 'root',
                'type': 'BioReplicate',
                'parent': None,
                'meta_controlled': {},
                'meta_user': {},
                'source_meta': [],
            }]
        }

    # test admins and writers can write
    for token, version in ((TOKEN2, 2), (TOKEN3, 3)):
        ret = requests.post(url, headers=get_authorized_headers(token), json={
            'method': 'SampleService.create_sample',
            'version': '1.1',
            'id': '68',
            'params': [{
                'sample': {'name': f'mysample{version}',
                           'id': id_,
                           'node_tree': [{'id': f'root{version}',
                                          'type': 'BioReplicate',
                                          }
                                         ]
                           }
            }]
        })
        # print(ret.text)
        assert ret.ok is True
        assert ret.json()['result'][0]['version'] == version

    # check one of the writes
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.get_sample',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'version': 2}]
    })
    # print(ret.text)
    assert ret.ok is True
    j = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(j['save_date'])
    del j['save_date']
    assert j == {
        'id': id_,
        'version': 2,
        'user': USER2,
        'name': 'mysample2',
        'node_tree': [{'id': 'root2',
                       'parent': None,
                       'type': 'BioReplicate',
                       'meta_controlled': {},
                       'meta_user': {},
                       'source_meta': [],
                       }]
    }

    # test that an admin can replace ACLs
    _replace_acls(url, id_, TOKEN2, {
        'admin': [USER_NO_TOKEN2],
        'write': [],
        'read': [USER2],
        'public_read': 1
    })
    _assert_acl_contents(url, id_, TOKEN1, {
        'owner': USER1,
        'admin': [USER_NO_TOKEN2],
        'write': [],
        'read': [USER2],
        'public_read': 1
    })

    _check_kafka_messages(
        kafka,
        [
            {'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
            {'event_type': 'ACL_CHANGE', 'sample_id': id_},
            {'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 2},
            {'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 3},
            {'event_type': 'ACL_CHANGE', 'sample_id': id_},
        ])


def test_get_acls_public_read(sample_port):
    """ACLs of a publicly readable sample are visible to any user."""
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _replace_acls(url, id_, TOKEN1, {'public_read': 1})
    for token in [TOKEN4, None]:  # user with no explicit perms and anon user
        _assert_acl_contents(url, id_, token, {
            'owner': USER1,
            'admin': [],
            'write': [],
            'read': [],
            'public_read': 1
        })


def test_get_acls_as_admin(sample_port):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # user 3 has admin read rights only
    _assert_acl_contents(url, id_, TOKEN3, {
        'owner': USER1,
        'admin': [],
        'write': [],
        'read': [],
        'public_read': 0
    }, as_admin=1)


def test_replace_acls_as_admin(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _assert_acl_contents(url, id_, TOKEN1, {
        'owner': USER1,
        'admin': [],
        'write': [],
        'read': [],
        'public_read': 0
    })
    _replace_acls(url, id_, TOKEN2, {
        'admin': [USER2],
        'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
        'read': [USER_NO_TOKEN3, USER4],
        'public_read': 1
    }, as_admin=1)
    _assert_acl_contents(url, id_, TOKEN1, {
        'owner': USER1,
        'admin': [USER2],
        'write': [USER3, USER_NO_TOKEN1, USER_NO_TOKEN2],
        'read': [USER4, USER_NO_TOKEN3],
        'public_read': 1
    })


def _replace_acls(url, id_, token, acls, as_admin=0, print_resp=False):
    # Replace a sample's ACLs via the API; asserts the call succeeded.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.replace_sample_acls',
        'version': '1.1',
        'id': '67',
        'params': [{'id': id_, 'acls': acls, 'as_admin': as_admin}]
    })
    if print_resp:
        print(ret.text)
    assert ret.ok is True
    assert ret.json() == {'version': '1.1', 'id': '67', 'result': None}


def _assert_acl_contents(url, id_, token, expected, as_admin=0, print_resp=False):
    # Fetch a sample's ACLs via the API and compare to `expected`.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_sample_acls',
        'version': '1.1',
        'id': '47',
        'params': [{'id': id_, 'as_admin': as_admin}]
    })
    if print_resp:
        print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0] == expected


def test_get_acls_fail_no_id(sample_port):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    id_ = ret.json()['result'][0]['id']

    # wrong param name: 'ids' instead of 'id'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.get_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [{'ids': id_}]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 30000 Missing input parameter: id')


def test_get_acls_fail_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _get_acls_fail_permissions(
        url, TOKEN2, {'id': id_},
        f'Sample service error code 20000 Unauthorized: User user2 cannot read sample {id_}')
    _get_acls_fail_permissions(
        url, None, {'id': id_},
        f'Sample service error code 20000 Unauthorized: Anonymous users cannot read sample {id_}')
    _get_acls_fail_permissions(
        url, None, {'id': id_, 'as_admin': 1},
        'Sample service error code 20000 Unauthorized: Anonymous users ' +
        'may not act as service administrators.')


def _get_acls_fail_permissions(url, token, params, expected):
    # Call get_sample_acls with `params` and check the error message.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_get_acls_fail_admin_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)

    # user 4 has no admin perms
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.get_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'as_admin': 1}]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 20000 Unauthorized: User user4 does not have the ' +
        'necessary administration privileges to run method get_sample_acls')


def _create_generic_sample(url, token):
    # Create a 2-node sample (root BioReplicate + foo TechReplicate); return its ID.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.create_sample',
        'version': '1.1',
        'id': '67',
        'params': [{
            'sample': {'name': 'mysample',
                       'node_tree': [{'id': 'root',
                                      'type': 'BioReplicate',
                                      },
                                     {'id': 'foo',
                                      'parent': 'root',
                                      'type': 'TechReplicate',
                                      }
                                     ]
                       }
        }]
    })
    # print(ret.text)
    assert ret.ok is True
    assert ret.json()['result'][0]['version'] == 1
    return ret.json()['result'][0]['id']


def test_replace_acls_fail_no_id(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    # wrong param name: 'ids' instead of 'id'
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.replace_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [{'ids': id_}]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 30000 Missing input parameter: id')


def test_replace_acls_fail_bad_acls(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    # acls must be a mapping, not a list
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.replace_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'acls': ['foo']}]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 30001 Illegal input parameter: ' +
        'ACLs must be supplied in the acls key and must be a mapping')


def test_replace_acls_fail_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _replace_acls(url, id_, TOKEN1, {
        'admin': [USER2],
        'write': [USER3],
        'read': [USER4]
    })
    # write and read rights are insufficient to administrate ACLs
    for user, token in ((USER3, TOKEN3), (USER4, TOKEN4)):
        ret = requests.post(url, headers=get_authorized_headers(token), json={
            'method': 'SampleService.replace_sample_acls',
            'version': '1.1',
            'id': '42',
            'params': [{'id': id_, 'acls': {}}]
        })
        assert ret.status_code == 500
        assert ret.json()['error']['message'] == (
            f'Sample service error code 20000 Unauthorized: User {user} cannot ' +
            f'administrate sample {id_}')


def test_replace_acls_fail_admin_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    for user, token in ((USER1, TOKEN1), (USER3, TOKEN3), (USER4, TOKEN4)):
        ret = requests.post(url, headers=get_authorized_headers(token), json={
            'method': 'SampleService.replace_sample_acls',
            'version': '1.1',
            'id': '42',
            'params': [{'id': id_, 'acls': {}, 'as_admin': 1}]
        })
        assert ret.status_code == 500
        assert ret.json()['error']['message'] == (
            f'Sample service error code 20000 Unauthorized: User {user} does not have the ' +
            'necessary administration privileges to run method replace_sample_acls')


def test_replace_acls_fail_bad_user(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    ret = requests.post(url,
                        headers=get_authorized_headers(TOKEN1),
                        json={
                            'method': 'SampleService.replace_sample_acls',
                            'version': '1.1',
                            'id': '42',
                            'params': [{'id': id_,
                                        'acls': {
                                            'admin': [USER2, 'a'],
                                            'write': [USER3],
                                            'read': [USER4, 'philbin_j_montgomery_iii']
                                        }
                                        }]
                        })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 50000 No such user: a, philbin_j_montgomery_iii')


def test_replace_acls_fail_user_in_2_acls(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.replace_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'acls': {'write': [USER2, USER3], 'read': [USER2]}}]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 30001 Illegal input parameter: ' +
        f'User {USER2} appears in two ACLs')


def test_replace_acls_fail_owner_in_another_acl(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    ret = requests.post(url, headers=get_authorized_headers(TOKEN1), json={
        'method': 'SampleService.replace_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'acls': {'write': [USER1]}}]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == (
        'Sample service error code 30001 Illegal input parameter: ' +
        'The owner cannot be in any other ACL')


def test_update_acls(sample_port, kafka):
    _update_acls_tst(sample_port, kafka, TOKEN1, False)  # owner
    _update_acls_tst(sample_port, kafka, TOKEN2, False)  # admin
    _update_acls_tst(sample_port, kafka, TOKEN5, True)  # as_admin = True


def _update_acls_tst(sample_port, kafka, token, as_admin):
    # Apply a delta ACL update as `token` and check the resulting ACLs and
    # Kafka events.
    _clear_kafka_messages(kafka)
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _replace_acls(url, id_, TOKEN1, {
        'admin': [USER2],
        'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
        'read': [USER_NO_TOKEN3, USER4],
        'public_read': 0
    })
    _update_acls(url, token, {
        'id': str(id_),
        'admin': [USER4],
        'write': [USER2],
        'read': [USER_NO_TOKEN2],
        'remove': [USER3],
        'public_read': 390,  # any truthy int enables public read
        'as_admin': 1 if as_admin else 0,
    })
    _assert_acl_contents(url, id_, TOKEN1, {
        'owner': USER1,
        'admin': [USER4],
        'write': [USER2, USER_NO_TOKEN1],
        'read': [USER_NO_TOKEN2, USER_NO_TOKEN3],
        'public_read': 1
    })
    _check_kafka_messages(
        kafka,
        [
            {'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
            {'event_type': 'ACL_CHANGE', 'sample_id': id_},
            {'event_type': 'ACL_CHANGE', 'sample_id': id_},
        ])


def test_update_acls_with_at_least(sample_port, kafka):
    _update_acls_tst_with_at_least(sample_port, kafka, TOKEN1, False)  # owner
    _update_acls_tst_with_at_least(sample_port, kafka, TOKEN2, False)  # admin
    _update_acls_tst_with_at_least(sample_port, kafka, TOKEN5, True)  # as_admin = True


def _update_acls_tst_with_at_least(sample_port, kafka, token, as_admin):
    # As above but with at_least=1: existing users keep the higher of their
    # current and requested permission levels.
    _clear_kafka_messages(kafka)
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _replace_acls(url, id_, TOKEN1, {
        'admin': [USER2],
        'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER3],
        'read': [USER_NO_TOKEN3, USER4],
        'public_read': 0
    })
    _update_acls(url, token, {
        'id': str(id_),
        'admin': [USER4],
        'write': [USER2, USER_NO_TOKEN3],
        'read': [USER_NO_TOKEN2, USER5],
        'remove': [USER3],
        'public_read': 390,
        'as_admin': 1 if as_admin else 0,
        'at_least': 1,
    })
    _assert_acl_contents(url, id_, TOKEN1, {
        'owner': USER1,
        'admin': [USER2, USER4],
        'write': [USER_NO_TOKEN1, USER_NO_TOKEN2, USER_NO_TOKEN3],
        'read': [USER5],
        'public_read': 1
    }, print_resp=True)
    _check_kafka_messages(
        kafka,
        [
            {'event_type': 'NEW_SAMPLE', 'sample_id': id_, 'sample_ver': 1},
            {'event_type': 'ACL_CHANGE', 'sample_id': id_},
            {'event_type': 'ACL_CHANGE', 'sample_id': id_},
        ])


def test_update_acls_fail_no_id(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _update_acls_fail(
        url, TOKEN1, {'ids': id_},
        'Sample service error code 30000 Missing input parameter: id')


def test_update_acls_fail_bad_pub(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _update_acls_fail(
        url, TOKEN1, {'id': id_, 'public_read': 'thingy'},
        'Sample service error code 30001 Illegal input parameter: ' +
        'public_read must be an integer if present')


def test_update_acls_fail_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _replace_acls(url, id_, TOKEN1, {
        'admin': [USER2],
        'write': [USER3],
        'read': [USER4]
    })
    for user, token in ((USER3, TOKEN3), (USER4, TOKEN4)):
        _update_acls_fail(url, token, {'id': id_}, 'Sample service error code 20000 ' +
                          f'Unauthorized: User {user} cannot administrate sample {id_}')


def test_update_acls_fail_admin_permissions(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    for user, token in ((USER1, TOKEN1), (USER3, TOKEN3), (USER4, TOKEN4)):
        _update_acls_fail(
            url, token, {'id': id_, 'as_admin': 1},
            f'Sample service error code 20000 Unauthorized: User {user} does not have the ' +
            'necessary administration privileges to run method update_sample_acls')


def test_update_acls_fail_bad_user(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _update_acls_fail(
        url, TOKEN1, {'id': id_,
                      'admin': [USER2, 'a'],
                      'write': [USER3],
                      'read': [USER4, 'philbin_j_montgomery_iii'],
                      'remove': ['someguy']
                      },
        'Sample service error code 50000 No such user: a, philbin_j_montgomery_iii, someguy')


def test_update_acls_fail_user_2_acls(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _update_acls_fail(
        url, TOKEN1, {'id': id_,
                      'admin': [USER2],
                      'write': [USER3],
                      'read': [USER4, USER2],
                      },
        'Sample service error code 30001 Illegal input parameter: User user2 appears in two ACLs')


def test_update_acls_fail_user_in_acl_and_remove(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _update_acls_fail(
        url, TOKEN1, {'id': id_,
                      'admin': [USER2],
                      'write': [USER3],
                      'read': [USER4],
                      'remove': [USER2]
                      },
        'Sample service error code 30001 Illegal input parameter: Users in the remove list ' +
        'cannot be in any other ACL')


def test_update_acls_fail_owner_in_another_acl(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _update_acls_fail(
        url, TOKEN1, {'id': id_, 'write': [USER1]},
        'Sample service error code 20000 Unauthorized: ' +
        'ACLs for the sample owner user1 may not be modified by a delta update.')


def test_update_acls_fail_owner_in_remove_acl(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN1)
    _update_acls_fail(
        url, TOKEN1, {'id': id_, 'remove': [USER1]},
        'Sample service error code 20000 Unauthorized: ' +
        'ACLs for the sample owner user1 may not be modified by a delta update.')


def _update_acls_fail(url, token, params, expected):
    # Call update_sample_acls with `params` and check the error message.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.update_sample_acls',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def _update_acls(url, token, params, print_resp=False):
    # Apply a delta ACL update via the API; asserts the call succeeded.
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.update_sample_acls',
        'version': '1.1',
        'id': '67',
        'params': [params]
    })
    if print_resp:
        print(ret.text)
    assert ret.ok is True
    assert ret.json() == {'version': '1.1', 'id': '67', 'result': None}


def test_get_metadata_key_static_metadata(sample_port):
    """Static metadata lookups by key, with and without prefix matching."""
    _get_metadata_key_static_metadata(
        sample_port, {'keys': ['foo']}, {'foo': {'a': 'b', 'c': 'd'}})
    _get_metadata_key_static_metadata(
        sample_port,
        {'keys': ['foo', 'stringlentest'], 'prefix': 0},
        {'foo': {'a': 'b', 'c': 'd'}, 'stringlentest': {'h': 'i', 'j': 'k'}})
    _get_metadata_key_static_metadata(
        sample_port, {'keys':
['pre'], 'prefix': 1}, {'pre': {'1': '2'}}) _get_metadata_key_static_metadata( sample_port, {'keys': ['premature'], 'prefix': 2}, {'pre': {'1': '2'}}) def _get_metadata_key_static_metadata(sample_port, params, expected): url = f'http://localhost:{sample_port}' ret = requests.post(url, json={ 'method': 'SampleService.get_metadata_key_static_metadata', 'version': '1.1', 'id': '67', 'params': [params] }) # print(ret.text) assert ret.ok is True assert ret.json()['result'][0] == {'static_metadata': expected} def test_get_metadata_key_static_metadata_fail_bad_args(sample_port): _get_metadata_key_static_metadata_fail( sample_port, {}, 'Sample service error code 30001 Illegal input parameter: keys must be a list') _get_metadata_key_static_metadata_fail( sample_port, {'keys': ['foo', 'stringlentestage'], 'prefix': 0}, 'Sample service error code 30001 Illegal input parameter: No such metadata key: ' + 'stringlentestage') _get_metadata_key_static_metadata_fail( sample_port, {'keys': ['premature'], 'prefix': 1}, 'Sample service error code 30001 Illegal input parameter: No such prefix metadata key: ' + 'premature') _get_metadata_key_static_metadata_fail( sample_port, {'keys': ['somekey'], 'prefix': 2}, 'Sample service error code 30001 Illegal input parameter: No prefix metadata keys ' + 'matching key somekey') def _get_metadata_key_static_metadata_fail(sample_port, params, error): url = f'http://localhost:{sample_port}' ret = requests.post(url, json={ 'method': 'SampleService.get_metadata_key_static_metadata', 'version': '1.1', 'id': '67', 'params': [params] }) # print(ret.text) assert ret.status_code == 500 assert ret.json()['error']['message'] == error def _create_sample(url, token, sample, expected_version): ret = requests.post(url, headers=get_authorized_headers(token), json={ 'method': 'SampleService.create_sample', 'version': '1.1', 'id': '67', 'params': [{'sample': sample}] }) # print(ret.text) assert ret.ok is True assert ret.json()['result'][0]['version'] == 
expected_version return ret.json()['result'][0]['id'] def _create_link(url, token, expected_user, params, print_resp=False): ret = requests.post(url, headers=get_authorized_headers(token), json={ 'method': 'SampleService.create_data_link', 'version': '1.1', 'id': '42', 'params': [params] }) if print_resp: print(ret.text) assert ret.ok is True link = ret.json()['result'][0]['new_link'] id_ = link['linkid'] uuid.UUID(id_) # check the ID is a valid UUID del link['linkid'] created = link['created'] assert_ms_epoch_close_to_now(created) del link['created'] assert link == { 'id': params['id'], 'version': params['version'], 'node': params['node'], 'upa': params['upa'], 'dataid': params.get('dataid'), 'createdby': expected_user, 'expiredby': None, 'expired': None } return id_ def test_create_links_and_get_links_from_sample_basic(sample_port, workspace, kafka): ''' Also tests that the 'as_user' key is ignored if 'as_admin' is falsy. ''' _clear_kafka_messages(kafka) url = f'http://localhost:{sample_port}' wsurl = f'http://localhost:{workspace.port}' wscli = Workspace(wsurl, token=TOKEN3) # create workspace & objects wscli.create_workspace({'workspace': 'foo'}) wscli.save_objects({'id': 1, 'objects': [ {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'}, {'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'}, {'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'} ]}) wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]}) # create samples id1 = _create_sample( url, TOKEN3, {'name': 'mysample', 'node_tree': [{'id': 'root', 'type': 'BioReplicate'}, {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'} ] }, 1 ) id2 = _create_sample( url, TOKEN4, {'name': 'myothersample', 'node_tree': [{'id': 'root2', 'type': 'BioReplicate'}, {'id': 'foo2', 'type': 'TechReplicate', 'parent': 'root2'} ] }, 1 ) # ver 2 _create_sample( url, TOKEN4, {'id': id2, 'name': 'myothersample3', 'node_tree': [{'id': 'root3', 'type': 'BioReplicate'}, {'id': 'foo3', 'type': 
'TechReplicate', 'parent': 'root3'} ] }, 2 ) # create links # as_user should be ignored unless as_admin is true lid1 = _create_link(url, TOKEN3, USER3, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/2/2', 'as_user': USER1}) lid2 = _create_link( url, TOKEN3, USER3, {'id': id1, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'column1'}) lid3 = _create_link( url, TOKEN4, USER4, {'id': id2, 'version': 1, 'node': 'foo2', 'upa': '1/2/1', 'dataid': 'column2'}) # get links from sample 1 ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={ 'method': 'SampleService.get_data_links_from_sample', 'version': '1.1', 'id': '42', 'params': [{'id': id1, 'version': 1}] }) # print(ret.text) assert ret.ok is True assert len(ret.json()['result']) == 1 assert len(ret.json()['result'][0]) == 2 assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time']) res = ret.json()['result'][0]['links'] expected_links = [ { 'linkid': lid1, 'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/2/2', 'dataid': None, 'createdby': USER3, 'expiredby': None, 'expired': None }, { 'linkid': lid2, 'id': id1, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'column1', 'createdby': USER3, 'expiredby': None, 'expired': None } ] assert len(res) == len(expected_links) for link in res: assert_ms_epoch_close_to_now(link['created']) del link['created'] for link in expected_links: assert link in res # get links from sample 2 ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={ 'method': 'SampleService.get_data_links_from_sample', 'version': '1.1', 'id': '42', 'params': [{'id': id2, 'version': 1}] }) # print(ret.text) assert ret.ok is True assert len(ret.json()['result']) == 1 assert len(ret.json()['result'][0]) == 2 assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time']) res = ret.json()['result'][0]['links'] assert_ms_epoch_close_to_now(res[0]['created']) del res[0]['created'] assert res == [ { 'linkid': lid3, 'id': id2, 'version': 1, 
'node': 'foo2', 'upa': '1/2/1', 'dataid': 'column2', 'createdby': USER4, 'expiredby': None, 'expired': None } ] # get links from ver 2 of sample 2 ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={ 'method': 'SampleService.get_data_links_from_sample', 'version': '1.1', 'id': '42', 'params': [{'id': id2, 'version': 2}] }) # print(ret.text) assert ret.ok is True assert len(ret.json()['result']) == 1 assert len(ret.json()['result'][0]) == 2 assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time']) assert ret.json()['result'][0]['links'] == [] _check_kafka_messages( kafka, [ {'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1}, {'event_type': 'NEW_SAMPLE', 'sample_id': id2, 'sample_ver': 1}, {'event_type': 'NEW_SAMPLE', 'sample_id': id2, 'sample_ver': 2}, {'event_type': 'NEW_LINK', 'link_id': lid1}, {'event_type': 'NEW_LINK', 'link_id': lid2}, {'event_type': 'NEW_LINK', 'link_id': lid3}, ]) def test_update_and_get_links_from_sample(sample_port, workspace, kafka): ''' Also tests getting links from a sample using an effective time ''' _clear_kafka_messages(kafka) url = f'http://localhost:{sample_port}' wsurl = f'http://localhost:{workspace.port}' wscli = Workspace(wsurl, token=TOKEN3) # create workspace & objects wscli.create_workspace({'workspace': 'foo'}) wscli.save_objects({'id': 1, 'objects': [ {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'}, ]}) wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]}) # create samples id1 = _create_sample( url, TOKEN3, {'name': 'mysample', 'node_tree': [{'id': 'root', 'type': 'BioReplicate'}, {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'} ] }, 1 ) _replace_acls(url, id1, TOKEN3, {'admin': [USER4]}) # create links lid1 = _create_link(url, TOKEN3, USER3, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'}) oldlinkactive = datetime.datetime.now() time.sleep(1) # update link node lid2 = _create_link( url, TOKEN4, USER4, {'id': id1, 
'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'yay', 'update': 1}) # get current link ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={ 'method': 'SampleService.get_data_links_from_sample', 'version': '1.1', 'id': '42', 'params': [{'id': id1, 'version': 1}] }) # print(ret.text) assert ret.ok is True assert len(ret.json()['result']) == 1 res = ret.json()['result'][0] assert len(res) == 2 assert_ms_epoch_close_to_now(res['effective_time']) del res['effective_time'] created = res['links'][0]['created'] assert_ms_epoch_close_to_now(created) del res['links'][0]['created'] assert res == {'links': [ { 'linkid': lid2, 'id': id1, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'yay', 'createdby': USER4, 'expiredby': None, 'expired': None } ]} # get expired link ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={ 'method': 'SampleService.get_data_links_from_sample', 'version': '1.1', 'id': '42', 'params': [{ 'id': id1, 'version': 1, 'effective_time': round(oldlinkactive.timestamp() * 1000)}] }) # print(ret.text) assert ret.ok is True assert len(ret.json()['result']) == 1 res = ret.json()['result'][0] assert res['links'][0]['expired'] == created - 1 assert_ms_epoch_close_to_now(res['links'][0]['created'] + 1000) del res['links'][0]['created'] del res['links'][0]['expired'] assert res == { 'effective_time': round(oldlinkactive.timestamp() * 1000), 'links': [ { 'linkid': lid1, 'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay', 'createdby': USER3, 'expiredby': USER4, } ]} _check_kafka_messages( kafka, [ {'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1}, {'event_type': 'ACL_CHANGE', 'sample_id': id1}, {'event_type': 'NEW_LINK', 'link_id': lid1}, {'event_type': 'NEW_LINK', 'link_id': lid2}, {'event_type': 'EXPIRED_LINK', 'link_id': lid1}, ]) def test_create_data_link_as_admin(sample_port, workspace): url = f'http://localhost:{sample_port}' wsurl = f'http://localhost:{workspace.port}' 
wscli = Workspace(wsurl, token=TOKEN3) # create workspace & objects wscli.create_workspace({'workspace': 'foo'}) wscli.save_objects({'id': 1, 'objects': [ {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'}, ]}) # create samples id1 = _create_sample( url, TOKEN3, {'name': 'mysample', 'node_tree': [{'id': 'root', 'type': 'BioReplicate'}, {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'} ] }, 1 ) # create links lid1 = _create_link( url, TOKEN2, USER2, {'id': id1, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'yeet', 'as_admin': 1}) lid2 = _create_link( url, TOKEN2, USER4, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'as_admin': 1, 'as_user': f' {USER4} '}) # get link ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={ 'method': 'SampleService.get_data_links_from_sample', 'version': '1.1', 'id': '42', 'params': [{'id': id1, 'version': 1}] }) # print(ret.text) assert ret.ok is True assert len(ret.json()['result']) == 1 assert len(ret.json()['result'][0]) == 2 assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time']) res = ret.json()['result'][0]['links'] expected_links = [ { 'linkid': lid1, 'id': id1, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'yeet', 'createdby': USER2, 'expiredby': None, 'expired': None }, { 'linkid': lid2, 'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': None, 'createdby': USER4, 'expiredby': None, 'expired': None } ] assert len(res) == len(expected_links) for link in res: assert_ms_epoch_close_to_now(link['created']) del link['created'] for link in expected_links: assert link in res def test_get_links_from_sample_exclude_workspaces(sample_port, workspace): ''' Tests that unreadable workspaces are excluded from link results ''' url = f'http://localhost:{sample_port}' wsurl = f'http://localhost:{workspace.port}' wscli3 = Workspace(wsurl, token=TOKEN3) wscli4 = Workspace(wsurl, token=TOKEN4) # create workspace & objects 
    # ws 1: owned by user3; ws 2: readable by user3; ws 3: globally readable;
    # ws 4: unreadable by user3
    wscli3.create_workspace({'workspace': 'foo'})
    wscli3.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli4.create_workspace({'workspace': 'bar'})
    wscli4.save_objects({'id': 2, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli4.set_permissions({'id': 2, 'new_permission': 'r', 'users': [USER3]})
    wscli4.create_workspace({'workspace': 'baz'})
    wscli4.save_objects({'id': 3, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli4.set_global_permission({'id': 3, 'new_permission': 'r'})
    wscli4.create_workspace({'workspace': 'bat'})  # unreadable
    wscli4.save_objects({'id': 4, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    # create sample
    id_ = _create_generic_sample(url, TOKEN3)
    _replace_acls(url, id_, TOKEN3, {'admin': [USER4]})

    # create links - one per workspace; the link into ws 4 must not be visible to user3
    lid1 = _create_link(
        url, TOKEN3, USER3, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
    lid2 = _create_link(
        url, TOKEN4, USER4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '2/1/1'})
    lid3 = _create_link(url, TOKEN4, USER4,
                        {'id': id_, 'version': 1, 'node': 'foo', 'upa': '3/1/1',
                         'dataid': 'whee'})
    _create_link(
        url, TOKEN4, USER4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '4/1/1'})

    # check correct links are returned
    ret = _get_links_from_sample(url, TOKEN3, {'id': id_, 'version': 1})
    assert_ms_epoch_close_to_now(ret['effective_time'])
    res = ret['links']
    expected_links = [
        {
            'linkid': lid1,
            'id': id_,
            'version': 1,
            'node': 'foo',
            'upa': '1/1/1',
            'dataid': None,
            'createdby': USER3,
            'expiredby': None,
            'expired': None
        },
        {
            'linkid': lid2,
            'id': id_,
            'version': 1,
            'node': 'foo',
            'upa': '2/1/1',
            'dataid': None,
            'createdby': USER4,
            'expiredby': None,
            'expired': None
        },
        {
            'linkid': lid3,
            'id': id_,
            'version': 1,
            'node': 'foo',
            'upa': '3/1/1',
            'dataid': 'whee',
            'createdby': USER4,
            'expiredby': None,
            'expired': None
        }
    ]
    assert len(res) == len(expected_links)
    for link in res:
        assert_ms_epoch_close_to_now(link['created'])
        del link['created']
    for link in expected_links:
        assert link in res

    # test with anon user - only the link into the globally readable workspace shows
    _replace_acls(url, id_, TOKEN3, {'public_read': 1})
    ret = _get_links_from_sample(url, None, {'id': id_, 'version': 1})
    assert_ms_epoch_close_to_now(ret['effective_time'])
    res = ret['links']
    expected_links = [
        {
            'linkid': lid3,
            'id': id_,
            'version': 1,
            'node': 'foo',
            'upa': '3/1/1',
            'dataid': 'whee',
            'createdby': USER4,
            'expiredby': None,
            'expired': None
        }
    ]
    assert len(res) == len(expected_links)
    for link in res:
        assert_ms_epoch_close_to_now(link['created'])
        del link['created']
    for link in expected_links:
        assert link in res


# POST get_data_links_from_sample, assert success, and return the single result dict.
def _get_links_from_sample(url, token, params, print_resp=False):
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_data_links_from_sample',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    if print_resp:
        print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    assert len(ret.json()['result'][0]) == 2
    return ret.json()['result'][0]


def test_get_links_from_sample_as_admin(sample_port, workspace):
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN4)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    # create sample
    id_ = _create_generic_sample(url, TOKEN4)

    # create links
    lid = _create_link(url, TOKEN4, USER4,
                       {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})

    # check correct links are returned, user 3 has read admin perms, but not full
    ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
        'method': 'SampleService.get_data_links_from_sample',
        'version': '1.1',
        'id': '42',
        'params': [{'id': id_, 'version': 1, 'as_admin': 1}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    assert len(ret.json()['result'][0]) == 2
    assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
    assert len(ret.json()['result'][0]['links']) == 1
    link = ret.json()['result'][0]['links'][0]
    assert_ms_epoch_close_to_now(link['created'])
    del link['created']
    assert link == {
        'linkid': lid,
        'id': id_,
        'version': 1,
        'node': 'foo',
        'upa': '1/1/1',
        'dataid': None,
        'createdby': USER4,
        'expiredby': None,
        'expired': None
    }


def test_get_links_from_sample_public_read(sample_port, workspace):
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN1)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_global_permission({'id': 1, 'new_permission': 'r'})

    # create sample
    id_ = _create_generic_sample(url, TOKEN1)

    # create links
    lid = _create_link(url, TOKEN1, USER1,
                       {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
    _replace_acls(url, id_, TOKEN1, {'public_read': 1})

    for token in [None, TOKEN4]:  # anon user & user without explicit permission
        # check correct links are returned
        ret = requests.post(url, headers=get_authorized_headers(token), json={
            'method': 'SampleService.get_data_links_from_sample',
            'version': '1.1',
            'id': '42',
            'params': [{'id': id_, 'version': 1}]
        })
        # print(ret.text)
        assert ret.ok is True
        assert len(ret.json()['result']) == 1
        assert len(ret.json()['result'][0]) == 2
        assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
        assert len(ret.json()['result'][0]['links']) == 1
        link = ret.json()['result'][0]['links'][0]
        assert_ms_epoch_close_to_now(link['created'])
        del link['created']
        assert link == {
            'linkid': lid,
            'id': id_,
            'version': 1,
            'node': 'foo',
            'upa': '1/1/1',
            'dataid': None,
            'createdby': USER1,
            'expiredby': None,
            'expired': None
        }


def test_create_link_fail(sample_port, workspace):
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)
    id_ = _create_generic_sample(url, TOKEN3)

    _create_link_fail(
        sample_port, TOKEN3, {'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'},
        'Sample service error code 30000 Missing input parameter: id')
    _create_link_fail(
        sample_port, TOKEN3, {'id': id_, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'},
        'Sample service error code 30000 Missing input parameter: version')
    _create_link_fail(
        sample_port, TOKEN3,
        {'id': id_, 'version': 1, 'node': 'foo', 'upa': 'upalupa', 'dataid': 'yay'},
        'Sample service error code 30001 Illegal input parameter: upalupa is not a valid UPA')
    _create_link_fail(
        sample_port, TOKEN3, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
        'Sample service error code 50040 No such workspace data: No workspace with id 1 exists')

    wscli.create_workspace({'workspace': 'foo'})
    _create_link_fail(
        sample_port, TOKEN3, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
        'Sample service error code 50040 No such workspace data: Object 1/1/1 does not exist')

    _replace_acls(url, id_, TOKEN3, {'write': [USER4]})
    _create_link_fail(  # fails if permission granted is admin
        sample_port, TOKEN4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
        'Sample service error code 20000 Unauthorized: User user4 cannot ' +
        f'administrate sample {id_}')
    _replace_acls(url, id_, TOKEN3, {'admin': [USER4]})
    wscli.set_permissions({'id': 1, 'new_permission': 'r', 'users': [USER4]})
    _create_link_fail(  # fails if permission granted is write
        sample_port, TOKEN4, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'},
        'Sample service error code 20000 Unauthorized: User user4 cannot write to upa 1/1/1')

    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    _create_link_fail(
        sample_port, TOKEN3, {'id': id_, 'version': 1, 'node': 'fake', 'upa': '1/1/1'},
        f'Sample service error code 50030 No such sample node: {id_} ver 1 fake')

    # admin tests
    _create_link_fail(
        sample_port, TOKEN2,
        {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'as_admin': 1,
         'as_user': 'foo\bbar'},
        'Sample service error code 30001 Illegal input parameter: ' +
        'userid contains control characters')
    _create_link_fail(
        sample_port, TOKEN3,
        {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'as_user': USER4,
         'as_admin': 'f'},
        'Sample service error code 20000 Unauthorized: User user3 does not have ' +
        'the necessary administration privileges to run method create_data_link')
    _create_link_fail(
        sample_port, TOKEN2,
        {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'as_user': 'fake',
         'as_admin': 'f'},
        'Sample service error code 50000 No such user: fake')


def test_create_link_fail_link_exists(sample_port, workspace):
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    id_ = _create_generic_sample(url, TOKEN3)

    _create_link(url, TOKEN3, USER3,
                 {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
    _create_link_fail(
        sample_port, TOKEN3,
        {'id': id_, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'yay'},
        'Sample service error code 60000 Data link exists for data ID: 1/1/1:yay')


# POST create_data_link and assert the call fails with the expected error message.
def _create_link_fail(sample_port, token, params, expected):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.create_data_link',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_get_links_from_sample_fail(sample_port):
    url = f'http://localhost:{sample_port}'
    id_ = _create_generic_sample(url, TOKEN3)
    _get_link_from_sample_fail(
        sample_port, TOKEN3, {},
        'Sample service error code 30000 Missing input parameter: id')
    _get_link_from_sample_fail(
        sample_port, TOKEN3, {'id': id_},
        'Sample service error code 30000 Missing input parameter: version')
    _get_link_from_sample_fail(
        sample_port, TOKEN3, {'id': id_, 'version': 1, 'effective_time': 'foo'},
        "Sample service error code 30001 Illegal input parameter: key 'effective_time' " +
        "value of 'foo' is not a valid epoch millisecond timestamp")
    _get_link_from_sample_fail(
        sample_port, TOKEN4, {'id': id_, 'version': 1},
        f'Sample service error code 20000 Unauthorized: User user4 cannot read sample {id_}')
    _get_link_from_sample_fail(
        sample_port, None, {'id': id_, 'version': 1},
        f'Sample service error code 20000 Unauthorized: Anonymous users cannot read sample {id_}')
    badid = uuid.uuid4()
    _get_link_from_sample_fail(
        sample_port, TOKEN3, {'id': str(badid), 'version': 1},
        f'Sample service error code 50010 No such sample: {badid}')

    # admin tests
    _get_link_from_sample_fail(
        sample_port, TOKEN4, {'id': id_, 'version': 1, 'as_admin': 1},
        'Sample service error code 20000 Unauthorized: User user4 does not have the ' +
        'necessary administration privileges to run method get_data_links_from_sample')
    _get_link_from_sample_fail(
        sample_port, None, {'id': id_, 'version': 1, 'as_admin': 1},
        'Sample service error code 20000 Unauthorized: Anonymous users ' +
        'may not act as service administrators.')


# POST get_data_links_from_sample and assert failure with the expected error message.
def _get_link_from_sample_fail(sample_port, token, params, expected):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_data_links_from_sample',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


# Current UTC time as epoch milliseconds, matching the service's timestamp format.
def _get_current_epochmillis():
    return round(datetime.datetime.now(tz=datetime.timezone.utc).timestamp() * 1000)


def test_expire_data_link(sample_port, workspace, kafka):
    _expire_data_link(sample_port, workspace, None, kafka)


def test_expire_data_link_with_data_id(sample_port, workspace, kafka):
    _expire_data_link(sample_port, workspace, 'whee', kafka)


def _expire_data_link(sample_port, workspace, dataid, kafka):
    '''
    also tests that 'as_user' is ignored if 'as_admin' is false
    '''
    _clear_kafka_messages(kafka)
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})

    # create samples
    id1 = _create_sample(
        url, TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'},
                       {'id': 'bar', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )
    _replace_acls(url, id1, TOKEN3, {'admin': [USER4]})

    # create links
    lid1 = _create_link(url, TOKEN3, USER3,
                        {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1',
                         'dataid': dataid})
    lid2 = _create_link(url, TOKEN3, USER3,
                        {'id': id1, 'version': 1, 'node': 'bar', 'upa': '1/1/1',
                         'dataid': 'fake'})
    time.sleep(1)  # need to be able to set a reasonable effective time to fetch links

    # expire link (USER4 has sample admin + workspace write; 'as_user' must be ignored)
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.expire_data_link',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'dataid': dataid, 'as_user': USER1}]
    })
    # print(ret.text)
    assert ret.ok is True

    # check links
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.get_data_links_from_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'effective_time': _get_current_epochmillis() - 500}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    assert len(ret.json()['result'][0]) == 2
    assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
    links = ret.json()['result'][0]['links']
    assert len(links) == 2
    # the 'fake' link is untouched; the other link should now be expired
    for link in links:
        if link['dataid'] == 'fake':
            current_link = link
        else:
            expired_link = link

    assert_ms_epoch_close_to_now(expired_link['expired'])
    assert_ms_epoch_close_to_now(expired_link['created'] + 1000)
    del expired_link['created']
    del expired_link['expired']
    assert expired_link == {
        'linkid': lid1,
        'id': id1,
        'version': 1,
        'node': 'foo',
        'upa': '1/1/1',
        'dataid': dataid,
        'createdby': USER3,
        'expiredby': USER4,
    }

    assert_ms_epoch_close_to_now(current_link['created'] + 1000)
    del current_link['created']
    assert current_link == {
        'linkid': lid2,
        'id': id1,
        'version': 1,
        'node': 'bar',
        'upa': '1/1/1',
        'dataid': 'fake',
        'createdby': USER3,
        'expiredby': None,
        'expired': None
    }

    _check_kafka_messages(
        kafka,
        [
            {'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1},
            {'event_type': 'ACL_CHANGE', 'sample_id': id1},
            {'event_type': 'NEW_LINK', 'link_id': lid1},
            {'event_type': 'NEW_LINK', 'link_id': lid2},
            {'event_type': 'EXPIRED_LINK', 'link_id': lid1},
        ])


def test_expire_data_link_as_admin(sample_port, workspace, kafka):
    _expire_data_link_as_admin(sample_port, workspace, None, USER2, kafka)


def test_expire_data_link_as_admin_impersonate_user(sample_port, workspace, kafka):
    _expire_data_link_as_admin(sample_port, workspace, USER4, USER4, kafka)


# Expire a link as a service admin (TOKEN2), optionally impersonating 'user' via
# 'as_user'; 'expected_user' is who the link should record as the expirer.
def _expire_data_link_as_admin(sample_port, workspace, user, expected_user, kafka):
    _clear_kafka_messages(kafka)
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})

    # create samples
    id1 = _create_sample(
        url, TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'},
                       {'id': 'bar', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )

    # create links
    lid = _create_link(url, TOKEN3, USER3,
                       {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1',
                        'dataid': 'duidy'})
    time.sleep(1)  # need to be able to set a reasonable effective time to fetch links

    # expire link
    ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={
        'method': 'SampleService.expire_data_link',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'dataid': 'duidy', 'as_admin': 1, 'as_user': user}]
    })
    # print(ret.text)
    assert ret.ok is True

    # check links
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.get_data_links_from_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'effective_time': _get_current_epochmillis() - 500}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    assert len(ret.json()['result'][0]) == 2
    assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
    links = ret.json()['result'][0]['links']
    assert len(links) == 1
    link = links[0]
    assert_ms_epoch_close_to_now(link['expired'])
    assert_ms_epoch_close_to_now(link['created'] + 1000)
    del link['created']
    del link['expired']
    assert link == {
        'linkid': lid,
        'id': id1,
        'version': 1,
        'node': 'foo',
        'upa': '1/1/1',
        'dataid': 'duidy',
        'createdby': USER3,
        'expiredby': expected_user,
    }

    _check_kafka_messages(
        kafka,
        [
            {'event_type': 'NEW_SAMPLE', 'sample_id': id1, 'sample_ver': 1},
            {'event_type': 'NEW_LINK', 'link_id': lid},
            {'event_type': 'EXPIRED_LINK', 'link_id': lid},
        ])


def test_expire_data_link_fail(sample_port, workspace):
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    # create samples
    id1 = _create_sample(
        url, TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )

    # create links
    _create_link(url, TOKEN3, USER3,
                 {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})

    _expire_data_link_fail(
        sample_port, TOKEN3, {},
        'Sample service error code 30000 Missing input parameter: upa')
    _expire_data_link_fail(
        sample_port, TOKEN3, {'upa': '1/0/1'},
        'Sample service error code 30001 Illegal input parameter: 1/0/1 is not a valid UPA')
    _expire_data_link_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'dataid': 'foo\nbar'},
        'Sample service error code 30001 Illegal input parameter: ' +
        'dataid contains control characters')
    _expire_data_link_fail(
        sample_port, TOKEN4, {'upa': '1/1/1', 'dataid': 'yay'},
        'Sample service error code 20000 Unauthorized: User user4 cannot write to workspace 1')

    wscli.delete_workspace({'id': 1})
    _expire_data_link_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'dataid': 'yay'},
        'Sample service error code 50040 No such workspace data: Workspace 1 is deleted')

    wsadmin = Workspace(wsurl, token=TOKEN_WS_FULL_ADMIN)
    wsadmin.administer({'command': 'undeleteWorkspace', 'params': {'id': 1}})
    _expire_data_link_fail(
        sample_port, TOKEN3, {'upa': '1/1/2', 'dataid': 'yay'},
        'Sample service error code 50050 No such data link: 1/1/2:yay')
    _expire_data_link_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'dataid': 'yee'},
        'Sample service error code 50050 No such data link: 1/1/1:yee')

    wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})
    _expire_data_link_fail(
        sample_port, TOKEN4, {'upa': '1/1/1', 'dataid': 'yay'},
        'Sample service error code 20000 Unauthorized: User user4 cannot ' +
        f'administrate sample {id1}')

    # admin tests
    _expire_data_link_fail(
        sample_port, TOKEN2,
        {'upa': '1/1/1', 'dataid': 'yay', 'as_admin': ['t'], 'as_user': 'foo\tbar'},
        'Sample service error code 30001 Illegal input parameter: ' +
        'userid contains control characters')
    _expire_data_link_fail(
        sample_port, TOKEN3,
        {'upa': '1/1/1', 'dataid': 'yay', 'as_admin': ['t'], 'as_user': USER4},
        'Sample service error code 20000 Unauthorized: User user3 does not have ' +
        'the necessary administration privileges to run method expire_data_link')
    _expire_data_link_fail(
        sample_port, TOKEN2,
        {'upa': '1/1/1', 'dataid': 'yay', 'as_admin': ['t'], 'as_user': 'fake'},
        'Sample service error code 50000 No such user: fake')


# Assert expire_data_link fails with the expected error message.
def _expire_data_link_fail(sample_port, token, params, expected):
    _request_fail(sample_port, 'expire_data_link', token, params, expected)


# Generic helper: POST any SampleService method and assert failure with the
# expected error message.
def _request_fail(sample_port, method, token, params, expected):
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.' + method,
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_get_links_from_data(sample_port, workspace):
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
        {'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'},
        {'name': 'baz', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})

    # create samples
    id1 = _create_sample(
        url, TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )
    id2 = _create_sample(
        url, TOKEN4,
        {'name': 'myothersample',
         'node_tree': [{'id': 'root2', 'type': 'BioReplicate'},
                       {'id': 'foo2', 'type': 'TechReplicate', 'parent': 'root2'}
                       ]
         },
        1
    )
    # ver 2
    _create_sample(
        url, TOKEN4,
        {'id': id2, 'name': 'myothersample3',
         'node_tree': [{'id': 'root3', 'type': 'BioReplicate'},
                       {'id': 'foo3', 'type': 'TechReplicate', 'parent': 'root3'}
                       ]
         },
        2
    )

    # create links
    lid1 = _create_link(
        url,
TOKEN3, USER3, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/2/2'}) lid2 = _create_link( url, TOKEN4, USER4, {'id': id2, 'version': 1, 'node': 'root2', 'upa': '1/1/1', 'dataid': 'column1'}) lid3 = _create_link( url, TOKEN4, USER4, {'id': id2, 'version': 2, 'node': 'foo3', 'upa': '1/2/2', 'dataid': 'column2'}) # get links from object 1/2/2 ret = _get_links_from_data(url, TOKEN3, {'upa': '1/2/2'}) assert_ms_epoch_close_to_now(ret['effective_time']) res = ret['links'] expected_links = [ { 'linkid': lid1, 'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/2/2', 'dataid': None, 'createdby': USER3, 'expiredby': None, 'expired': None }, { 'linkid': lid3, 'id': id2, 'version': 2, 'node': 'foo3', 'upa': '1/2/2', 'dataid': 'column2', 'createdby': USER4, 'expiredby': None, 'expired': None } ] assert len(res) == len(expected_links) for link in res: assert_ms_epoch_close_to_now(link['created']) del link['created'] for link in expected_links: assert link in res # get links from object 1/1/1 ret = _get_links_from_data(url, TOKEN3, {'upa': '1/1/1'}) assert_ms_epoch_close_to_now(ret['effective_time']) res = ret['links'] assert_ms_epoch_close_to_now(res[0]['created']) del res[0]['created'] assert res == [ { 'linkid': lid2, 'id': id2, 'version': 1, 'node': 'root2', 'upa': '1/1/1', 'dataid': 'column1', 'createdby': USER4, 'expiredby': None, 'expired': None } ] # get links from object 1/2/1 ret = _get_links_from_data(url, TOKEN3, {'upa': '1/2/1'}) assert_ms_epoch_close_to_now(ret['effective_time']) assert ret['links'] == [] def _get_links_from_data(url, token, params, print_resp=False): ret = requests.post(url, headers=get_authorized_headers(token), json={ 'method': 'SampleService.get_data_links_from_data', 'version': '1.1', 'id': '42', 'params': [params] }) if print_resp: print(ret.text) assert ret.ok is True assert len(ret.json()['result']) == 1 assert len(ret.json()['result'][0]) == 2 return ret.json()['result'][0] def test_get_links_from_data_expired(sample_port, workspace): 
    """Check that updating a link expires the old one and both are fetchable by time."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_permissions({'id': 1, 'new_permission': 'w', 'users': [USER4]})

    # create samples
    id1 = _create_sample(
        url,
        TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )
    _replace_acls(url, id1, TOKEN3, {'admin': [USER4]})

    # create links
    lid1 = _create_link(url, TOKEN3, USER3,
                        {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})
    oldlinkactive = datetime.datetime.now()
    time.sleep(1)

    # update link node
    lid2 = _create_link(url, TOKEN4, USER4, {
        'id': id1, 'version': 1, 'node': 'root', 'upa': '1/1/1', 'dataid': 'yay', 'update': 1
    })

    # get current link
    ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
        'method': 'SampleService.get_data_links_from_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1'}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    res = ret.json()['result'][0]
    assert len(res) == 2
    assert_ms_epoch_close_to_now(res['effective_time'])
    del res['effective_time']
    created = res['links'][0]['created']
    assert_ms_epoch_close_to_now(created)
    del res['links'][0]['created']
    assert res == {'links': [
        {
            'linkid': lid2,
            'id': id1,
            'version': 1,
            'node': 'root',
            'upa': '1/1/1',
            'dataid': 'yay',
            'createdby': USER4,
            'expiredby': None,
            'expired': None
        }
    ]}

    # get expired link
    ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
        'method': 'SampleService.get_data_links_from_data',
        'version': '1.1',
        'id': '42',
        'params': [{
            'upa': '1/1/1',
            'effective_time': round(oldlinkactive.timestamp() * 1000)}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    res = ret.json()['result'][0]
    # the old link expires one millisecond before the replacement was created
    assert res['links'][0]['expired'] == created - 1
    assert_ms_epoch_close_to_now(res['links'][0]['created'] + 1000)
    del res['links'][0]['created']
    del res['links'][0]['expired']
    assert res == {
        'effective_time': round(oldlinkactive.timestamp() * 1000),
        'links': [
            {
                'linkid': lid1,
                'id': id1,
                'version': 1,
                'node': 'foo',
                'upa': '1/1/1',
                'dataid': 'yay',
                'createdby': USER3,
                'expiredby': USER4,
            }
        ]}


def test_get_links_from_data_public_read(sample_port, workspace):
    """Check that links on globally readable workspace objects are visible without perms."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN1)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_global_permission({'id': 1, 'new_permission': 'r'})

    # create samples
    id_ = _create_generic_sample(url, TOKEN1)

    # create links
    lid = _create_link(url, TOKEN1, USER1, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})

    for token in [None, TOKEN4]:  # anon user, user 4 has no explicit perms
        ret = _get_links_from_data(url, token, {'upa': '1/1/1'})
        assert_ms_epoch_close_to_now(ret['effective_time'])
        assert len(ret['links']) == 1
        link = ret['links'][0]
        assert_ms_epoch_close_to_now(link['created'])
        del link['created']
        assert link == {
            'linkid': lid,
            'id': id_,
            'version': 1,
            'node': 'foo',
            'upa': '1/1/1',
            'dataid': None,
            'createdby': USER1,
            'expiredby': None,
            'expired': None
        }


def test_get_links_from_data_as_admin(sample_port, workspace):
    """Check that a service admin can read links without workspace permissions."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN4)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    # create samples
    id1 = _create_sample(
        url,
        TOKEN4,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )

    # create links
    lid = _create_link(url, TOKEN4, USER4, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})

    # get links from object, user 3 has admin read perms
    ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
        'method': 'SampleService.get_data_links_from_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'as_admin': 1}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    assert len(ret.json()['result'][0]) == 2
    assert_ms_epoch_close_to_now(ret.json()['result'][0]['effective_time'])
    assert len(ret.json()['result'][0]['links']) == 1
    link = ret.json()['result'][0]['links'][0]
    assert_ms_epoch_close_to_now(link['created'])
    del link['created']
    assert link == {
        'linkid': lid,
        'id': id1,
        'version': 1,
        'node': 'foo',
        'upa': '1/1/1',
        'dataid': None,
        'createdby': USER4,
        'expiredby': None,
        'expired': None
    }


def test_get_links_from_data_fail(sample_port, workspace):
    """Check get_data_links_from_data error cases: bad input, permissions, missing data."""
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    _get_link_from_data_fail(
        sample_port, TOKEN3, {}, 'Sample service error code 30000 Missing input parameter: upa')
    _get_link_from_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'effective_time': 'foo'},
        "Sample service error code 30001 Illegal input parameter: key 'effective_time' " +
        "value of 'foo' is not a valid epoch millisecond timestamp")
    _get_link_from_data_fail(
        sample_port, TOKEN4, {'upa': '1/1/1'},
        'Sample service error code 20000 Unauthorized: User user4 cannot read upa 1/1/1')
    _get_link_from_data_fail(
        sample_port, None, {'upa': '1/1/1'},
        'Sample service error code 20000 Unauthorized: Anonymous users cannot read upa 1/1/1')
    _get_link_from_data_fail(
        sample_port, TOKEN3, {'upa': '1/2/1'},
        'Sample service error code 50040 No such workspace data: Object 1/2/1 does not exist')

    # admin tests (also tests missing / deleted objects)
    _get_link_from_data_fail(
        sample_port, TOKEN4, {'upa': '1/1/1', 'as_admin': 1},
        'Sample service error code 20000 Unauthorized: User user4 does not have the necessary ' +
        'administration privileges to run method get_data_links_from_data')
    _get_link_from_data_fail(
        sample_port, None, {'upa': '1/1/1', 'as_admin': 1},
        'Sample service error code 20000 Unauthorized: Anonymous users may not act ' +
        'as service administrators.')
    _get_link_from_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/2', 'as_admin': 1},
        'Sample service error code 50040 No such workspace data: Object 1/1/2 does not exist')
    _get_link_from_data_fail(
        sample_port, TOKEN3, {'upa': '2/1/1', 'as_admin': 1},
        'Sample service error code 50040 No such workspace data: No workspace with id 2 exists')

    wscli.delete_objects([{'ref': '1/1'}])
    _get_link_from_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'as_admin': 1},
        'Sample service error code 50040 No such workspace data: Object 1/1/1 does not exist')

    wscli.delete_workspace({'id': 1})
    _get_link_from_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'as_admin': 1},
        'Sample service error code 50040 No such workspace data: Workspace 1 is deleted')


def _get_link_from_data_fail(sample_port, token, params, expected):
    """Call get_data_links_from_data and assert a 500 with the expected error message."""
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_data_links_from_data',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_get_sample_via_data(sample_port, workspace):
    """Check that read access to a linked object grants read access to the sample."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_permissions({'id': 1, 'new_permission': 'r', 'users': [USER4]})

    # create samples
    id1 = _create_sample(
        url,
        TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root',
                        'type': 'BioReplicate',
                        'meta_user': {'a': {'b': 'f', 'e': 'g'}, 'c': {'d': 'h'}},
                        'meta_controlled': {'foo': {'bar': 'baz'}, 'premature': {'e': 'fakeout'}},
                        'source_meta': [{'key': 'foo', 'skey': 'b', 'svalue': {'x': 'y'}}]
                        },
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )
    id2 = _create_sample(
        url,
        TOKEN3,
        {'name': 'unused', 'node_tree': [{'id': 'unused', 'type': 'BioReplicate'}]},
        1
    )
    # ver 2
    _create_sample(
        url,
        TOKEN3,
        {'id': id2, 'name': 'myothersample3',
         'node_tree': [{'id': 'root3', 'type': 'BioReplicate'},
                       {'id': 'foo3', 'type': 'TechReplicate', 'parent': 'root3'}
                       ]
         },
        2
    )

    # create links
    _create_link(url, TOKEN3, USER3, {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})
    _create_link(
        url, TOKEN3, USER3,
        {'id': id2, 'version': 2, 'node': 'root3', 'upa': '1/1/1', 'dataid': 'column1'})

    # get first sample via link from object 1/1/1 using a token that has no access
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.get_sample_via_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'id': str(id1), 'version': 1}]
    })
    # print(ret.text)
    assert ret.ok is True
    res = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(res['save_date'])
    del res['save_date']

    expected = {
        'id': id1,
        'version': 1,
        'name': 'mysample',
        'user': USER3,
        'node_tree': [{'id': 'root',
                       'type': 'BioReplicate',
                       'parent': None,
                       'meta_user': {'a': {'b': 'f', 'e': 'g'}, 'c': {'d': 'h'}},
                       'meta_controlled': {'foo': {'bar': 'baz'}, 'premature': {'e': 'fakeout'}},
                       'source_meta': [{'key': 'foo', 'skey': 'b', 'svalue': {'x': 'y'}}],
                       },
                      {'id': 'foo',
                       'type': 'TechReplicate',
                       'parent': 'root',
                       'meta_controlled': {},
                       'meta_user': {},
                       'source_meta': [],
                       },
                      ]
    }
    assert res == expected

    # get second sample via link from object 1/1/1 using a token that has no access
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.get_sample_via_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'id': str(id2), 'version': 2}]
    })
    # print(ret.text)
    assert ret.ok is True
    res = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(res['save_date'])
    del res['save_date']

    expected = {
        'id': id2,
        'version': 2,
        'name': 'myothersample3',
        'user': USER3,
        'node_tree': [{'id': 'root3',
                       'type': 'BioReplicate',
                       'parent': None,
                       'meta_controlled': {},
                       'meta_user': {},
                       'source_meta': [],
                       },
                      {'id': 'foo3',
                       'type': 'TechReplicate',
                       'parent': 'root3',
                       'meta_controlled': {},
                       'meta_user': {},
                       'source_meta': [],
                       },
                      ]
    }
    assert res == expected


def test_get_sample_via_data_expired_with_anon_user(sample_port, workspace):
    """Check that samples are reachable via both current and expired links, anonymously."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_global_permission({'id': 1, 'new_permission': 'r'})

    # create samples
    id1 = _create_sample(
        url,
        TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )
    id2 = _create_sample(
        url,
        TOKEN3,
        {'name': 'myothersample',
         'node_tree': [{'id': 'root2', 'type': 'BioReplicate'},
                       {'id': 'foo2', 'type': 'TechReplicate', 'parent': 'root2'}
                       ]
         },
        1
    )

    # create links
    _create_link(url, TOKEN3, USER3,
                 {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})

    # update link node
    _create_link(url, TOKEN3, USER3, {
        'id': id2, 'version': 1, 'node': 'root2', 'upa': '1/1/1', 'dataid': 'yay', 'update': 1,
    })
    # pulled link from server to check the old link was expired

    # get sample via current link
    ret = requests.post(url, headers=get_authorized_headers(None), json={
        'method': 'SampleService.get_sample_via_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'id': str(id2), 'version': 1}]
    })
    # print(ret.text)
    assert ret.ok is True
    res = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(res['save_date'])
    del res['save_date']

    expected = {
        'id': id2,
        'version': 1,
        'name': 'myothersample',
        'user': USER3,
        'node_tree': [{'id': 'root2',
                       'type': 'BioReplicate',
                       'parent': None,
                       'meta_user': {},
                       'meta_controlled': {},
                       'source_meta': [],
                       },
                      {'id': 'foo2',
                       'type': 'TechReplicate',
                       'parent': 'root2',
                       'meta_controlled': {},
                       'meta_user': {},
                       'source_meta': [],
                       },
                      ]
    }
    assert res == expected

    # get sample via expired link
    ret = requests.post(url, headers=get_authorized_headers(None), json={
        'method': 'SampleService.get_sample_via_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'id': str(id1), 'version': 1}]
    })
    # print(ret.text)
    assert ret.ok is True
    res = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(res['save_date'])
    del res['save_date']

    expected = {
        'id': id1,
        'version': 1,
        'name': 'mysample',
        'user': USER3,
        'node_tree': [{'id': 'root',
                       'type': 'BioReplicate',
                       'parent': None,
                       'meta_user': {},
                       'meta_controlled': {},
                       'source_meta': [],
                       },
                      {'id': 'foo',
                       'type': 'TechReplicate',
                       'parent': 'root',
                       'meta_controlled': {},
                       'meta_user': {},
                       'source_meta': [],
                       },
                      ]
    }
    assert res == expected


def test_get_sample_via_data_public_read(sample_port, workspace):
    """Check sample read via a link on a globally readable workspace object."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN1)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})
    wscli.set_global_permission({'id': 1, 'new_permission': 'r'})

    # create samples
    id_ = _create_generic_sample(url, TOKEN1)

    # create links
    _create_link(url, TOKEN1, USER1, {'id': id_, 'version': 1, 'node': 'foo', 'upa': '1/1/1'})

    # get sample via link from object 1/1/1 using a token that has no explicit access
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.get_sample_via_data',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'id': str(id_), 'version': 1}]
    })
    # print(ret.text)
    assert ret.ok is True
    res = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(res['save_date'])
    del res['save_date']

    expected = {
        'id': id_,
        'version': 1,
        'name': 'mysample',
        'user': USER1,
        'node_tree': [{'id': 'root',
                       'type': 'BioReplicate',
                       'parent': None,
                       'meta_user': {},
                       'meta_controlled': {},
                       'source_meta': [],
                       },
                      {'id': 'foo',
                       'type': 'TechReplicate',
                       'parent': 'root',
                       'meta_controlled': {},
                       'meta_user': {},
                       'source_meta': [],
                       },
                      ]
    }
    assert res == expected


def test_get_sample_via_data_fail(sample_port, workspace):
    """Check get_sample_via_data error cases: bad input, permissions, missing links/versions."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN3)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    # create samples
    id1 = _create_sample(
        url,
        TOKEN3,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )

    # create links
    _create_link(url, TOKEN3, USER3,
                 {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})

    _get_sample_via_data_fail(
        sample_port, TOKEN3, {}, 'Sample service error code 30000 Missing input parameter: upa')
    _get_sample_via_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/1'},
        'Sample service error code 30000 Missing input parameter: id')
    _get_sample_via_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'id': id1},
        'Sample service error code 30000 Missing input parameter: version')
    _get_sample_via_data_fail(
        sample_port, TOKEN4, {'upa': '1/1/1', 'id': id1, 'version': 1},
        'Sample service error code 20000 Unauthorized: User user4 cannot read upa 1/1/1')
    _get_sample_via_data_fail(
        sample_port, None, {'upa': '1/1/1', 'id': id1, 'version': 1},
        'Sample service error code 20000 Unauthorized: Anonymous users cannot read upa 1/1/1')
    _get_sample_via_data_fail(
        sample_port, TOKEN3, {'upa': '1/2/1', 'id': id1, 'version': 1},
        'Sample service error code 50040 No such workspace data: Object 1/2/1 does not exist')
    badid = uuid.uuid4()
    _get_sample_via_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'id': str(badid), 'version': 1},
        'Sample service error code 50050 No such data link: There is no link from UPA 1/1/1 ' +
        f'to sample {badid}')
    _get_sample_via_data_fail(
        sample_port, TOKEN3, {'upa': '1/1/1', 'id': str(id1), 'version': 2},
        f'Sample service error code 50020 No such sample version: {id1} ver 2')


def _get_sample_via_data_fail(sample_port, token, params, expected):
    """Call get_sample_via_data and assert a 500 with the expected error message."""
    # could make a single method that just takes the service method name to DRY things up a bit
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_sample_via_data',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


def test_get_data_link(sample_port, workspace):
    """Check get_data_link for an admin reader before and after the link is expired."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN4)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    # create samples
    id1 = _create_sample(
        url,
        TOKEN4,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )

    # create link
    lid = _create_link(url, TOKEN4, USER4,
                       {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})

    # get link, user 3 has admin read perms
    ret = requests.post(url, headers=get_authorized_headers(TOKEN3), json={
        'method': 'SampleService.get_data_link',
        'version': '1.1',
        'id': '42',
        'params': [{'linkid': lid}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    link = ret.json()['result'][0]
    created = link.pop('created')
    assert_ms_epoch_close_to_now(created)
    assert link == {
        'linkid': lid,
        'id': id1,
        'version': 1,
        'node': 'foo',
        'upa': '1/1/1',
        'dataid': 'yay',
        'createdby': USER4,
        'expiredby': None,
        'expired': None
    }

    # expire link
    ret = requests.post(url, headers=get_authorized_headers(TOKEN4), json={
        'method': 'SampleService.expire_data_link',
        'version': '1.1',
        'id': '42',
        'params': [{'upa': '1/1/1', 'dataid': 'yay'}]
    })
    # print(ret.text)
    assert ret.ok is True

    # get link, user 5 has full perms
    ret = requests.post(url, headers=get_authorized_headers(TOKEN5), json={
        'method': 'SampleService.get_data_link',
        'version': '1.1',
        'id': '42',
        'params': [{'linkid': lid}]
    })
    # print(ret.text)
    assert ret.ok is True
    assert len(ret.json()['result']) == 1
    link = ret.json()['result'][0]
    assert_ms_epoch_close_to_now(link['expired'])
    del link['expired']
    assert link == {
        'linkid': lid,
        'id': id1,
        'version': 1,
        'node': 'foo',
        'upa': '1/1/1',
        'dataid': 'yay',
        'created': created,
        'createdby': USER4,
        'expiredby': USER4,
    }


def test_get_data_link_fail(sample_port, workspace):
    """Check get_data_link error cases: missing param, insufficient perms, unknown link."""
    url = f'http://localhost:{sample_port}'
    wsurl = f'http://localhost:{workspace.port}'
    wscli = Workspace(wsurl, token=TOKEN4)

    # create workspace & objects
    wscli.create_workspace({'workspace': 'foo'})
    wscli.save_objects({'id': 1, 'objects': [
        {'name': 'bar', 'data': {}, 'type': 'Trivial.Object-1.0'},
    ]})

    # create samples
    id1 = _create_sample(
        url,
        TOKEN4,
        {'name': 'mysample',
         'node_tree': [{'id': 'root', 'type': 'BioReplicate'},
                       {'id': 'foo', 'type': 'TechReplicate', 'parent': 'root'}
                       ]
         },
        1
    )

    # create link
    lid = _create_link(url, TOKEN4, USER4,
                       {'id': id1, 'version': 1, 'node': 'foo', 'upa': '1/1/1', 'dataid': 'yay'})

    _get_data_link_fail(
        sample_port, TOKEN3, {},
        'Sample service error code 30000 Missing input parameter: linkid')
    _get_data_link_fail(
        sample_port, TOKEN4, {'linkid': lid},
        'Sample service error code 20000 Unauthorized: User user4 does not have the necessary ' +
        'administration privileges to run method get_data_link')
    oid = uuid.uuid4()
    _get_data_link_fail(
        sample_port, TOKEN3, {'linkid': str(oid)},
        f'Sample service error code 50050 No such data link: {oid}')


def _get_data_link_fail(sample_port, token, params, expected):
    """Call get_data_link and assert a 500 with the expected error message."""
    # could make a single method that just takes the service method name to DRY things up a bit
    url = f'http://localhost:{sample_port}'
    ret = requests.post(url, headers=get_authorized_headers(token), json={
        'method': 'SampleService.get_data_link',
        'version': '1.1',
        'id': '42',
        'params': [params]
    })
    assert ret.status_code == 500
    assert ret.json()['error']['message'] == expected


# ###########################
# Auth user lookup tests
# ###########################

# for some reason having sample_port along with auth in the test fn args prevents a tear down
# error, not quite sure why


def test_user_lookup_build_fail_bad_args():
    """Check KBaseUserLookup constructor input validation."""
    _user_lookup_build_fail(
        '', 'foo', ValueError('auth_url cannot be a value that evaluates to false'))
    _user_lookup_build_fail(
        'http://foo.com', '', ValueError('auth_token cannot be a value that evaluates to false'))


def test_user_lookup_build_fail_bad_token(sample_port, auth):
    """Check KBaseUserLookup construction fails with an invalid token."""
    _user_lookup_build_fail(
        f'http://localhost:{auth.port}/testmode',
        'tokentokentoken!',
        InvalidTokenError('KBase auth server reported token is invalid.'))


def test_user_lookup_build_fail_bad_auth_url(sample_port, auth):
    """Check KBaseUserLookup construction fails with a nonexistent auth endpoint."""
    _user_lookup_build_fail(
        f'http://localhost:{auth.port}/testmode/foo',
        TOKEN1,
        IOError('Error from KBase auth server: HTTP 404 Not Found'))


def test_user_lookup_build_fail_not_auth_url(auth):
    """Check KBaseUserLookup construction fails against a non-auth-server URL."""
    _user_lookup_build_fail(
        'https://httpbin.org/status/404',
        TOKEN1,
        IOError('Non-JSON response from KBase auth server, status code: 404'))


def _user_lookup_build_fail(url, token, expected):
    """Construct a KBaseUserLookup and assert it raises the expected exception."""
    with raises(Exception) as got:
        KBaseUserLookup(url, token)
    assert_exception_correct(got.value, expected)


def test_user_lookup(sample_port, auth):
    """Check that valid users are not reported as invalid."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode', TOKEN1)
    assert ul.invalid_users([]) == []
    assert ul.invalid_users([UserID(USER1), UserID(USER2), UserID(USER3)]) == []


def test_user_lookup_cache(sample_port, auth):
    """Check that looked-up users are added to the valid-user cache."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode', TOKEN1)
    assert ul._valid_cache.get(USER1, default=False) is False
    assert ul._valid_cache.get(USER2, default=False) is False
    ul.invalid_users([UserID(USER1)])
    assert ul._valid_cache.get(USER1, default=False) is True
    assert ul._valid_cache.get(USER2, default=False) is False


def test_user_lookup_bad_users(sample_port, auth):
    """Check that unknown users are returned from invalid_users."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN1)
    assert ul.invalid_users(
        [UserID('nouserhere'), UserID(USER1), UserID(USER2), UserID('whooptydoo'),
         UserID(USER3)]) == [UserID('nouserhere'), UserID('whooptydoo')]


def test_user_lookup_fail_bad_args(sample_port, auth):
    """Check invalid_users input validation."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN1)
    _user_lookup_fail(ul, None, ValueError('usernames cannot be None'))
    _user_lookup_fail(ul, [UserID('foo'), UserID('bar'), None], ValueError(
        'Index 2 of iterable usernames cannot be a value that evaluates to false'))


def test_user_lookup_fail_bad_username(sample_port, auth):
    """Check that an illegal username raises InvalidUserError."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN1)
    # maybe possibly this error should be shortened
    # definitely clear the user name is illegal though, there's no question about that
    _user_lookup_fail(ul, [UserID('1')], InvalidUserError(
        'The KBase auth server is being very assertive about one of the usernames being ' +
        'illegal: 30010 Illegal user name: Illegal user name [1]: 30010 Illegal user name: ' +
        'Username must start with a letter'))


def _user_lookup_fail(userlookup, users, expected):
    """Call invalid_users and assert it raises the expected exception."""
    with raises(Exception) as got:
        userlookup.invalid_users(users)
    assert_exception_correct(got.value, expected)


def test_is_admin(sample_port, auth):
    """Check admin permission resolution for various full/read role configurations."""
    n = AdminPermission.NONE
    r = AdminPermission.READ
    f = AdminPermission.FULL

    _check_is_admin(auth.port, [n, n, n, n])
    _check_is_admin(auth.port, [f, f, n, n], ['fulladmin1'])
    _check_is_admin(auth.port, [n, f, n, n], ['fulladmin2'])
    _check_is_admin(auth.port, [n, n, r, n], None, ['readadmin1'])
    _check_is_admin(auth.port, [n, r, n, n], None, ['readadmin2'])
    _check_is_admin(auth.port, [n, f, n, n], ['fulladmin2'], ['readadmin2'])
    _check_is_admin(auth.port, [n, f, r, n], ['fulladmin2'], ['readadmin1'])


def _check_is_admin(port, results, full_roles=None, read_roles=None):
    """Assert is_admin returns the expected permission/user pair for users 1-4."""
    ul = KBaseUserLookup(
        f'http://localhost:{port}/testmode/',
        TOKEN_SERVICE,
        full_roles,
        read_roles)

    for t, u, r in zip([TOKEN1, TOKEN2, TOKEN3, TOKEN4], [USER1, USER2, USER3, USER4], results):
        assert ul.is_admin(t) == (r, u)


def test_is_admin_cache(sample_port, auth):
    """Check that admin lookups are added to the admin cache."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN_SERVICE)
    assert ul._admin_cache.get(TOKEN1, default=False) is False
    assert ul._admin_cache.get(TOKEN2, default=False) is False
    ul.is_admin(TOKEN1)
    assert ul._admin_cache.get(TOKEN1, default=False) is not False
    assert ul._admin_cache.get(TOKEN2, default=False) is False


def test_is_admin_fail_bad_input(sample_port, auth):
    """Check is_admin input validation."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN_SERVICE)

    _is_admin_fail(ul, None, ValueError('token cannot be a value that evaluates to false'))
    _is_admin_fail(ul, '', ValueError('token cannot be a value that evaluates to false'))


def test_is_admin_fail_bad_token(sample_port, auth):
    """Check is_admin fails with an invalid token."""
    ul = KBaseUserLookup(f'http://localhost:{auth.port}/testmode/', TOKEN_SERVICE)

    _is_admin_fail(ul, 'bad token here', InvalidTokenError(
        'KBase auth server reported token is invalid.'))


def _is_admin_fail(userlookup, user, expected):
    """Call is_admin and assert it raises the expected exception."""
    with raises(Exception) as got:
        userlookup.is_admin(user)
    assert_exception_correct(got.value, expected)


# ###########################
# Workspace wrapper tests
# ###########################


def test_workspace_wrapper_has_permission(sample_port, workspace):
    """Check has_permission does not raise when the user has admin access."""
    url = f'http://localhost:{workspace.port}'
    wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
    ws = WS(wscli)

    wscli2 = Workspace(url, token=TOKEN2)
    wscli2.create_workspace({'workspace': 'foo'})
    wscli2.save_objects({'id': 1, 'objects': [{'name': 'bar', 'type': 'Trivial.Object-1.0',
                                               'data': {}}]})
    wscli2.save_objects({'id': 1, 'objects': [{'name': 'foo', 'type': 'Trivial.Object-1.0',
                                               'data': {}}]})
    wscli2.save_objects({'id': 1, 'objects': [{'name': 'foo', 'type': 'Trivial.Object-1.0',
                                               'data': {}}]})

    ws.has_permission(UserID(USER2), WorkspaceAccessType.ADMIN, 1)  # Shouldn't fail
    ws.has_permission(UserID(USER2), WorkspaceAccessType.ADMIN, upa=UPA('1/2/2'))  # Shouldn't fail


def test_workspace_wrapper_has_permission_fail_bad_args(sample_port, workspace):
    """Check has_permission failures: no access, bad users, missing/deleted data."""
    url = f'http://localhost:{workspace.port}'
    wscli2 = Workspace(url, token=TOKEN2)
    wscli2.create_workspace({'workspace': 'foo'})
    wscli2.save_objects({'id': 1, 'objects': [{'name': 'bar', 'type': 'Trivial.Object-1.0',
                                               'data': {}}]})
    wscli2.save_objects({'id': 1, 'objects': [{'name': 'foo', 'type': 'Trivial.Object-1.0',
                                               'data': {}}]})

    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER1), 1, None, UnauthorizedError(
            'User user1 cannot read workspace 1'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER1), None, UPA('1/2/1'),
        UnauthorizedError('User user1 cannot read upa 1/2/1'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID('fakeuser'), 1, None, UnauthorizedError(
            'User fakeuser cannot read workspace 1'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID('fakeuser'), None, UPA('1/2/1'),
        UnauthorizedError('User fakeuser cannot read upa 1/2/1'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER2), 2, None,
        NoSuchWorkspaceDataError('No workspace with id 2 exists'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER2), None, UPA('2/1/1'),
        NoSuchWorkspaceDataError('No workspace with id 2 exists'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER2), None, UPA('1/2/2'),
        NoSuchWorkspaceDataError('Object 1/2/2 does not exist'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER2), None, UPA('1/3/1'),
        NoSuchWorkspaceDataError('Object 1/3/1 does not exist'))

    wscli2.delete_objects([{'ref': '1/2'}])
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER2), None, UPA('1/2/1'),
        NoSuchWorkspaceDataError('Object 1/2/1 does not exist'))

    wscli2.delete_workspace({'id': 1})
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER2), None, UPA('1/1/1'),
        NoSuchWorkspaceDataError('Workspace 1 is deleted'))
    _workspace_wrapper_has_permission_fail(
        workspace.port, UserID(USER2), 1, None,
        NoSuchWorkspaceDataError('Workspace 1 is deleted'))


def _workspace_wrapper_has_permission_fail(ws_port, user, wsid, upa, expected):
    """Call has_permission with READ access and assert it raises the expected exception."""
    url = f'http://localhost:{ws_port}'
    wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
    ws = WS(wscli)

    with raises(Exception) as got:
        ws.has_permission(user, WorkspaceAccessType.READ, wsid, upa)
    assert_exception_correct(got.value, expected)


def test_workspace_wrapper_get_workspaces(sample_port, workspace):
    """Check get_user_workspaces returns owned, public, and shared (but not other) workspaces."""
    url = f'http://localhost:{workspace.port}'
    wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
    ws = WS(wscli)

    wscli1 = Workspace(url, token=TOKEN1)
    wscli1.create_workspace({'workspace': 'baz'})

    wscli2 = Workspace(url, token=TOKEN2)
    wscli2.create_workspace({'workspace': 'foo'})
    wscli2.set_global_permission({'id': 2, 'new_permission': 'r'})

    wscli3 = Workspace(url, token=TOKEN3)
    wscli3.create_workspace({'workspace': 'bar'})
    wscli3.set_permissions({'id': 3, 'users': [USER1], 'new_permission': 'r'})
    wscli3.create_workspace({'workspace': 'invisible'})

    assert ws.get_user_workspaces(UserID(USER1)) == [1, 2, 3]  # not 4


def test_workspace_wrapper_get_workspaces_fail_no_user(sample_port, workspace):
    url = f'http://localhost:{workspace.port}'
    wscli = Workspace(url, token=TOKEN_WS_READ_ADMIN)
ws = WS(wscli) with raises(Exception) as got: ws.get_user_workspaces(UserID('fakeuser')) assert_exception_correct(got.value, NoSuchUserError('User fakeuser is not a valid user')) # ########################### # Kafka notifier tests # ########################### def test_kafka_notifier_init_fail(): _kafka_notifier_init_fail(None, 't', MissingParameterError('bootstrap_servers')) _kafka_notifier_init_fail(' \t ', 't', MissingParameterError('bootstrap_servers')) _kafka_notifier_init_fail('localhost:10000', None, MissingParameterError('topic')) _kafka_notifier_init_fail('localhost:10000', ' \t ', MissingParameterError('topic')) _kafka_notifier_init_fail( 'localhost:10000', 'mytopic' + 243 * 'a', IllegalParameterError('topic exceeds maximum length of 249')) _kafka_notifier_init_fail(f'localhost:{find_free_port()}', 'mytopic', NoBrokersAvailable()) for c in ['Ѽ', '_', '.', '*']: _kafka_notifier_init_fail('localhost:10000', f'topic{c}topic', ValueError( f'Illegal character in Kafka topic topic{c}topic: {c}')) def _kafka_notifier_init_fail(servers, topic, expected): with raises(Exception) as got: KafkaNotifier(servers, topic) assert_exception_correct(got.value, expected) def test_kafka_notifier_new_sample(sample_port, kafka): topic = 'abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ-' + 186 * 'a' kn = KafkaNotifier(f'localhost:{kafka.port}', topic) try: id_ = uuid.uuid4() kn.notify_new_sample_version(id_, 6) _check_kafka_messages( kafka, [{'event_type': 'NEW_SAMPLE', 'sample_id': str(id_), 'sample_ver': 6}], topic) finally: kn.close() def test_kafka_notifier_notify_new_sample_version_fail(sample_port, kafka): kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic') _kafka_notifier_notify_new_sample_version_fail(kn, None, 1, ValueError( 'sample_id cannot be a value that evaluates to false')) _kafka_notifier_notify_new_sample_version_fail(kn, uuid.uuid4(), 0, ValueError( 'sample_ver must be > 0')) _kafka_notifier_notify_new_sample_version_fail(kn, uuid.uuid4(), 
-3, ValueError( 'sample_ver must be > 0')) kn.close() _kafka_notifier_notify_new_sample_version_fail(kn, uuid.uuid4(), 1, ValueError( 'client is closed')) def _kafka_notifier_notify_new_sample_version_fail(notifier, sample, version, expected): with raises(Exception) as got: notifier.notify_new_sample_version(sample, version) assert_exception_correct(got.value, expected) def test_kafka_notifier_acl_change(sample_port, kafka): kn = KafkaNotifier(f'localhost:{kafka.port}', 'topictopic') try: id_ = uuid.uuid4() kn.notify_sample_acl_change(id_) _check_kafka_messages( kafka, [{'event_type': 'ACL_CHANGE', 'sample_id': str(id_)}], 'topictopic') finally: kn.close() def test_kafka_notifier_notify_acl_change_fail(sample_port, kafka): kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic') _kafka_notifier_notify_acl_change_fail(kn, None, ValueError( 'sample_id cannot be a value that evaluates to false')) kn.close() _kafka_notifier_notify_acl_change_fail(kn, uuid.uuid4(), ValueError( 'client is closed')) def _kafka_notifier_notify_acl_change_fail(notifier, sample, expected): with raises(Exception) as got: notifier.notify_sample_acl_change(sample) assert_exception_correct(got.value, expected) def test_kafka_notifier_new_link(sample_port, kafka): kn = KafkaNotifier(f'localhost:{kafka.port}', 'topictopic') try: id_ = uuid.uuid4() kn.notify_new_link(id_) _check_kafka_messages( kafka, [{'event_type': 'NEW_LINK', 'link_id': str(id_)}], 'topictopic') finally: kn.close() def test_kafka_notifier_new_link_fail(sample_port, kafka): kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic') _kafka_notifier_new_link_fail(kn, None, ValueError( 'link_id cannot be a value that evaluates to false')) kn.close() _kafka_notifier_new_link_fail(kn, uuid.uuid4(), ValueError( 'client is closed')) def _kafka_notifier_new_link_fail(notifier, sample, expected): with raises(Exception) as got: notifier.notify_new_link(sample) assert_exception_correct(got.value, expected) def 
test_kafka_notifier_expired_link(sample_port, kafka): kn = KafkaNotifier(f'localhost:{kafka.port}', 'topictopic') try: id_ = uuid.uuid4() kn.notify_expired_link(id_) _check_kafka_messages( kafka, [{'event_type': 'EXPIRED_LINK', 'link_id': str(id_)}], 'topictopic') finally: kn.close() def test_kafka_notifier_expired_link_fail(sample_port, kafka): kn = KafkaNotifier(f'localhost:{kafka.port}', 'mytopic') _kafka_notifier_expired_link_fail(kn, None, ValueError( 'link_id cannot be a value that evaluates to false')) kn.close() _kafka_notifier_expired_link_fail(kn, uuid.uuid4(), ValueError( 'client is closed')) def _kafka_notifier_expired_link_fail(notifier, sample, expected): with raises(Exception) as got: notifier.notify_expired_link(sample) assert_exception_correct(got.value, expected) def test_validate_sample(sample_port): _validate_sample_as_admin(sample_port, None, TOKEN2, USER2) def _validate_sample_as_admin(sample_port, as_user, get_token, expected_user): url = f'http://localhost:{sample_port}' ret = requests.post(url, headers=get_authorized_headers(TOKEN2), json={ 'method': 'SampleService.validate_samples', 'version': '1.1', 'id': '67', 'params': [{ 'samples': [{ 'name': 'mysample', 'node_tree': [{ 'id': 'root', 'type': 'BioReplicate', 'meta_controlled': {'foo': {'bar': 'baz'}}, 'meta_user': {'a': {'b': 'c'}} }] }] }] }) # print(ret.text) assert ret.ok is True ret_json = ret.json()['result'][0] assert 'mysample' not in ret_json['errors']
# ===== fn_api_runner.py (start of second concatenated file) =====
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
from __future__ import print_function

import collections
import contextlib
import copy
import logging
import os
import queue
import subprocess
import sys
import threading
import time
from builtins import object
from concurrent import futures

import grpc

import apache_beam as beam  # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam import metrics
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability.fn_api_runner_transforms import create_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
from apache_beam.runners.portability.fn_api_runner_transforms import split_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils

# This module is experimental. No backwards-compatibility guarantees.

ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
    beam.coders.BytesCoder(),
    beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
        beam.transforms.window.GlobalWindows.windowed_value(b''))


class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):

  UNSTARTED_STATE = 'unstarted'
  STARTED_STATE = 'started'
  DONE_STATE = 'done'

  _DONE_MARKER = object()

  def __init__(self):
    self._push_queue = queue.Queue()
    self._futures_by_id = dict()
    self._read_thread = threading.Thread(
        name='beam_control_read', target=self._read)
    self._uid_counter = 0
    self._state = self.UNSTARTED_STATE
    self._lock = threading.Lock()

  def Control(self, iterator, context):
    with self._lock:
      if self._state == self.DONE_STATE:
        return
      else:
        self._state = self.STARTED_STATE
    self._inputs = iterator
    # Note: We only support one client for now.
    self._read_thread.start()
    while True:
      to_push = self._push_queue.get()
      if to_push is self._DONE_MARKER:
        return
      yield to_push

  def _read(self):
    for data in self._inputs:
      self._futures_by_id.pop(data.instruction_id).set(data)

  def push(self, item):
    if item is self._DONE_MARKER:
      future = None
    else:
      if not item.instruction_id:
        self._uid_counter += 1
        item.instruction_id = 'control_%s' % self._uid_counter
      future = ControlFuture(item.instruction_id)
      self._futures_by_id[item.instruction_id] = future
    self._push_queue.put(item)
    return future

  def done(self):
    with self._lock:
      if self._state == self.STARTED_STATE:
        self.push(self._DONE_MARKER)
        self._read_thread.join()
      self._state = self.DONE_STATE


class _GroupingBuffer(object):
  """Used to accumulate groupded (shuffled) results."""

  def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
    self._key_coder = pre_grouped_coder.key_coder()
    self._pre_grouped_coder = pre_grouped_coder
    self._post_grouped_coder = post_grouped_coder
    self._table = collections.defaultdict(list)
    self._windowing = windowing
    self._grouped_output = None

  def append(self, elements_data):
    if self._grouped_output:
      raise RuntimeError('Grouping table append after read.')
    input_stream = create_InputStream(elements_data)
    coder_impl = self._pre_grouped_coder.get_impl()
    key_coder_impl = self._key_coder.get_impl()
    # TODO(robertwb): We could optimize this even more by using a
    # window-dropping coder for the data plane.
    is_trivial_windowing = self._windowing.is_default()
    while input_stream.size() > 0:
      windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
      key, value = windowed_key_value.value
      self._table[key_coder_impl.encode(key)].append(
          value if is_trivial_windowing
          else windowed_key_value.with_value(value))

  def __iter__(self):
    if not self._grouped_output:
      output_stream = create_OutputStream()
      if self._windowing.is_default():
        globally_window = GlobalWindows.windowed_value(None).with_value
        windowed_key_values = lambda key, values: [
            globally_window((key, values))]
      else:
        trigger_driver = trigger.create_trigger_driver(self._windowing, True)
        windowed_key_values = trigger_driver.process_entire_key
      coder_impl = self._post_grouped_coder.get_impl()
      key_coder_impl = self._key_coder.get_impl()
      for encoded_key, windowed_values in self._table.items():
        key = key_coder_impl.decode(encoded_key)
        for wkvs in windowed_key_values(key, windowed_values):
          coder_impl.encode_to_stream(wkvs, output_stream, True)
      self._grouped_output = [output_stream.get()]
      self._table = None
    return iter(self._grouped_output)


class _WindowGroupingBuffer(object):
  """Used to partition windowed side inputs."""

  def __init__(self, access_pattern, coder):
    # Here's where we would use a different type of partitioning
    # (e.g. also by key) for a different access pattern.
    if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
      self._kv_extrator = lambda value: ('', value)
      self._key_coder = coders.SingletonCoder('')
      self._value_coder = coder.wrapped_value_coder
    elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
      self._kv_extrator = lambda value: value
      self._key_coder = coder.wrapped_value_coder.key_coder()
      self._value_coder = (
          coder.wrapped_value_coder.value_coder())
    else:
      raise ValueError(
          "Unknown access pattern: '%s'" % access_pattern.urn)
    self._windowed_value_coder = coder
    self._window_coder = coder.window_coder
    self._values_by_window = collections.defaultdict(list)

  def append(self, elements_data):
    input_stream = create_InputStream(elements_data)
    while input_stream.size() > 0:
      windowed_value = self._windowed_value_coder.get_impl(
          ).decode_from_stream(input_stream, True)
      key, value = self._kv_extrator(windowed_value.value)
      for window in windowed_value.windows:
        self._values_by_window[key, window].append(value)

  def encoded_items(self):
    value_coder_impl = self._value_coder.get_impl()
    key_coder_impl = self._key_coder.get_impl()
    for (key, window), values in self._values_by_window.items():
      encoded_window = self._window_coder.encode(window)
      encoded_key = key_coder_impl.encode_nested(key)
      output_stream = create_OutputStream()
      for value in values:
        value_coder_impl.encode_to_stream(value, output_stream, True)
      yield encoded_key, encoded_window, output_stream.get()


class FnApiRunner(runner.PipelineRunner):

  def __init__(
      self,
      default_environment=None,
      bundle_repeat=0,
      use_state_iterables=False):
    """Creates a new Fn API Runner.

    Args:
      default_environment: the default environment to use for UserFns.
      bundle_repeat: replay every bundle this many extra times, for profiling
          and debugging
      use_state_iterables: Intentionally split gbk iterables over state API
          (for testing)
    """
    super(FnApiRunner, self).__init__()
    self._last_uid = -1
    self._default_environment = (
        default_environment
        or beam_runner_api_pb2.Environment(urn=python_urns.EMBEDDED_PYTHON))
    self._bundle_repeat = bundle_repeat
    self._progress_frequency = None
    self._profiler_factory = None
    self._use_state_iterables = use_state_iterables

  def _next_uid(self):
    self._last_uid += 1
    return str(self._last_uid)

  def run_pipeline(self, pipeline, options):
    MetricsEnvironment.set_metrics_supported(False)
    RuntimeValueProvider.set_runtime_options({})
    # This is sometimes needed if type checking is disabled
    # to enforce that the inputs (and outputs) of GroupByKey operations
    # are known to be KVs.
    from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
    pipeline.visit(DataflowRunner.group_by_key_input_visitor())
    self._bundle_repeat = self._bundle_repeat or options.view_as(
        pipeline_options.DirectOptions).direct_runner_bundle_repeat
    self._profiler_factory = profiler.Profile.factory_from_options(
        options.view_as(pipeline_options.ProfilingOptions))
    return self.run_via_runner_api(pipeline.to_runner_api(
        default_environment=self._default_environment))

  def run_via_runner_api(self, pipeline_proto):
    return self.run_stages(*self.create_stages(pipeline_proto))

  @contextlib.contextmanager
  def maybe_profile(self):
    if self._profiler_factory:
      try:
        profile_id = 'direct-' + subprocess.check_output(
            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']
        ).decode(errors='ignore').strip()
      except subprocess.CalledProcessError:
        profile_id = 'direct-unknown'
      profiler = self._profiler_factory(profile_id, time_prefix='')
    else:
      profiler = None

    if profiler:
      with profiler:
        yield
      if not self._bundle_repeat:
        logging.warning(
            'The --direct_runner_bundle_repeat option is not set; '
            'a significant portion of the profile may be one-time overhead.')
      path = profiler.profile_output
      print('CPU Profile written to %s' % path)
      try:
        import gprof2dot  # pylint: disable=unused-variable
        if not subprocess.call([
            sys.executable, '-m', 'gprof2dot',
            '-f', 'pstats', path, '-o', path + '.dot']):
          if not subprocess.call(
              ['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
            print('CPU Profile rendering at file://%s.svg'
                  % os.path.abspath(path))
      except ImportError:
        # pylint: disable=superfluous-parens
        print('Please install gprof2dot and dot for profile renderings.')
    else:
      # Empty context.
      yield

  def create_stages(self, pipeline_proto):
    pipeline_context = fn_api_runner_transforms.TransformContext(
        copy.deepcopy(pipeline_proto.components),
        use_state_iterables=self._use_state_iterables)

    # Initial set of stages are singleton leaf transforms.
    stages = list(fn_api_runner_transforms.leaf_transform_stages(
        pipeline_proto.root_transform_ids, pipeline_proto.components))

    # Apply each phase in order.
    for phase in [
        fn_api_runner_transforms.annotate_downstream_side_inputs,
        fn_api_runner_transforms.fix_side_input_pcoll_coders,
        fn_api_runner_transforms.lift_combiners,
        fn_api_runner_transforms.expand_gbk,
        fn_api_runner_transforms.sink_flattens,
        fn_api_runner_transforms.greedily_fuse,
        fn_api_runner_transforms.read_to_impulse,
        fn_api_runner_transforms.impulse_to_input,
        fn_api_runner_transforms.inject_timer_pcollections,
        fn_api_runner_transforms.sort_stages,
        fn_api_runner_transforms.window_pcollection_coders]:
      logging.info('%s %s %s', '=' * 20, phase, '=' * 20)
      stages = list(phase(stages, pipeline_context))
      logging.debug('Stages: %s', [str(s) for s in stages])

    # Return the (possibly mutated) context and ordered set of stages.
    return pipeline_context.components, stages, pipeline_context.safe_coders

  def run_stages(self, pipeline_components, stages, safe_coders):
    worker_handler_manager = WorkerHandlerManager(
        pipeline_components.environments)
    metrics_by_stage = {}
    monitoring_infos_by_stage = {}

    try:
      with self.maybe_profile():
        pcoll_buffers = collections.defaultdict(list)
        for stage in stages:
          stage_results = self.run_stage(
              worker_handler_manager.get_worker_handler,
              pipeline_components,
              stage,
              pcoll_buffers,
              safe_coders)
          metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
          monitoring_infos_by_stage[stage.name] = (
              stage_results.process_bundle.monitoring_infos)
    finally:
      worker_handler_manager.close_all()
    return RunnerResult(
        runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)

  def run_stage(
      self, worker_handler_factory, pipeline_components, stage, pcoll_buffers,
      safe_coders):

    def iterable_state_write(values, element_coder_impl):
      token = unique_name(None, 'iter').encode('ascii')
      out = create_OutputStream()
      for element in values:
        element_coder_impl.encode_to_stream(element, out, True)
      controller.state.blocking_append(
          beam_fn_api_pb2.StateKey(
              runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
          out.get())
      return token

    controller = worker_handler_factory(stage.environment)
    context = pipeline_context.PipelineContext(
        pipeline_components, iterable_state_write=iterable_state_write)
    data_api_service_descriptor = controller.data_api_service_descriptor()

    def extract_endpoints(stage):
      # Returns maps of transform names to PCollection identifiers.
      # Also mutates IO stages to point to the data ApiServiceDescriptor.
      data_input = {}
      data_side_input = {}
      data_output = {}
      for transform in stage.transforms:
        if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
                                  bundle_processor.DATA_OUTPUT_URN):
          pcoll_id = transform.spec.payload
          if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
            target = transform.unique_name, only_element(transform.outputs)
            if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
              data_input[target] = [ENCODED_IMPULSE_VALUE]
            else:
              data_input[target] = pcoll_buffers[pcoll_id]
            coder_id = pipeline_components.pcollections[
                only_element(transform.outputs.values())].coder_id
          elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
            target = transform.unique_name, only_element(transform.inputs)
            data_output[target] = pcoll_id
            coder_id = pipeline_components.pcollections[
                only_element(transform.inputs.values())].coder_id
          else:
            raise NotImplementedError
          data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
          if data_api_service_descriptor:
            data_spec.api_service_descriptor.url = (
                data_api_service_descriptor.url)
          transform.spec.payload = data_spec.SerializeToString()
        elif transform.spec.urn == common_urns.primitives.PAR_DO.urn:
          payload = proto_utils.parse_Bytes(
              transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
          for tag, si in payload.side_inputs.items():
            data_side_input[transform.unique_name, tag] = (
                create_buffer_id(transform.inputs[tag]), si.access_pattern)
      return data_input, data_side_input, data_output

    logging.info('Running %s', stage.name)
    logging.debug(' %s', stage)
    data_input, data_side_input, data_output = extract_endpoints(stage)

    process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
        id=self._next_uid(),
        transforms={transform.unique_name: transform
                    for transform in stage.transforms},
        pcollections=dict(pipeline_components.pcollections.items()),
        coders=dict(pipeline_components.coders.items()),
        windowing_strategies=dict(
            pipeline_components.windowing_strategies.items()),
        environments=dict(pipeline_components.environments.items()))

    if controller.state_api_service_descriptor():
      process_bundle_descriptor.state_api_service_descriptor.url = (
          controller.state_api_service_descriptor().url)

    # Store the required side inputs into state.
    for (transform_id, tag), (buffer_id, si) in data_side_input.items():
      _, pcoll_id = split_buffer_id(buffer_id)
      value_coder = context.coders[safe_coders[
          pipeline_components.pcollections[pcoll_id].coder_id]]
      elements_by_window = _WindowGroupingBuffer(si, value_coder)
      for element_data in pcoll_buffers[buffer_id]:
        elements_by_window.append(element_data)
      for key, window, elements_data in elements_by_window.encoded_items():
        state_key = beam_fn_api_pb2.StateKey(
            multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
                ptransform_id=transform_id,
                side_input_id=tag,
                window=window,
                key=key))
        controller.state.blocking_append(state_key, elements_data)

    def get_buffer(buffer_id):
      kind, name = split_buffer_id(buffer_id)
      if kind in ('materialize', 'timers'):
        if buffer_id not in pcoll_buffers:
          # Just store the data chunks for replay.
          pcoll_buffers[buffer_id] = list()
      elif kind == 'group':
        # This is a grouping write, create a grouping buffer if needed.
        if buffer_id not in pcoll_buffers:
          original_gbk_transform = name
          transform_proto = pipeline_components.transforms[
              original_gbk_transform]
          input_pcoll = only_element(list(transform_proto.inputs.values()))
          output_pcoll = only_element(list(transform_proto.outputs.values()))
          pre_gbk_coder = context.coders[safe_coders[
              pipeline_components.pcollections[input_pcoll].coder_id]]
          post_gbk_coder = context.coders[safe_coders[
              pipeline_components.pcollections[output_pcoll].coder_id]]
          windowing_strategy = context.windowing_strategies[
              pipeline_components
              .pcollections[output_pcoll].windowing_strategy_id]
          pcoll_buffers[buffer_id] = _GroupingBuffer(
              pre_gbk_coder, post_gbk_coder, windowing_strategy)
      else:
        # These should be the only two identifiers we produce for now,
        # but special side input writes may go here.
        raise NotImplementedError(buffer_id)
      return pcoll_buffers[buffer_id]

    for k in range(self._bundle_repeat):
      try:
        controller.state.checkpoint()
        BundleManager(
            controller, lambda pcoll_id: [], process_bundle_descriptor,
            self._progress_frequency, k).process_bundle(data_input,
                                                        data_output)
      finally:
        controller.state.restore()

    result = BundleManager(
        controller, get_buffer, process_bundle_descriptor,
        self._progress_frequency).process_bundle(data_input, data_output)

    while True:
      timer_inputs = {}
      for transform_id, timer_writes in stage.timer_pcollections:
        windowed_timer_coder_impl = context.coders[
            pipeline_components.pcollections[timer_writes].coder_id].get_impl()
        written_timers = get_buffer(
            create_buffer_id(timer_writes, kind='timers'))
        if written_timers:
          # Keep only the "last" timer set per key and window.
          timers_by_key_and_window = {}
          for elements_data in written_timers:
            input_stream = create_InputStream(elements_data)
            while input_stream.size() > 0:
              windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
                  input_stream, True)
              key, _ = windowed_key_timer.value
              # TODO: Explode and merge windows.
              assert len(windowed_key_timer.windows) == 1
              timers_by_key_and_window[
                  key, windowed_key_timer.windows[0]] = windowed_key_timer
          out = create_OutputStream()
          for windowed_key_timer in timers_by_key_and_window.values():
            windowed_timer_coder_impl.encode_to_stream(
                windowed_key_timer, out, True)
          timer_inputs[transform_id, 'out'] = [out.get()]
          written_timers[:] = []
      if timer_inputs:
        # The worker will be waiting on these inputs as well.
        for other_input in data_input:
          if other_input not in timer_inputs:
            timer_inputs[other_input] = []
        # TODO(robertwb): merge results
        BundleManager(
            controller,
            get_buffer,
            process_bundle_descriptor,
            self._progress_frequency,
            True).process_bundle(timer_inputs, data_output)
      else:
        break

    return result

  # These classes are used to interact with the worker.

  class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):

    class CopyOnWriteState(object):
      def __init__(self, underlying):
        self._underlying = underlying
        self._overlay = {}

      def __getitem__(self, key):
        if key in self._overlay:
          return self._overlay[key]
        else:
          return FnApiRunner.StateServicer.CopyOnWriteList(
              self._underlying, self._overlay, key)

      def __delitem__(self, key):
        self._overlay[key] = []

      def commit(self):
        self._underlying.update(self._overlay)
        return self._underlying

    class CopyOnWriteList(object):
      def __init__(self, underlying, overlay, key):
        self._underlying = underlying
        self._overlay = overlay
        self._key = key

      def __iter__(self):
        if self._key in self._overlay:
          return iter(self._overlay[self._key])
        else:
          return iter(self._underlying[self._key])

      def append(self, item):
        if self._key not in self._overlay:
          self._overlay[self._key] = list(self._underlying[self._key])
        self._overlay[self._key].append(item)

    def __init__(self):
      self._lock = threading.Lock()
      self._state = collections.defaultdict(list)
      self._checkpoint = None
      self._use_continuation_tokens = False
      self._continuations = {}

    def checkpoint(self):
      assert self._checkpoint is None
      self._checkpoint = self._state
      self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)

    def commit(self):
      self._state.commit()
      self._state = self._checkpoint.commit()
      self._checkpoint = None

    def restore(self):
      self._state = self._checkpoint
      self._checkpoint = None

    @contextlib.contextmanager
    def process_instruction_id(self, unused_instruction_id):
      yield

    def blocking_get(self, state_key, continuation_token=None):
      with self._lock:
        full_state = self._state[self._to_key(state_key)]
        if self._use_continuation_tokens:
          # The token is "nonce:index".
          if not continuation_token:
            token_base = 'token_%x' % len(self._continuations)
            self._continuations[token_base] = tuple(full_state)
            return b'', '%s:0' % token_base
          else:
            token_base, index = continuation_token.split(':')
            ix = int(index)
            full_state = self._continuations[token_base]
            if ix == len(full_state):
              return b'', None
            else:
              return full_state[ix], '%s:%d' % (token_base, ix + 1)
        else:
          assert not continuation_token
          return b''.join(full_state), None

    def blocking_append(self, state_key, data):
      with self._lock:
        self._state[self._to_key(state_key)].append(data)

    def blocking_clear(self, state_key):
      with self._lock:
        del self._state[self._to_key(state_key)]

    @staticmethod
    def _to_key(state_key):
      return state_key.SerializeToString()

  class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
    def __init__(self, state):
      self._state = state

    def State(self, request_stream, context=None):
      # Note that this eagerly mutates state, assuming any failures are fatal.
      # Thus it is safe to ignore instruction_reference.
      for request in request_stream:
        request_type = request.WhichOneof('request')
        if request_type == 'get':
          data, continuation_token = self._state.blocking_get(
              request.state_key, request.get.continuation_token)
          yield beam_fn_api_pb2.StateResponse(
              id=request.id,
              get=beam_fn_api_pb2.StateGetResponse(
                  data=data, continuation_token=continuation_token))
        elif request_type == 'append':
          self._state.blocking_append(request.state_key, request.append.data)
          yield beam_fn_api_pb2.StateResponse(
              id=request.id,
              append=beam_fn_api_pb2.StateAppendResponse())
        elif request_type == 'clear':
          self._state.blocking_clear(request.state_key)
          yield beam_fn_api_pb2.StateResponse(
              id=request.id,
              clear=beam_fn_api_pb2.StateClearResponse())
        else:
          raise NotImplementedError('Unknown state request: %s' % request_type)

  class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
    """A singleton cache for a StateServicer."""

    def __init__(self, state_handler):
      self._state_handler = state_handler

    def create_state_handler(self, api_service_descriptor):
      """Returns the singleton state handler."""
      return self._state_handler

    def close(self):
      """Does nothing."""
      pass


class WorkerHandler(object):

  _registered_environments = {}

  def __init__(self, control_handler, data_plane_handler, state):
    self.control_handler = control_handler
    self.data_plane_handler = data_plane_handler
    self.state = state

  def close(self):
    self.stop_worker()

  def start_worker(self):
    raise NotImplementedError

  def stop_worker(self):
    raise NotImplementedError

  def data_api_service_descriptor(self):
    raise NotImplementedError

  def state_api_service_descriptor(self):
    raise NotImplementedError

  @classmethod
  def register_environment(cls, urn, payload_type):
    def wrapper(constructor):
      cls._registered_environments[urn] = constructor, payload_type
      return constructor
    return wrapper

  @classmethod
  def create(cls, environment, state):
    constructor, payload_type = cls._registered_environments[environment.urn]
    return constructor(
        proto_utils.parse_Bytes(environment.payload, payload_type), state)


@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
  """An in-memory controller for fn API control, state and data planes."""

  def __init__(self, unused_payload, state):
    super(EmbeddedWorkerHandler, self).__init__(
        self, data_plane.InMemoryDataChannel(), state)
    self.worker = sdk_worker.SdkWorker(
        FnApiRunner.SingletonStateHandlerFactory(self.state),
        data_plane.InMemoryDataChannelFactory(
            self.data_plane_handler.inverse()),
        {})
    self._uid_counter = 0

  def push(self, request):
    if not request.instruction_id:
      self._uid_counter += 1
      request.instruction_id = 'control_%s' % self._uid_counter
    logging.debug('CONTROL REQUEST %s', request)
    response = self.worker.do_instruction(request)
    logging.debug('CONTROL RESPONSE %s', response)
    return ControlFuture(request.instruction_id, response)

  def start_worker(self):
    pass

  def stop_worker(self):
    pass

  def done(self):
    pass

  def data_api_service_descriptor(self):
    return None

  def state_api_service_descriptor(self):
    return None


class GrpcWorkerHandler(WorkerHandler):
  """An grpc based controller for fn API control, state and data planes."""

  def __init__(self, state=None):
    self.control_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10))
    self.control_port = self.control_server.add_insecure_port('[::]:0')
    self.control_address = 'localhost:%s' % self.control_port

    # Options to have no limits (-1) on the size of the messages
    # received or sent over the data plane. The actual buffer size
    # is controlled in a layer above.
    no_max_message_sizes = [("grpc.max_receive_message_length", -1),
                            ("grpc.max_send_message_length", -1)]
    self.data_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
        options=no_max_message_sizes)
    self.data_port = self.data_server.add_insecure_port('[::]:0')

    self.state_server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=10),
        options=no_max_message_sizes)
    self.state_port = self.state_server.add_insecure_port('[::]:0')

    self.control_handler = BeamFnControlServicer()
    beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
        self.control_handler, self.control_server)

    self.data_plane_handler = data_plane.GrpcServerDataChannel()
    beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
        self.data_plane_handler, self.data_server)

    self.state = state
    beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
        FnApiRunner.GrpcStateServicer(state),
        self.state_server)

    logging.info('starting control server on port %s', self.control_port)
    logging.info('starting data server on port %s', self.data_port)
    self.state_server.start()
    self.data_server.start()
    self.control_server.start()

  def data_api_service_descriptor(self):
    return endpoints_pb2.ApiServiceDescriptor(
        url='localhost:%s' % self.data_port)

  def state_api_service_descriptor(self):
    return endpoints_pb2.ApiServiceDescriptor(
        url='localhost:%s' % self.state_port)

  def close(self):
    self.control_handler.done()
    self.data_plane_handler.close()
    self.control_server.stop(5).wait()
    self.data_server.stop(5).wait()
    self.state_server.stop(5).wait()
    super(GrpcWorkerHandler, self).close()


@WorkerHandler.register_environment(
    common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
  def __init__(self, external_payload, state):
    super(ExternalWorkerHandler, self).__init__(state)
    self._external_payload = external_payload

  def start_worker(self):
    stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
        grpc.insecure_channel(self._external_payload.endpoint.url))
    response
= stub.NotifyRunnerAvailable( beam_fn_api_pb2.NotifyRunnerAvailableRequest( control_endpoint=endpoints_pb2.ApiServiceDescriptor( url=self.control_address), params=self._external_payload.params)) if response.error: raise RuntimeError("Error starting worker: %s" % response.error) def stop_worker(self): pass @WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes) class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler): def __init__(self, num_workers_payload, state): super(EmbeddedGrpcWorkerHandler, self).__init__(state) self._num_threads = int(num_workers_payload) if num_workers_payload else 1 def start_worker(self): self.worker = sdk_worker.SdkHarness( self.control_address, worker_count=self._num_threads) self.worker_thread = threading.Thread( name='run_worker', target=self.worker.run) self.worker_thread.start() def stop_worker(self): self.worker_thread.join() @WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes) class SubprocessSdkWorkerHandler(GrpcWorkerHandler): def __init__(self, worker_command_line, state): super(SubprocessSdkWorkerHandler, self).__init__(state) self._worker_command_line = worker_command_line def start_worker(self): from apache_beam.runners.portability import local_job_service self.worker = local_job_service.SubprocessSdkWorker( self._worker_command_line, self.control_address) self.worker_thread = threading.Thread( name='run_worker', target=self.worker.run) self.worker_thread.start() def stop_worker(self): self.worker_thread.join() class WorkerHandlerManager(object): def __init__(self, environments): self._environments = environments self._cached_handlers = {} self._state = FnApiRunner.StateServicer() # rename? def get_worker_handler(self, environment_id): if environment_id is None: # Any environment will do, pick one arbitrarily. 
environment_id = next(iter(self._environments.keys())) environment = self._environments[environment_id] worker_handler = self._cached_handlers.get(environment_id) if worker_handler is None: worker_handler = self._cached_handlers[ environment_id] = WorkerHandler.create( environment, self._state) worker_handler.start_worker() return worker_handler def close_all(self): for controller in set(self._cached_handlers.values()): controller.close() self._cached_handlers = {} class BundleManager(object): _uid_counter = 0 def __init__( self, controller, get_buffer, bundle_descriptor, progress_frequency=None, skip_registration=False): self._controller = controller self._get_buffer = get_buffer self._bundle_descriptor = bundle_descriptor self._registered = skip_registration self._progress_frequency = progress_frequency def process_bundle(self, inputs, expected_outputs): # Unique id for the instruction processing this bundle. BundleManager._uid_counter += 1 process_bundle_id = 'bundle_%s' % BundleManager._uid_counter # Register the bundle descriptor, if needed. if self._registered: registration_future = None else: process_bundle_registration = beam_fn_api_pb2.InstructionRequest( register=beam_fn_api_pb2.RegisterRequest( process_bundle_descriptor=[self._bundle_descriptor])) registration_future = self._controller.control_handler.push( process_bundle_registration) self._registered = True # Write all the input data to the channel. for (transform_id, name), elements in inputs.items(): data_out = self._controller.data_plane_handler.output_stream( process_bundle_id, beam_fn_api_pb2.Target( primitive_transform_reference=transform_id, name=name)) for element_data in elements: data_out.write(element_data) data_out.close() # Actually start the bundle. 
if registration_future and registration_future.get().error: raise RuntimeError(registration_future.get().error) process_bundle = beam_fn_api_pb2.InstructionRequest( instruction_id=process_bundle_id, process_bundle=beam_fn_api_pb2.ProcessBundleRequest( process_bundle_descriptor_reference=self._bundle_descriptor.id)) result_future = self._controller.control_handler.push(process_bundle) with ProgressRequester( self._controller, process_bundle_id, self._progress_frequency): # Gather all output data. expected_targets = [ beam_fn_api_pb2.Target(primitive_transform_reference=transform_id, name=output_name) for (transform_id, output_name), _ in expected_outputs.items()] logging.debug('Gather all output data from %s.', expected_targets) for output in self._controller.data_plane_handler.input_elements( process_bundle_id, expected_targets, abort_callback=lambda: (result_future.is_done() and result_future.get().error)): target_tuple = ( output.target.primitive_transform_reference, output.target.name) if target_tuple in expected_outputs: self._get_buffer(expected_outputs[target_tuple]).append(output.data) logging.debug('Wait for the bundle to finish.') result = result_future.get() if result.error: raise RuntimeError(result.error) return result class ProgressRequester(threading.Thread): def __init__(self, controller, instruction_id, frequency, callback=None): super(ProgressRequester, self).__init__() self._controller = controller self._instruction_id = instruction_id self._frequency = frequency self._done = False self._latest_progress = None self._callback = callback self.daemon = True def __enter__(self): if self._frequency: self.start() def __exit__(self, *unused_exc_info): if self._frequency: self.stop() def run(self): while not self._done: try: progress_result = self._controller.control_handler.push( beam_fn_api_pb2.InstructionRequest( process_bundle_progress= beam_fn_api_pb2.ProcessBundleProgressRequest( instruction_reference=self._instruction_id))).get() 
        self._latest_progress = progress_result.process_bundle_progress
        if self._callback:
          self._callback(self._latest_progress)
      except Exception as exn:
        # Progress polling is best-effort; log and keep polling.
        logging.error("Bad progress: %s", exn)
      time.sleep(self._frequency)

  def stop(self):
    # Flag read by run(); the polling thread exits after at most one
    # more sleep interval.
    self._done = True


class ControlFuture(object):
  """Future for a single control-plane response.

  May be constructed already completed (``response`` given) or pending,
  in which case set() fulfils it and get() blocks on a condition
  variable until it is fulfilled.
  """

  def __init__(self, instruction_id, response=None):
    self.instruction_id = instruction_id
    if response:
      self._response = response
    else:
      # NOTE(review): _condition only exists on the pending path, so
      # calling set() or a blocking get() on a future constructed with a
      # response would raise AttributeError -- presumably such futures
      # are only ever read via the fast path below; confirm with callers.
      self._response = None
      self._condition = threading.Condition()

  def is_done(self):
    return self._response is not None

  def set(self, response):
    with self._condition:
      self._response = response
      self._condition.notify_all()

  def get(self, timeout=None):
    # Double-checked wait.  NOTE: may still return None if the wait
    # times out before set() is called.
    if not self._response:
      with self._condition:
        if not self._response:
          self._condition.wait(timeout)
    return self._response


class FnApiMetrics(metrics.metric.MetricResults):
  def __init__(self, step_monitoring_infos, user_metrics_only=True):
    """Used for querying metrics from the PipelineResult object.

    step_monitoring_infos: Per step metrics specified as MonitoringInfos.
    user_metrics_only: If true, include only user-defined metrics and
      drop system monitoring infos.
    """
    self._counters = {}
    self._distributions = {}
    self._gauges = {}
    self._user_metrics_only = user_metrics_only
    self._init_metrics_from_monitoring_infos(step_monitoring_infos)

  def _init_metrics_from_monitoring_infos(self, step_monitoring_infos):
    # Bucket each monitoring info into counters/distributions/gauges.
    for smi in step_monitoring_infos.values():
      # Only include user metrics.
      for mi in smi:
        if (self._user_metrics_only and
            not monitoring_infos.is_user_monitoring_info(mi)):
          continue
        key = self._to_metric_key(mi)
        if monitoring_infos.is_counter(mi):
          self._counters[key] = (
              monitoring_infos.extract_metric_result_map_value(mi))
        elif monitoring_infos.is_distribution(mi):
          self._distributions[key] = (
              monitoring_infos.extract_metric_result_map_value(mi))
        elif monitoring_infos.is_gauge(mi):
          self._gauges[key] = (
              monitoring_infos.extract_metric_result_map_value(mi))

  def _to_metric_key(self, monitoring_info):
    """Build a MetricKey (ptransform, namespace, name) for a MonitoringInfo."""
    # Right now this assumes that all metrics have a PTRANSFORM
    ptransform_id = monitoring_info.labels['PTRANSFORM']
    namespace, name = monitoring_infos.parse_namespace_and_name(monitoring_info)
    return MetricKey(
        ptransform_id, metrics.metricbase.MetricName(namespace, name))

  def query(self, filter=None):
    """Return counters/distributions/gauges matching the given filter.

    Each MetricResult carries the same value as both attempted and
    committed, since this runner does not distinguish the two.
    """
    counters = [metrics.execution.MetricResult(k, v, v)
                for k, v in self._counters.items()
                if self.matches(filter, k)]
    distributions = [metrics.execution.MetricResult(k, v, v)
                     for k, v in self._distributions.items()
                     if self.matches(filter, k)]
    gauges = [metrics.execution.MetricResult(k, v, v)
              for k, v in self._gauges.items()
              if self.matches(filter, k)]

    return {self.COUNTERS: counters,
            self.DISTRIBUTIONS: distributions,
            self.GAUGES: gauges}


class RunnerResult(runner.PipelineResult):
  """PipelineResult for this runner; the pipeline has already run."""

  def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
    super(RunnerResult, self).__init__(state)
    self._monitoring_infos_by_stage = monitoring_infos_by_stage
    # NOTE(review): metrics_by_stage is stored but never read in this
    # class -- presumably kept for external consumers; confirm.
    self._metrics_by_stage = metrics_by_stage
    self._metrics = None
    self._monitoring_metrics = None

  def wait_until_finish(self, duration=None):
    # Execution is already complete by construction time, so this
    # returns immediately; duration is accepted for API compatibility
    # but ignored.
    return self._state

  def metrics(self):
    """Returns a queryable object including user metrics only."""
    if self._metrics is None:
      self._metrics = FnApiMetrics(
          self._monitoring_infos_by_stage, user_metrics_only=True)
    return self._metrics

  def monitoring_metrics(self):
    """Returns a queryable object including all metrics."""
    if self._monitoring_metrics is None:
      self._monitoring_metrics = FnApiMetrics(
          self._monitoring_infos_by_stage, user_metrics_only=False)
    return self._monitoring_metrics
io.py
# coding: utf-8 # pylint: disable=invalid-name, protected-access, fixme, too-many-arguments, W0221, W0201, no-self-use """NDArray interface of mxnet""" from __future__ import absolute_import from collections import OrderedDict import ctypes import sys import numpy as np import logging import threading from .base import _LIB from .base import c_array, c_str, mx_uint, py_str from .base import DataIterHandle, NDArrayHandle from .base import check_call, ctypes2docstring from .ndarray import NDArray from .ndarray import array class DataBatch(object): """Default object for holding a mini-batch of data and related information.""" def __init__(self, data, label, pad=None, index=None, bucket_key=None, provide_data=None, provide_label=None): self.data = data self.label = label self.pad = pad self.index = index # the following properties are only used when bucketing is used self.bucket_key = bucket_key self.provide_data = provide_data self.provide_label = provide_label class DataIter(object): """DataIter object in mxnet. """ def __init__(self): self.batch_size = 0 def __iter__(self): return self def reset(self): """Reset the iterator. """ pass def next(self): """Get next data batch from iterator. Equivalent to self.iter_next() DataBatch(self.getdata(), self.getlabel(), self.getpad(), None) Returns ------- data : DataBatch The data of next batch. """ if self.iter_next(): return DataBatch(data=self.getdata(), label=self.getlabel(), \ pad=self.getpad(), index=self.getindex()) else: raise StopIteration def __next__(self): return self.next() def iter_next(self): """Iterate to next batch. Returns ------- has_next : boolean Whether the move is successful. """ pass def getdata(self): """Get data of current batch. Returns ------- data : NDArray The data of current batch. """ pass def getlabel(self): """Get label of current batch. Returns ------- label : NDArray The label of current batch. """ pass def getindex(self): """Get index of the current batch. 
Returns ------- index : numpy.array The index of current batch """ return None def getpad(self): """Get the number of padding examples in current batch. Returns ------- pad : int Number of padding examples in current batch """ pass class ResizeIter(DataIter): """Resize a DataIter to given number of batches per epoch. May produce incomplete batch in the middle of an epoch due to padding from internal iterator. Parameters ---------- data_iter : DataIter Internal data iterator. size : number of batches per epoch to resize to. reset_internal : whether to reset internal iterator on ResizeIter.reset """ def __init__(self, data_iter, size, reset_internal=True): super(ResizeIter, self).__init__() self.data_iter = data_iter self.size = size self.reset_internal = reset_internal self.cur = 0 self.current_batch = None self.provide_data = data_iter.provide_data self.provide_label = data_iter.provide_label self.batch_size = data_iter.batch_size def reset(self): self.cur = 0 if self.reset_internal: self.data_iter.reset() def iter_next(self): if self.cur == self.size: return False try: self.current_batch = self.data_iter.next() except StopIteration: self.data_iter.reset() self.current_batch = self.data_iter.next() self.cur += 1 return True def getdata(self): return self.current_batch.data def getlabel(self): return self.current_batch.label def getindex(self): return self.current_batch.index def getpad(self): return self.current_batch.pad class PrefetchingIter(DataIter): """Base class for prefetching iterators. Takes one or more DataIters ( or any class with "reset" and "read" methods) and combine them with prefetching. For example: Parameters ---------- iters : DataIter or list of DataIter one or more DataIters (or any class with "reset" and "read" methods) rename_data : None or list of dict i-th element is a renaming map for i-th iter, in the form of {'original_name' : 'new_name'}. 
Should have one entry for each entry in iter[i].provide_data rename_label : None or list of dict Similar to rename_data Examples -------- iter = PrefetchingIter([NDArrayIter({'data': X1}), NDArrayIter({'data': X2})], rename_data=[{'data': 'data1'}, {'data': 'data2'}]) """ def __init__(self, iters, rename_data=None, rename_label=None): super(PrefetchingIter, self).__init__() if not isinstance(iters, list): iters = [iters] self.n_iter = len(iters) assert self.n_iter > 0 self.iters = iters if rename_data is None: self.provide_data = sum([i.provide_data for i in iters], []) else: self.provide_data = sum([[(r[n], s) for n, s in i.provide_data] \ for r, i in zip(rename_data, iters)], []) if rename_label is None: self.provide_label = sum([i.provide_label for i in iters], []) else: self.provide_label = sum([[(r[n], s) for n, s in i.provide_label] \ for r, i in zip(rename_label, iters)], []) self.batch_size = self.provide_data[0][1][0] self.data_ready = [threading.Event() for i in range(self.n_iter)] self.data_taken = [threading.Event() for i in range(self.n_iter)] for e in self.data_taken: e.set() self.started = True self.current_batch = [None for i in range(self.n_iter)] self.next_batch = [None for i in range(self.n_iter)] def prefetch_func(self, i): """Thread entry""" while True: self.data_taken[i].wait() if not self.started: break try: self.next_batch[i] = self.iters[i].next() except StopIteration: self.next_batch[i] = None self.data_taken[i].clear() self.data_ready[i].set() self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) \ for i in range(self.n_iter)] for thread in self.prefetch_threads: thread.setDaemon(True) thread.start() def __del__(self): self.started = False for e in self.data_taken: e.set() for thread in self.prefetch_threads: thread.join() def reset(self): for e in self.data_ready: e.wait() for i in self.iters: i.reset() for e in self.data_ready: e.clear() for e in self.data_taken: e.set() def iter_next(self): for e in 
self.data_ready: e.wait() if self.next_batch[0] is None: for i in self.next_batch: assert i is None, "Number of entry mismatches between iterators" return False else: for batch in self.next_batch: assert batch.pad == self.next_batch[0].pad, \ "Number of entry mismatches between iterators" self.current_batch = DataBatch(sum([batch.data for batch in self.next_batch], []), sum([batch.label for batch in self.next_batch], []), self.next_batch[0].pad, self.next_batch[0].index) for e in self.data_ready: e.clear() for e in self.data_taken: e.set() return True def next(self): if self.iter_next(): return self.current_batch else: raise StopIteration def getdata(self): return self.current_batch.data def getlabel(self): return self.current_batch.label def getindex(self): return self.current_batch.index def getpad(self): return self.current_batch.pad def _init_data(data, allow_empty, default_name): """Convert data into canonical form.""" assert (data is not None) or allow_empty if data is None: data = [] if isinstance(data, (np.ndarray, NDArray)): data = [data] if isinstance(data, list): if not allow_empty: assert(len(data) > 0) if len(data) == 1: data = OrderedDict([(default_name, data[0])]) else: data = OrderedDict([('_%d_%s' % (i, default_name), d) for i, d in enumerate(data)]) if not isinstance(data, dict): raise TypeError("Input must be NDArray, numpy.ndarray, " + \ "a list of them or dict with them as values") for k, v in data.items(): if isinstance(v, NDArray): data[k] = v.asnumpy() for k, v in data.items(): if not isinstance(v, np.ndarray): raise TypeError(("Invalid type '%s' for %s, " % (type(v), k)) + \ "should be NDArray or numpy.ndarray") return list(data.items()) class NDArrayIter(DataIter): """NDArrayIter object in mxnet. Taking NDArray or numpy array to get dataiter. Parameters ---------- data: NDArray or numpy.ndarray, a list of them, or a dict of string to them. NDArrayIter supports single or multiple data and label. 
label: NDArray or numpy.ndarray, a list of them, or a dict of them. Same as data, but is not fed to the model during testing. batch_size: int Batch Size shuffle: bool Whether to shuffle the data last_batch_handle: 'pad', 'discard' or 'roll_over' How to handle the last batch Note ---- This iterator will pad, discard or roll over the last batch if the size of data does not match batch_size. Roll over is intended for training and can cause problems if used for prediction. """ def __init__(self, data, label=None, batch_size=1, shuffle=False, last_batch_handle='pad'): # pylint: disable=W0201 super(NDArrayIter, self).__init__() self.data = _init_data(data, allow_empty=False, default_name='data') self.label = _init_data(label, allow_empty=True, default_name='softmax_label') # shuffle data if shuffle: idx = np.arange(self.data[0][1].shape[0]) np.random.shuffle(idx) self.data = [(k, v[idx]) for k, v in self.data] self.label = [(k, v[idx]) for k, v in self.label] self.data_list = [x[1] for x in self.data] + [x[1] for x in self.label] self.num_source = len(self.data_list) # batching if last_batch_handle == 'discard': new_n = self.data_list[0].shape[0] - self.data_list[0].shape[0] % batch_size data_dict = OrderedDict(self.data) label_dict = OrderedDict(self.label) for k, _ in self.data: data_dict[k] = data_dict[k][:new_n] for k, _ in self.label: label_dict[k] = label_dict[k][:new_n] self.data = data_dict.items() self.label = label_dict.items() self.num_data = self.data_list[0].shape[0] assert self.num_data >= batch_size, \ "batch_size need to be smaller than data size." 
self.cursor = -batch_size self.batch_size = batch_size self.last_batch_handle = last_batch_handle @property def provide_data(self): """The name and shape of data provided by this iterator""" return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.data] @property def provide_label(self): """The name and shape of label provided by this iterator""" return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self.label] def hard_reset(self): """Igore roll over data and set to start""" self.cursor = -self.batch_size def reset(self): if self.last_batch_handle == 'roll_over' and self.cursor > self.num_data: self.cursor = -self.batch_size + (self.cursor%self.num_data)%self.batch_size else: self.cursor = -self.batch_size def iter_next(self): self.cursor += self.batch_size if self.cursor < self.num_data: return True else: return False def next(self): if self.iter_next(): return DataBatch(data=self.getdata(), label=self.getlabel(), \ pad=self.getpad(), index=None) else: raise StopIteration def _getdata(self, data_source): """Load data from underlying arrays, internal use only""" assert(self.cursor < self.num_data), "DataIter needs reset." if self.cursor + self.batch_size <= self.num_data: return [array(x[1][self.cursor:self.cursor+self.batch_size]) for x in data_source] else: pad = self.batch_size - self.num_data + self.cursor return [array(np.concatenate((x[1][self.cursor:], x[1][:pad]), axis=0)) for x in data_source] def getdata(self): return self._getdata(self.data) def getlabel(self): return self._getdata(self.label) def getpad(self): if self.last_batch_handle == 'pad' and \ self.cursor + self.batch_size > self.num_data: return self.cursor + self.batch_size - self.num_data else: return 0 class MXDataIter(DataIter): """DataIter built in MXNet. List all the needed functions here. 
Parameters ---------- handle : DataIterHandle the handle to the underlying C++ Data Iterator """ def __init__(self, handle, data_name='data', label_name='softmax_label', **_): super(MXDataIter, self).__init__() self.handle = handle # debug option, used to test the speed with io effect eliminated self._debug_skip_load = False # load the first batch to get shape information self.first_batch = None self.first_batch = self.next() data = self.first_batch.data[0] label = self.first_batch.label[0] # properties self.provide_data = [(data_name, data.shape)] self.provide_label = [(label_name, label.shape)] self.batch_size = data.shape[0] def __del__(self): check_call(_LIB.MXDataIterFree(self.handle)) def debug_skip_load(self): """Set the iterator to simply return always first batch. Notes ----- This can be used to test the speed of network without taking the loading delay into account. """ self._debug_skip_load = True logging.info('Set debug_skip_load to be true, will simply return first batch') def reset(self): self._debug_at_begin = True self.first_batch = None check_call(_LIB.MXDataIterBeforeFirst(self.handle)) def next(self): if self._debug_skip_load and not self._debug_at_begin: return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(), index=self.getindex()) if self.first_batch is not None: batch = self.first_batch self.first_batch = None return batch self._debug_at_begin = False next_res = ctypes.c_int(0) check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res))) if next_res.value: return DataBatch(data=[self.getdata()], label=[self.getlabel()], pad=self.getpad(), index=self.getindex()) else: raise StopIteration def iter_next(self): if self.first_batch is not None: return True next_res = ctypes.c_int(0) check_call(_LIB.MXDataIterNext(self.handle, ctypes.byref(next_res))) return next_res.value def getdata(self): hdl = NDArrayHandle() check_call(_LIB.MXDataIterGetData(self.handle, ctypes.byref(hdl))) return NDArray(hdl, False) def 
getlabel(self): hdl = NDArrayHandle() check_call(_LIB.MXDataIterGetLabel(self.handle, ctypes.byref(hdl))) return NDArray(hdl, False) def getindex(self): index_size = ctypes.c_uint64(0) index_data = ctypes.POINTER(ctypes.c_uint64)() check_call(_LIB.MXDataIterGetIndex(self.handle, ctypes.byref(index_data), ctypes.byref(index_size))) address = ctypes.addressof(index_data.contents) dbuffer = (ctypes.c_uint64* index_size.value).from_address(address) np_index = np.frombuffer(dbuffer, dtype=np.uint64) return np_index.copy() def getpad(self): pad = ctypes.c_int(0) check_call(_LIB.MXDataIterGetPadNum(self.handle, ctypes.byref(pad))) return pad.value def _make_io_iterator(handle): """Create an io iterator by handle.""" name = ctypes.c_char_p() desc = ctypes.c_char_p() num_args = mx_uint() arg_names = ctypes.POINTER(ctypes.c_char_p)() arg_types = ctypes.POINTER(ctypes.c_char_p)() arg_descs = ctypes.POINTER(ctypes.c_char_p)() check_call(_LIB.MXDataIterGetIterInfo( \ handle, ctypes.byref(name), ctypes.byref(desc), \ ctypes.byref(num_args), \ ctypes.byref(arg_names), \ ctypes.byref(arg_types), \ ctypes.byref(arg_descs))) iter_name = py_str(name.value) param_str = ctypes2docstring(num_args, arg_names, arg_types, arg_descs) doc_str = ('%s\n\n' + '%s\n' + 'name : string, required.\n' + ' Name of the resulting data iterator.\n\n' + 'Returns\n' + '-------\n' + 'iterator: DataIter\n'+ ' The result iterator.') doc_str = doc_str % (desc.value, param_str) def creator(*args, **kwargs): """Create an iterator. The parameters listed below can be passed in as keyword arguments. Parameters ---------- name : string, required. Name of the resulting data iterator. 
Returns ------- dataiter: Dataiter the resulting data iterator """ param_keys = [] param_vals = [] for k, val in kwargs.items(): param_keys.append(c_str(k)) param_vals.append(c_str(str(val))) # create atomic symbol param_keys = c_array(ctypes.c_char_p, param_keys) param_vals = c_array(ctypes.c_char_p, param_vals) iter_handle = DataIterHandle() check_call(_LIB.MXDataIterCreateIter( handle, mx_uint(len(param_keys)), param_keys, param_vals, ctypes.byref(iter_handle))) if len(args): raise TypeError('%s can only accept keyword arguments' % iter_name) return MXDataIter(iter_handle, **kwargs) creator.__name__ = iter_name creator.__doc__ = doc_str return creator def _init_io_module(): """List and add all the data iterators to current module.""" plist = ctypes.POINTER(ctypes.c_void_p)() size = ctypes.c_uint() check_call(_LIB.MXListDataIters(ctypes.byref(size), ctypes.byref(plist))) module_obj = sys.modules[__name__] for i in range(size.value): hdl = ctypes.c_void_p(plist[i]) dataiter = _make_io_iterator(hdl) setattr(module_obj, dataiter.__name__, dataiter) # Initialize the io in startups _init_io_module()
server-gunn-pi-v1.py
import socket
from multiprocessing import Process, Manager
import time
import sys
import os

'''
scp ~/Documents/Code/python/client-server_simple/server.py pi@raspberrypi:~/Desktop/
'''

### Server Stuff
HOST = "169.254.209.111"  # link-local address of the Pi
PORT = 6789

# UDP socket shared with the listener process (inherited across fork).
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((HOST, PORT))


def messageCatcher(inputs, _):
    """Receive UDP control packets and mirror them into the shared dict.

    Runs forever in its own process.  Key events toggle k_left/k_right,
    "data: <x> <y> <z> <switch>" packets update the four joystick axes,
    and connect/terminate messages flip terminator_var.
    """
    while True:
        raw, addr = s.recvfrom(1024)
        # BUG FIX: recvfrom() returns bytes on Python 3; the original
        # compared bytes to str literals, which could never match.
        data = raw.decode("utf-8", errors="replace")
        if data == "key_left_down":
            inputs['k_left'] = 1
            print('left')
        if data == "key_right_down":
            inputs['k_right'] = 1
            print('right')
        if data == "key_left_up":
            inputs['k_left'] = 0
        if data == "key_right_up":
            inputs['k_right'] = 0
        if data == "terminate":
            inputs['terminator_var'] = 1
            print('Client terminated... :(')
        if data == "Client is connected...":
            inputs['terminator_var'] = 0
            print('Client joined...')
        if data.startswith("data:"):
            # Packet format: "data: <x> <y> <z> <switch>"
            _tag, x_axis, y_axis, z_axis, switch_axis = data.split()
            inputs['x_axis'] = float(x_axis)
            inputs['y_axis'] = float(y_axis)
            inputs['z_axis'] = float(z_axis)
            inputs['switch_axis'] = float(switch_axis)


def mainProcess(inputs, _):
    """Drive the four PWM motor channels from the shared input dict."""
    # BUG FIX: GPIO was used but never imported in the original
    # (NameError at runtime).  Imported locally so the module stays
    # importable on a dev machine without the RPi.GPIO package.
    import RPi.GPIO as GPIO

    ### Variables global to the main process
    # Base Control
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(17, GPIO.OUT)  # motor 1
    GPIO.setup(18, GPIO.OUT)  # motor 2
    GPIO.setup(27, GPIO.OUT)  # motor 3
    GPIO.setup(22, GPIO.OUT)  # motor 4
    m1 = GPIO.PWM(17, 50)
    m2 = GPIO.PWM(18, 50)
    m3 = GPIO.PWM(27, 50)
    m4 = GPIO.PWM(22, 50)
    # 7% duty cycle is the neutral/stop position for these ESCs.
    m1.start(7)
    m2.start(7)
    m3.start(7)
    m4.start(7)

    def inputFilter(x):
        """Apply a +/-0.05 dead zone and snap the extremes to +/-1."""
        if x <= -0.05:
            x += 0.05
        if x >= 0.05:
            x -= 0.05
        if -0.05 < x < 0.05:
            x = 0
        if x == 0.95:
            x = 1
        if x == -0.95:
            x = -1
        return x

    while True:
        # filtered inputs (printed for debugging)
        x_axis = inputFilter(inputs['x_axis'])
        y_axis = inputFilter(inputs['y_axis'])
        z_axis = inputFilter(inputs['z_axis'])
        switch_axis = inputFilter(inputs['switch_axis'])
        print(x_axis)
        print(y_axis)
        print(z_axis)
        print(switch_axis)

        # NOTE(review): the duty-cycle math below still uses the RAW axis
        # values; the filtered values above are only printed.  It looks
        # like the filter was meant to feed the motors -- confirm before
        # changing, since that alters robot behaviour.
        horizontal_power = (inputs['x_axis'] * 4) + 7
        vertical_power = (inputs['y_axis'] * 4) + 7
        print("longitudinal movement: " + str(vertical_power))
        print("strafe movement: " + str(horizontal_power))
        print(" ")

        # Mix x/y/z into the four motors; clamp to the safe 3..11 range.
        m1_duty_cycle = min(11, max(3, (-1 * (inputs['x_axis'] - (-1 * inputs['z_axis'])) * 4) + 7))
        m3_duty_cycle = min(11, max(3, (1 * (inputs['x_axis'] + (-1 * inputs['z_axis'])) * 4) + 7))
        m2_duty_cycle = min(11, max(3, (-1 * (inputs['y_axis'] - (-1 * inputs['z_axis'])) * 4) + 7))
        m4_duty_cycle = min(11, max(3, (1 * (inputs['y_axis'] + (-1 * inputs['z_axis'])) * 4) + 7))

        print("Motor 1: " + str(m1_duty_cycle))
        print("Motor 2: " + str(m2_duty_cycle))
        print("Motor 3: " + str(m3_duty_cycle))
        print("Motor 4: " + str(m4_duty_cycle))

        m1.ChangeDutyCycle(m1_duty_cycle)
        m2.ChangeDutyCycle(m2_duty_cycle)
        m3.ChangeDutyCycle(m3_duty_cycle)
        m4.ChangeDutyCycle(m4_duty_cycle)

        time.sleep(0.01)
        os.system('clear')  # Clear screen for Mac and Linux

        if inputs['terminator_var'] == 1:
            print('Client is missing, we will pause\n'
                  'and wait for them to reconnect\n'
                  'before doing anything irrational.')
            # Default all values so robot is safe


if __name__ == "__main__":
    manager = Manager()
    inputs = manager.dict()
    inputs['x_axis'] = 0
    inputs['y_axis'] = 0
    inputs['z_axis'] = 0
    inputs['switch_axis'] = 0
    inputs['terminator_var'] = 0
    # - multiprocessing runs a separate instance of python, typical
    #   global variables are not shared between child processes
    mC = Process(target=messageCatcher, args=(inputs, 1))
    mP = Process(target=mainProcess, args=(inputs, 1))
    mC.start()
    mP.start()
    mC.join()
    mP.join()
bomber.py
from tkinter import ttk
from tkinter import*
import smtplib, threading, random, time

# Shared state: emails sent so far, stop request flag, bad-credentials latch.
sent, stop, badCred = 0,0,0

def spam():
    """Worker loop (runs in its own thread): log in over SMTP-SSL and keep
    sending emails until `stop` is set or an error latches `badCred`.
    Status is reported through the `emailsSent` label."""
    global sent, var, badCred
    credentials = emailSender.get().split(",")
    server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
    try:
        server.login(f"{credentials[0]}", f"{credentials[1]}")
    # BUG FIX: the original `except smtplib.SMTPAuthenticationError or IndexError:`
    # evaluates the `or` first and only ever catches SMTPAuthenticationError;
    # a missing "," in the entry (IndexError) fell through to the generic
    # handler. A tuple catches both as "bad credentials".
    except (smtplib.SMTPAuthenticationError, IndexError):
        emailsSent["text"] = "Bad credentials."
        badCred = 1
        threading.Thread(target=turnoff).start()
    except Exception:
        # Typo fixed: "And unknown" -> "An unknown".
        emailsSent["text"] = "An unknown error occurred!"
        badCred = 1
        threading.Thread(target=turnoff).start()
    # NOTE(review): `stop` is never reset to 0, so after pressing Stop the
    # Start button does nothing until the app is restarted — confirm intent.
    while stop == 0:
        if badCred == 1:
            badCred = 0
            break
        try:
            print(credentials)
            em = emailMessage.get("1.0",END).split()
            es = emailSubject.get("1.0",END).split()
            randomNumber = (str(random.randint(0, 9999999)))
            try:
                if var.get() == 0:
                    # BUG FIX: Text.get() requires an index argument; the
                    # original bare .get() raised TypeError unconditionally,
                    # so the non-randomized mode always reported
                    # "Boxes are empty." — read the widget contents properly.
                    msg = f"Subject: {emailSubject.get('1.0', END)}\n\n{emailMessage.get('1.0', END)}".encode(encoding="utf-8")
                else:
                    msg = f"Subject: {es} ({randomNumber})\n\n{em[random.randint(0, len(em) - 1)]}\n({randomNumber})".encode(encoding="utf-8")
            except Exception:
                emailsSent["text"] = "Boxes are empty."
                badCred = 1
                threading.Thread(target=turnoff).start()
                break
            try:
                server.sendmail(f"{emailSender.get()}", emailReceiver.get(), msg)
            except Exception:
                emailsSent["text"] = "Bad sender's email."
                badCred = 1
                threading.Thread(target=turnoff).start()
                break
            sent += 1
            emailsSent["text"] = f"Emails sent: {str(sent)}"
            if stop == 1:
                emailsSent["text"] = ""
                sent = 0
                break
            time.sleep(0.3)
        except smtplib.SMTPSenderRefused:
            # Rate-limited by the server: reconnect and back off for a minute.
            print("eror XDXD")
            server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
            server.login(f"{credentials[0]}", f"{credentials[1]}")
            if stop == 1:
                emailsSent["text"] = ""
                sent = 0
                break
            time.sleep(60)
        except smtplib.SMTPAuthenticationError:
            emailsSent["text"] = "Wrong password!"
            badCred = 1
            threading.Thread(target=turnoff).start()
            break

def stopThread():
    """Signal the spam loop to stop."""
    global stop
    stop = 1

def turnoff():
    """Clear the status label after a short delay."""
    time.sleep(2)
    emailsSent["text"] = ""

# --- GUI construction (module level) ---
app = Tk()
var = IntVar()
app.geometry("945x400")
app.title("TRASH BOMBER")
app.resizable(False, False)

ttk.Label(app, text="Subjects:", font=("Arial", 12)).grid(row=0, column=0, pady=20)
emailSubject = Text(app, width=40, height=20)
emailSubject.grid(row=1, column=0, padx=10, rowspan=200)

ttk.Label(app, text="Messages:", font=("Arial", 12)).grid(row=0, column=1, pady=20)
emailMessage = Text(app, width=40, height=20)
emailMessage.grid(row=1, column=1, padx=10, rowspan=200)

ttk.Label(app, text="Sender's email and password\n (separate by \",\"):", font=("Arial", 12)).grid(row=0, column=2, columnspan=2)
emailSender = ttk.Entry(app, width=40)
emailSender.grid(row=1, column=2, columnspan=2)

ttk.Label(app, text="Receiver's email:", font=("Arial", 12)).grid(row=2, column=2, pady=10, columnspan=2)
emailReceiver = ttk.Entry(app, width=40)
emailReceiver.grid(row=3, column=2, pady=5, columnspan=2)

randomNum = ttk.Checkbutton(app, text="Include random number at the\nend to prevent message stacking", takefocus=0, variable=var)
randomNum.grid(row=6, column=2, pady=5, columnspan=2)

button_start = ttk.Button(text="Start", width=19, takefocus=0, command=lambda: threading.Thread(target=spam).start())
button_start.grid(row=7, column=2)
button_stop = ttk.Button(text="Stop", width=19, takefocus=0, command=stopThread)
button_stop.grid(row=7, column=3)

emailsSent = Label(font=("Arial", 12))
emailsSent.grid(row=8, column=2, columnspan=2, pady=20)

app.mainloop()
futures.py
"""Tools for dealing with asynchronous execution""" from globus_sdk import GlobusAPIError from concurrent.futures import Future from threading import Thread from time import sleep class DLHubFuture(Future): """Utility class for simplifying asynchronous execution in DLHub""" def __init__(self, client, task_id: str, ping_interval: float): """ Args: client (DLHubClient): Already-initialized client, used to check task_id (str): Set the task ID of the ping_interval (float): How often to ping the server to check status in seconds """ super().__init__() self.client = client self.task_id = task_id self.ping_interval = ping_interval # List of pending statuses returned by funcX. # TODO: Replace this once funcX stops raising exceptions when a task is pending. self.pending_statuses = ["received", "waiting-for-ep", "waiting-for-nodes", "waiting-for-launch", "running"] # Once you create this, the task has already started self.set_running_or_notify_cancel() # Forcing the ping interval to be no less than 1s if ping_interval < 1: assert AttributeError('Ping interval must be at least 1 second') # Start a thread that polls status self._checker_thread = Thread(target=DLHubFuture._ping_server, args=(self,), daemon=True) self._checker_thread.start() def _ping_server(self): while True: sleep(self.ping_interval) try: if not self.running(): break except GlobusAPIError: # Keep pinging even if the results fail continue def running(self): if super().running(): # If the task isn't already completed, check if it is still running try: status = self.client.get_result(self.task_id, verbose=True) except Exception as e: # Check if it is "Task pending". funcX throws an exception on pending. if e.args[0] in self.pending_statuses: return True else: self.set_exception(e) return False if isinstance(status, tuple): # TODO pass in verbose setting? 
self.set_result(status[0]) return False return False def stop(self): """Stop the execution of the function""" # TODO (lw): Should be attempt to cancel the execution of the task on DLHub? self.set_exception(Exception('Cancelled by user'))
views.py
# -*- coding: UTF-8 -*-
# Django views for the SQL review/execution platform (archer-based DBOM).
import re, time
import simplejson as json
from threading import Thread
from collections import OrderedDict
from django.db.models import Q, F
from django.db import connection, transaction
from django.utils import timezone
from django.conf import settings
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from .dao import Dao
from .api import ServerError, pages
from .const import Const, WorkflowDict
from .inception import InceptionDao
from .aes_decryptor import Prpcrypt
from .models import users, UserGroup, master_config, AliyunRdsConfig, workflow, slave_config, QueryPrivileges, Group, \
    QueryPrivilegesApply, ProjectResource, GroupQueryPrivileges
from .workflow import Workflow
from .permission import role_required, superuser_required
from .sqlreview import getDetailUrl, execute_call_back, execute_skipinc_call_back
from .jobs import job_info, del_sqlcronjob
from .pycrypt import MyCrypt
from .projectresource import integration_resource, get_resource , PermissionVerification, get_query_permisshion
from .query import get_query_clustername
from archer.settings import HASH_KEY
import logging

logger = logging.getLogger('default')

# Module-level singletons shared by all views.
dao = Dao()
inceptionDao = InceptionDao()
prpCryptor = Prpcrypt()
workflowOb = Workflow()


# Login: redirect to the external ITOM access portal for authentication.
def login(request):
    access_itom_addr = settings.ACCESS_ITOM_ADDR
    return HttpResponseRedirect('%s/login/'%(access_itom_addr))
    # return render(request, 'login.html')


# Logout: drop session keys and redirect to the ITOM portal's logout.
def logout(request):
    access_itom_addr = settings.ACCESS_ITOM_ADDR
    if request.session.get('login_username', False):
        del request.session['login_username']
    if request.session.get('resource_status', False):
        del request.session['resource_status']
    return HttpResponseRedirect('%s/logout/'%(access_itom_addr))
    # return render(request, 'login.html')


# SQL workflow (ticket) list page.
def allworkflow(request):
    context = {'currentMenu': 'allworkflow'}
    return render(request, 'allWorkflow.html', context)


# SQL submission page.
def submitSql(request):
    # Master connection configs; force the admin to create one if none exist.
    masters = master_config.objects.all().order_by('cluster_name')
    if len(masters) == 0:
        return HttpResponseRedirect('/admin/sql/master_config/add/')
    # Current user and their permission helper.
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    pv = PermissionVerification(loginUser, loginUserOb)
    # Project groups the user belongs to.
    context = pv.get_group_info()
    if context["status"] == 1:
        group_list = context["data"]
    else:
        errMsg = context["msg"]
        return render(request, 'error.html', {'errMsg': errMsg})
    # Clusters the user's groups are allowed to use.
    context = pv.get_cluster_info(masters)
    if context["status"] == 1:
        listAllClusterName = context["data"]
    else:
        errMsg = context["msg"]
        return render(request, 'error.html', {'errMsg': errMsg})
    # All active users, offered as notification targets.
    active_user = users.objects.filter(is_active=1)
    context = {'currentMenu': 'allworkflow', 'listAllClusterName': listAllClusterName,
               'active_user': active_user, 'group_list': group_list}
    return render(request, 'submitSql.html', context)


# Submit SQL to inception for automatic review and create/refresh the ticket.
def autoreview(request):
    workflowid = request.POST.get('workflowid')
    sqlContent = request.POST['sql_content']
    workflowName = request.POST['workflow_name']
    group_name = request.POST['group_name']
    group_id = Group.objects.get(group_name=group_name).group_id
    clusterName = request.POST['cluster_name']
    db_name = request.POST.get('db_name')
    isBackup = request.POST['is_backup']
    reviewMan = request.POST.get('workflow_auditors')
    notify_users = request.POST.getlist('notify_users')

    # Server-side parameter validation.
    if sqlContent is None or workflowName is None or clusterName is None or db_name is None or isBackup is None or reviewMan is None:
        context = {'errMsg': '页面提交参数可能为空'}
        return render(request, 'error.html', context)
    # Strip comment-only lines ("-- ..." and "/* ... */;") once per line.
    sqlContent = ''.join(
        map(lambda x: re.compile(r'(^--.*|^/\*.*\*/;[\f\n\r\t\v\s]*$)').sub('', x, count=1),
            sqlContent.splitlines(1))).strip()
    # Collapse blank lines.
    sqlContent = re.sub('[\r\n\f]{2,}', '\n', sqlContent)
    if sqlContent[-1] != ";":
        context = {'errMsg': "SQL语句结尾没有以;结尾,请后退重新修改并提交!"}
        return render(request, 'error.html', context)
    # Resource-permission check (superusers use mode 1, others mode 0).
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    pv = PermissionVerification(loginUser, loginUserOb)
    if loginUserOb.is_superuser:
        reviewResult = pv.check_resource_priv(sqlContent, clusterName, db_name, 1)
    else:
        reviewResult = pv.check_resource_priv(sqlContent, clusterName, db_name, 0)
    result = reviewResult["data"]
    if reviewResult["status"] == 1:
        context = {'errMsg': reviewResult["msg"]}
        return render(request, 'error.html', context)
    if result is None or len(result) == 0:
        context = {'errMsg': 'inception返回的结果集为空!可能是SQL语句有语法错误'}
        return render(request, 'error.html', context)
    # Store the inception result as JSON for the detail page.
    jsonResult = json.dumps(result)
    # If any row failed auto-review, the ticket is "autoreviewwrong";
    # otherwise it waits for manual review. errlevel 2 is a fatal error.
    workflowStatus = Const.workflowStatus['manreviewing']
    for row in result:
        if row[2] == 2:
            workflowStatus = Const.workflowStatus['autoreviewwrong']
            break
        elif re.match(r"\w*comments\w*", row[4]):
            workflowStatus = Const.workflowStatus['autoreviewwrong']
            break
    # Create/update the ticket inside a transaction for consistency.
    try:
        with transaction.atomic():
            engineer = request.session.get('login_username', False)
            # NOTE(review): the local `Workflow` below shadows the imported
            # `Workflow` class inside this function — confirm intentional.
            if not workflowid:
                Workflow = workflow()
                Workflow.create_time = timezone.now()
            else:
                Workflow = workflow.objects.get(id=int(workflowid))
            Workflow.workflow_name = workflowName
            Workflow.group_id = group_id
            Workflow.group_name = group_name
            Workflow.engineer = engineer
            Workflow.review_man = reviewMan
            Workflow.status = workflowStatus
            Workflow.is_backup = isBackup
            Workflow.review_content = jsonResult
            Workflow.cluster_name = clusterName
            Workflow.db_name = db_name
            Workflow.sql_content = sqlContent
            Workflow.execute_result = ''
            Workflow.audit_remark = ''
            Workflow.save()
            workflowId = Workflow.id
            # Only tickets passing auto-review enter the audit workflow
            # (query-privilege applications use workflow_type=2); CC the
            # selected notification users.
            if workflowStatus == Const.workflowStatus['manreviewing']:
                listCcAddr = [email['email'] for email in
                              users.objects.filter(username__in=notify_users).values('email')]
                workflowOb.addworkflowaudit(request, WorkflowDict.workflow_type['sqlreview'], workflowId,
                                            listCcAddr=listCcAddr)
    except Exception as msg:
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':0}))


# Ticket detail page: shows the review/execution result and allows manual
# review; once approved the SQL can be executed.
def detail(request, workflowId, workflowType):
    workflowDetail = get_object_or_404(workflow, pk=workflowId)
    # Finished/failed non-manual tickets show the execution result,
    # everything else shows the review result.
    if workflowDetail.status in (Const.workflowStatus['finish'], Const.workflowStatus['exception']) \
            and workflowDetail.is_manual == 0:
        listContent = json.loads(workflowDetail.execute_result)
    else:
        listContent = json.loads(workflowDetail.review_content)
    # Reviewers (comma-separated in the model).
    reviewMan = workflowDetail.review_man
    reviewMan = reviewMan.split(',')
    # Current reviewer, if an audit record exists.
    try:
        current_audit_user = workflowOb.auditinfobyworkflow_id(workflow_id=workflowId,
                                                               workflow_type=WorkflowDict.workflow_type['sqlreview']
                                                               ).current_audit_user
    except Exception:
        current_audit_user = None
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    # For scheduled tickets, show the next run time of the cron job.
    if workflowDetail.status == Const.workflowStatus['tasktiming']:
        job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
        job = job_info(job_id)
        if job:
            run_date = job.next_run_time
        else:
            run_date = ''
    else:
        run_date = ''
    # Inception result columns.
    column_list = ['ID', 'stage', 'errlevel', 'stagestatus', 'errormessage', 'SQL', 'Affected_rows', 'sequence',
                   'backup_dbname', 'execute_time', 'sqlsha1']
    rows = []
    for row_index, row_item in enumerate(listContent):
        row = {}
        row['ID'] = row_index + 1
        row['stage'] = row_item[1]
        row['errlevel'] = row_item[2]
        row['stagestatus'] = row_item[3]
        row['errormessage'] = row_item[4]
        row['SQL'] = row_item[5]
        row['Affected_rows'] = row_item[6]
        row['sequence'] = row_item[7]
        row['backup_dbname'] = row_item[8]
        row['execute_time'] = row_item[9]
        row['sqlsha1'] = row_item[10]
        rows.append(row)

        # While executing, replace the status cell with a live progress bar
        # and a stop button for the pt-OSC process (row is already in rows;
        # mutating it here still updates the appended dict).
        if workflowDetail.status == '执行中':
            row['stagestatus'] = ''.join(
                ["<div id=\"td_" + str(row['ID']) + "\" class=\"form-inline\">",
                 " <div class=\"progress form-group\" style=\"width: 80%; height: 18px; float: left;\">",
                 " <div id=\"div_" + str(row['ID']) + "\" class=\"progress-bar\" role=\"progressbar\"",
                 " aria-valuenow=\"60\"",
                 " aria-valuemin=\"0\" aria-valuemax=\"100\">",
                 " <span id=\"span_" + str(row['ID']) + "\"></span>",
                 " </div>",
                 " </div>",
                 " <div class=\"form-group\" style=\"width: 10%; height: 18px; float: right;\">",
                 " <form method=\"post\">",
                 " <input type=\"hidden\" name=\"workflowid\" value=\"" + str(workflowDetail.id) + "\">",
                 " <button id=\"btnstop_" + str(row['ID']) + "\" value=\"" + str(row['ID']) + "\"",
                 " type=\"button\" class=\"close\" style=\"display: none\" title=\"停止pt-OSC进程\">",
                 " <span class=\"glyphicons glyphicons-stop\">&times;</span>",
                 " </button>",
                 " </form>",
                 " </div>",
                 "</div>"])
    context = {'currentMenu': 'allworkflow', 'workflowDetail': workflowDetail, 'column_list': column_list,
               'rows': rows, 'reviewMan': reviewMan, 'current_audit_user': current_audit_user,
               'loginUserOb': loginUserOb, 'run_date': run_date}
    # workflowType 1 means the HASH-authenticated (login-free) template.
    if int(workflowType) == 1:
        return render(request, 'detailhash.html', context)
    else:
        return render(request, 'detail.html', context)


# Approve the ticket without executing it.
def passonly(request):
    workflowId = request.POST['workflowid']
    workflowType = request.POST.get('workflowtype',0)
    if workflowId == '' or workflowId is None:
        context = {'errMsg': 'workflowId参数为空.'}
        return render(request, 'error.html', context)
    workflowId = int(workflowId)
    workflowDetail = workflow.objects.get(id=workflowId)
    reviewMan = workflowDetail.review_man
    reviewMan = reviewMan.split(',')
    # Server-side double check: the user performing the manual review must
    # be a reviewer (prevents bypassing the UI with API tools).
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    if loginUser is None or (loginUser not in reviewMan and loginUserOb.is_superuser != 1):
        context = {'errMsg': '当前登录用户不是审核人,请重新登录.'}
        return render(request, 'error.html', context)
    # Server-side double check: ticket must be awaiting manual review.
    if workflowDetail.status != Const.workflowStatus['manreviewing']:
        context = {'errMsg': '当前工单状态不是等待人工审核中,请刷新当前页面!'}
        return render(request, 'error.html', context)
    # Keep workflow engine and business table consistent with a transaction.
    try:
        with transaction.atomic():
            # Approve via the workflow engine using its audit_id.
            audit_id = workflowOb.auditinfobyworkflow_id(workflow_id=workflowId,
                                                         workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
            auditresult = workflowOb.auditworkflow(request, audit_id, WorkflowDict.workflow_status['audit_success'],
                                                   loginUser, '')
            # Mirror the audit result onto the ticket.
            if auditresult['data']['workflow_status'] == WorkflowDict.workflow_status['audit_success']:
                # Mark approved and stamp reviewok_time.
                workflowDetail.status = Const.workflowStatus['pass']
                workflowDetail.reviewok_time = timezone.now()
                workflowDetail.audit_remark = ''
                workflowDetail.save()
    except Exception as msg:
        context = {'errMsg': msg}
        if int(workflowType) == 1:
            return HttpResponse(context['errMsg'])
        else:
            return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':workflowType}))


# Execute an already-approved ticket.
def executeonly(request):
    workflowId = request.POST['workflowid']
    if workflowId == '' or workflowId is None:
        context = {'errMsg': 'workflowId参数为空.'}
        return render(request, 'error.html', context)
    workflowId = int(workflowId)
    workflowDetail = workflow.objects.get(id=workflowId)
    clusterName = workflowDetail.cluster_name
    db_name = workflowDetail.db_name
    url = getDetailUrl(request) + str(workflowId) + '/'
    reviewMan = workflowDetail.review_man
    reviewMan = reviewMan.split(',')
    # Server-side double check: must be a reviewer, the submitter, or a DBA.
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    if loginUser is None or (loginUser not in reviewMan and loginUser != workflowDetail.engineer and loginUserOb.role != 'DBA'):
        context = {'errMsg': '当前登录用户不是审核人或者提交人,请重新登录.'}
        return render(request, 'error.html', context)
    # Server-side double check: ticket must be in the approved state.
    if workflowDetail.status != Const.workflowStatus['pass']:
        context = {'errMsg': '当前工单状态不是审核通过,请刷新当前页面!'}
        return render(request, 'error.html', context)
    # Mark as executing and stamp reviewok_time.
    workflowDetail.status = Const.workflowStatus['executing']
    workflowDetail.reviewok_time = timezone.now()
    # Re-split and re-check before executing to refresh the SHA1 cache;
    # if another process does this mid-execution, inception can core dump.
    try:
        splitReviewResult = inceptionDao.sqlautoReview(workflowDetail.sql_content,
                                                       workflowDetail.cluster_name,
                                                       db_name,
                                                       isSplit='yes')
    except Exception as msg:
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    workflowDetail.review_content = json.dumps(splitReviewResult)
    try:
        workflowDetail.save()
    except Exception:
        # Reconnect after closing, to recover from a timed-out connection.
        connection.close()
        workflowDetail.save()
    # Execute asynchronously via callback to avoid a stuck "executing" state.
    t = Thread(target=execute_call_back, args=(workflowId, clusterName, url))
    t.start()
    return HttpResponseRedirect(reverse('sql:detail', kwargs={ 'workflowId':workflowId, 'workflowType':0 }))


# Execute SQL bypassing inception — only for syntax inception cannot
# handle; use with caution. DBA role required.
@role_required(('DBA',))
def execute_skipinc(request):
    workflowId = request.POST['workflowid']
    # Load the ticket.
    workflowId = int(workflowId)
    workflowDetail = workflow.objects.get(id=workflowId)
    sql_content = workflowDetail.sql_content
    clusterName = workflowDetail.cluster_name
    url = getDetailUrl(request) + str(workflowId) + '/'
    # Server-side double check: status must allow skipping inception.
    if workflowDetail.status not in [Const.workflowStatus['manreviewing'], Const.workflowStatus['pass'],
                                     Const.workflowStatus['autoreviewwrong']]:
        context = {'errMsg': '当前工单状态不是自动审核不通过,请刷新当前页面!'}
        return render(request, 'error.html', context)
    # Mark the ticket as executing.
    workflowDetail = workflow.objects.get(id=workflowId)
    workflowDetail.status = Const.workflowStatus['executing']
    workflowDetail.reviewok_time = timezone.now()
    workflowDetail.save()
    # Execute asynchronously via callback to avoid a stuck "executing" state.
    t = Thread(target=execute_skipinc_call_back, args=(workflowId, clusterName, sql_content, url))
    t.start()
    return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':0}))


# Abort (by submitter) or reject (by reviewer) a ticket.
def cancel(request):
    workflowId = request.POST['workflowid']
    workflowType = request.POST.get('workflowtype', 0)
    if workflowId == '' or workflowId is None:
        context = {'errMsg': 'workflowId参数为空.'}
        return render(request, 'error.html', context)
    workflowId = int(workflowId)
    workflowDetail = workflow.objects.get(id=workflowId)
    reviewMan = workflowDetail.review_man
    reviewMan = reviewMan.split(',')
    audit_remark = request.POST.get('audit_remark')
    if audit_remark is None:
        context = {'errMsg': '驳回原因不能为空'}
        return render(request, 'error.html', context)
    # Server-side double check: the user must be the submitter, a reviewer,
    # or a DBA, otherwise reject the request.
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    if loginUser is None or (loginUser not in reviewMan and loginUser != workflowDetail.engineer and loginUserOb.role != 'DBA'):
        context = {'errMsg': '当前登录用户不是审核人也不是提交人,请重新登录.'}
        return render(request, 'error.html', context)
    # Server-side double check: a finished/terminal ticket cannot be cancelled.
    if workflowDetail.status in (
            Const.workflowStatus['abort'], Const.workflowStatus['finish'], Const.workflowStatus['autoreviewwrong'],
            Const.workflowStatus['exception']):
        return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':workflowType}))
    # Keep workflow engine and business table consistent with a transaction.
    try:
        with transaction.atomic():
            # Abort if the submitter cancels, reject if a reviewer does.
            audit_id = workflowOb.auditinfobyworkflow_id(workflow_id=workflowId,
                                                         workflow_type=WorkflowDict.workflow_type['sqlreview']).audit_id
            if loginUser == workflowDetail.engineer:
                auditresult = workflowOb.auditworkflow(request, audit_id,
                                                       WorkflowDict.workflow_status['audit_abort'],
                                                       loginUser, audit_remark)
            else:
                auditresult = workflowOb.auditworkflow(request, audit_id,
                                                       WorkflowDict.workflow_status['audit_reject'],
                                                       loginUser, audit_remark)
            # Remove the scheduled-execution cron job, if any.
            if workflowDetail.status == Const.workflowStatus['tasktiming']:
                job_id = Const.workflowJobprefix['sqlreview'] + '-' + str(workflowId)
                del_sqlcronjob(job_id)
            # Mirror the audit result onto the ticket.
            if auditresult['data']['workflow_status'] in (
                    WorkflowDict.workflow_status['audit_abort'], WorkflowDict.workflow_status['audit_reject']):
                # Mark the ticket as manually aborted.
                workflowDetail.status = Const.workflowStatus['abort']
                workflowDetail.audit_remark = audit_remark
                workflowDetail.save()
    except Exception as msg:
        context = {'errMsg': msg}
        if int(workflowType) == 1:
            return HttpResponse(context['errMsg'])
        else:
            return render(request, 'error.html', context)
    return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':workflowId, 'workflowType':workflowType}))


# Show the rollback SQL generated for a ticket.
def rollback(request):
    workflowId = request.GET['workflowid']
    if workflowId == '' or workflowId is None:
        context = {'errMsg': 'workflowId参数为空.'}
        return render(request, 'error.html', context)
    workflowId = int(workflowId)
    try:
        listBackupSql = inceptionDao.getRollbackSqlList(workflowId)
    except Exception as msg:
        context = {'errMsg': msg}
        return render(request, 'error.html', context)
    workflowDetail = workflow.objects.get(id=workflowId)
    workflowName = workflowDetail.workflow_name
    rollbackWorkflowName = "【回滚工单】原工单Id:%s ,%s" % (workflowId, workflowName)
    context = {'listBackupSql': listBackupSql, 'currentMenu': 'sqlworkflow', 'workflowDetail': workflowDetail,
               'rollbackWorkflowName': rollbackWorkflowName}
    return render(request, 'rollback.html', context)


# Mandatory reading for SQL review.
def dbaprinciples(request):
    context = {'currentMenu': 'dbaprinciples'}
    return render(request, 'dbaprinciples.html', context)


# Charts page.
def charts(request):
    context = {'currentMenu': 'charts'}
    return render(request, 'charts.html', context)


# Online SQL query page.
def sqlquery(request):
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    # All slave instances; force the admin to create one if none exist.
    slaves = slave_config.objects.all().order_by('cluster_name')
    if len(slaves) == 0:
        return HttpResponseRedirect('/admin/sql/slave_config/add/')
    # Superusers see every cluster; others only what they may query.
    if loginUserOb.is_superuser:
        listAllClusterName = [ slave.cluster_name for slave in slaves ]
    else:
        listAllClusterName = get_query_clustername(loginUser)
    context = {'currentMenu': 'sqlquery', 'listAllClusterName': listAllClusterName}
    return render(request, 'sqlquery.html', context)


# SQL slow-log page.
def slowquery(request):
    # All master instance names.
    masters = master_config.objects.all().order_by('cluster_name')
    if len(masters) == 0:
        return HttpResponseRedirect('/admin/sql/master_config/add/')
    cluster_name_list = [master.cluster_name for master in masters]
    context = {'currentMenu': 'slowquery', 'tab': 'slowquery', 'cluster_name_list': cluster_name_list}
    return render(request, 'slowquery.html', context)


# SQL advisor (optimization tool) page.
def sqladvisor(request):
    # All master instance names.
    masters = master_config.objects.all().order_by('cluster_name')
    if len(masters) == 0:
        return HttpResponseRedirect('/admin/sql/master_config/add/')
    cluster_name_list = [master.cluster_name for master in masters]
    context = {'currentMenu': 'sqladvisor', 'listAllClusterName': cluster_name_list}
    return render(request, 'sqladvisor.html', context)


# Query-privilege application list.
def queryapplylist(request):
    slaves = slave_config.objects.all().order_by('cluster_name')
    # Current user and the groups they belong to.
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    groupname_list = [ group['group_name'] for group in
                       UserGroup.objects.filter(user_name=loginUser).values('group_name') ]
    # All slave instance names.
    listAllClusterName = [slave.cluster_name for slave in slaves]
    if len(slaves) == 0:
        return HttpResponseRedirect('/admin/sql/slave_config/add/')
    # Only groups the user belongs to (previously: all groups).
    # group_list = Group.objects.all().annotate(id=F('group_id'),
    #                                           name=F('group_name'),
    #                                           parent=F('group_parent_id'),
    #                                           level=F('group_level')
    #                                           ).values('id', 'name', 'parent', 'level')
    group_list = Group.objects.filter(group_name__in=groupname_list).annotate(id=F('group_id'),
                                                                              name=F('group_name'),
                                                                              parent=F('group_parent_id'),
                                                                              level=F('group_level')
                                                                              ).values('id', 'name', 'parent', 'level')
    group_list = [group for group in group_list]
    if len(group_list) == 0 and loginUserOb.is_superuser == False:
        errMsg = '您尚未属于任何项目组,请与管理员联系.'
        return render(request, 'error.html', {'errMsg': errMsg})
    # elif len(group_list) == 0 and loginUserOb.is_superuser == True:
    #     return HttpResponseRedirect('/config/')
    context = {'currentMenu': 'queryapply', 'listAllClusterName': listAllClusterName, 'group_list': group_list}
    return render(request, 'queryapplylist.html', context)


# Query-privilege application detail.
def queryapplydetail(request, apply_id, audit_type):
    workflowDetail = QueryPrivilegesApply.objects.get(apply_id=apply_id)
    # Current reviewer for this application.
    audit_info = workflowOb.auditinfobyworkflow_id(workflow_id=apply_id,
                                                   workflow_type=WorkflowDict.workflow_type['query'])
    context = {'currentMenu': 'queryapply', 'workflowDetail': workflowDetail, 'audit_info': audit_info}
    # audit_type 1 means the HASH-authenticated (login-free) template.
    if int(audit_type) == 1:
        return render(request, 'queryapplydetailhash.html', context)
    else:
        return render(request, 'queryapplydetail.html', context)


# Per-user query-privilege management.
def queryuserprivileges(request):
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    # Users with direct privileges plus users inheriting via group privileges.
    user_list_person = [ user['user_name'] for user in
                         QueryPrivileges.objects.filter(is_deleted=0).values('user_name').distinct() ]
    group_name_list = [ group['group_name'] for group in
                        GroupQueryPrivileges.objects.all().values('group_name').distinct() ]
    user_list_group = [ user['user_name'] for user in
                        UserGroup.objects.filter(group_name__in=group_name_list).values('user_name').distinct() ]
    user_list = user_list_person + user_list_group
    # Sort and de-duplicate.
    user_list = sorted(list(set(user_list)))
    context = {'currentMenu': 'queryapply', 'user_list': user_list, 'loginUserOb': loginUserOb}
    return render(request, 'queryuserprivileges.html', context)


# Per-user execute-privilege management.
def executeuserprivileges(request):
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    # All users.
    user_list = users.objects.all().values("username").distinct()
    context = {'currentMenu': 'queryapply', 'user_list': user_list, 'loginUserOb': loginUserOb}
    return render(request, 'executeuserprivileges.html', context)


# Diagnostics — process list.
def diagnosis_process(request):
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    # All Aliyun RDS instance names.
    masters = AliyunRdsConfig.objects.all().order_by('cluster_name')
    cluster_name_list = [master.cluster_name for master in masters]
    context = {'currentMenu': 'diagnosis', 'tab': 'process', 'cluster_name_list': cluster_name_list,
               'loginUserOb': loginUserOb}
    return render(request, 'diagnosis.html', context)


# Diagnostics — table space. NOTE(review): name is misspelled ("sapce");
# kept as-is because URL routing elsewhere may reference it.
def diagnosis_sapce(request):
    # All Aliyun RDS instance names.
    masters = AliyunRdsConfig.objects.all().order_by('cluster_name')
    cluster_name_list = [master.cluster_name for master in masters]
    context = {'currentMenu': 'diagnosis', 'tab': 'space', 'cluster_name_list': cluster_name_list}
    return render(request, 'diagnosis.html', context)


# Workflow audit list page.
def workflows(request):
    loginUser = request.session.get('login_username', False)
    loginUserOb = users.objects.get(username=loginUser)
    context = {'currentMenu': 'workflow', "loginUserOb": loginUserOb}
    return render(request, "workflow.html", context)


# Workflow audit detail: redirect to the right detail view by workflow type.
def workflowsdetail(request, audit_id):
    auditInfo = workflowOb.auditinfo(audit_id)
    if auditInfo.workflow_type == WorkflowDict.workflow_type['query']:
        return HttpResponseRedirect(reverse('sql:queryapplydetail', kwargs={'apply_id':auditInfo.workflow_id, 'audit_type':0}))
    elif auditInfo.workflow_type == WorkflowDict.workflow_type['sqlreview']:
        return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':auditInfo.workflow_id, 'workflowType':0}))


# Workflow audit via HASH authentication: login-free quick review, e.g.
# http://192.168.123.110:8080/workflowshash/?timestamp=454545&hash=kkkkkkkk
def workflowsdetailhash(request):
    timestamp, uuid, audit_id = None, None, None
    dbom_host = request.scheme + "://" + request.get_host() + "/login/"
    timestamp_before = request.GET.get('timestamp', '')
    hash_encode = request.GET.get('hash', '')
    timestamp_after = int(time.time())
    # Decrypt the hash into (timestamp, uuid, audit_id).
    try:
        crypter = MyCrypt(HASH_KEY)
        hash_text = crypter.decrypt(hash_encode)
        hash_text_list = hash_text.split(',')
        timestamp = hash_text_list[0]
        uuid = hash_text_list[1]
        audit_id = hash_text_list[2]
    except Exception as e:
        errMsg = "HASH鉴权失败,请确保HASH值正常。"
        return HttpResponse(errMsg)
    # Reject tampered or expired (>1h) links.
    if int(timestamp_before) != int(timestamp) or (int(timestamp_after) - int(timestamp)) > 3600:
        errMsg = "链接已经超过1小时或TIMESTAMP被修改,请登录DBOM(%s)进行审核。" % (dbom_host)
        return HttpResponse(errMsg)
    # Authenticate by uuid and open a short-lived (300 s) session.
    loginUserOb = users.objects.get(uuid=uuid)
    login_username = loginUserOb.username
    if not loginUserOb:
        errMsg = "用户鉴权失败,请登录DBOM(%s)进行审核。" % (dbom_host)
        return HttpResponse(errMsg)
    else:
        request.session['login_username'] = login_username
        request.session.set_expiry(300)
    # Redirect to the right detail view by workflow type.
    auditInfo = workflowOb.auditinfo(audit_id)
    if auditInfo.workflow_type == WorkflowDict.workflow_type['query']:
        return HttpResponseRedirect(reverse('sql:queryapplydetail', kwargs={'apply_id':auditInfo.workflow_id, 'audit_type':1}))
    elif auditInfo.workflow_type == WorkflowDict.workflow_type['sqlreview']:
        return HttpResponseRedirect(reverse('sql:detail', kwargs={'workflowId':auditInfo.workflow_id, 'workflowType':1}))


# Configuration management (superusers only).
@superuser_required
def config(request):
    # All project groups, with per-group member counts attached.
    group_list = Group.objects.all().annotate(id=F('group_id'),
                                              name=F('group_name'),
                                              parent=F('group_parent_id'),
                                              level=F('group_level'),
                                              leader=F('group_leader')
                                              ).values('id', 'name', 'parent', 'level', 'leader')
    for group_name in group_list:
        members_num = UserGroup.objects.filter(group_name=group_name['name']).count()
        group_name['members_num'] = members_num
    group_list = [group for group in group_list]
    # All active users.
    user_list = users.objects.filter(is_active=1).values('username', 'display')
    context = {'currentMenu': 'config', 'group_list': group_list, 'user_list': user_list,
               'WorkflowDict': WorkflowDict}
    group_list, p, groups, page_range, current_page, show_first, show_end, contacts = pages(group_list, request)
return render(request, 'config.html', locals()) # 配置项目组信息 @csrf_exempt def configGroup(request): context = { 'status': 1, 'msg':'', 'data': {}} # 1是成功,0是失败 if request.method == "POST": operation_type = request.POST.get('operation_type', None) project_name = request.POST.get('project_name', None) project_auditors = request.POST.get('project_auditors', None) if operation_type == "project_add": try: if not project_name or len(project_name) == 0: msg = u'项目名称不能为空' raise ServerError(msg) elif not project_auditors or len(project_auditors) == 0: msg = u'请选择项目负责人' raise ServerError(msg) except ServerError as e: context['status'] = 0 context['msg'] = e.message logger.error('项目添加出错:%s'%e.message) else: try: # 添加组信息 group_default_dict = { 'group_name': project_name, 'group_leader': project_auditors } group_obj, group_created = Group.objects.get_or_create(group_name=project_name, group_leader=project_auditors, defaults=group_default_dict) logger.info('project add obj: %s created: %s' % (group_obj, group_created)) # 添加用户与组对应关系表 usergroup_default_dict = { 'group_name': project_name, 'user_name': project_auditors } usergroup_obj, usergroup_created = UserGroup.objects.get_or_create(group_name=project_name, user_name=project_auditors, defaults=usergroup_default_dict) logger.info('Relationship between the project and the user add obj: %s created: %s' % (usergroup_obj, usergroup_created)) # 配置项目成员 users_list_select_web = request.POST.getlist('users_selected', []) configGroupMembers(project_name, users_list_select_web) context['status'] = 1 context['msg'] = '项目组添加成功' logger.info('Project add %s is success.'%project_name) except Exception as e: context['status'] = 0 serache_result = re.search('Duplicate entry',str(e)) if serache_result: context['msg'] = '项目组已经存在' else: context['msg'] = '项目组添加失败' logger.info('Project add %s is failed. 
{ %s }'%(project_name, e)) elif operation_type == "project_del": project_id = request.POST.get('project_id', None) project_name = Group.objects.get(group_id=project_id) try: # 删除组信息 Group.objects.filter(group_id=project_id).delete() # 删除组对应的用户信息 UserGroup.objects.filter(group_name=project_name.group_name).delete() context['status'] = 1 context['msg'] = '项目组删除成功' logger.info('Project %s delete success.' % project_name.group_name) except Exception as e: context['status'] = 0 context['msg'] = '项目组删除失败' logger.info('Project %s delete failed. { %s }' %(project_name.group_name, e)) elif operation_type == "get_project": project_dic = {} get_type = request.POST.get('get_type', None) project_id = request.POST.get('project_id', None) try: if get_type == 'edit': # 项目组信息 project_info = Group.objects.get(group_id=project_id) group_name = project_info.group_name user_list = list(users.objects.filter(is_active=1).values('username')) project_dic["group_id"] = project_info.group_id project_dic["group_name"] = group_name project_dic["group_leader"] = project_info.group_leader project_dic["user_list"] = user_list else: group_name = '' # 项目组成员信息 user_list_all = [user['username'] for user in list(users.objects.values('username'))] user_list_select = [user['user_name'] for user in list(UserGroup.objects.filter(group_name=group_name).values('user_name'))] user_list_noselect = [user for user in user_list_all if user not in user_list_select] project_dic["user_list_select"] = user_list_select project_dic["user_list_noselect"] = user_list_noselect context['data'] = project_dic context['status'] = 1 context['msg'] = '获取项目信息成功' logger.info('Get project %s info success.' %group_name) except Exception as e: context['status'] = 0 context['msg'] = '获取项目信息失败' logger.info('Get project info failed. 
{ %s }' %e) elif operation_type == "project_edit": edit_group_id = request.POST.get('edit_group_id', None) edit_project_name = request.POST.get('edit_project_name', None) edit_project_auditors = request.POST.get('edit_project_auditors', None) try: if not edit_project_name or len(edit_project_name) == 0: msg = u'项目名称不能为空' raise ServerError(msg) elif not edit_project_auditors or len(edit_project_auditors) == 0: msg = u'请选择项目负责人' raise ServerError(msg) except ServerError as e: context['status'] = 0 context['msg'] = e.message logger.error('项目更新出错:%s'%e.message) else: try: # 更新组信息 obj, created = Group.objects.update_or_create(group_id=edit_group_id, defaults={"group_name":edit_project_name, "group_leader":edit_project_auditors}) logger.info('project update obj: %s created: %s' % (obj, created)) # 配置项目成员 users_list_select_web = request.POST.getlist('users_selected', []) configGroupMembers(edit_project_name, users_list_select_web) context['status'] = 1 context['msg'] = '项目组更新成功' logger.info('Project ID %s update success.' % edit_group_id) except Exception as e: context['status'] = 0 serache_result = re.search('Duplicate entry', str(e)) if serache_result: context['msg'] = '项目组已经存在' else: context['msg'] = '项目组更新失败' logger.info('Project ID %s update failed. 
{ %s }' %(edit_group_id, e)) return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded") # 配置项目成员 @csrf_exempt def configGroupMembers(group_name, users_list_select_web): user_list_select = [ user['user_name'] for user in list(UserGroup.objects.filter(group_name=group_name).values('user_name')) ] insert_users_list = [ user for user in users_list_select_web if user not in user_list_select ] del_users_list = [ user for user in user_list_select if user not in users_list_select_web ] # 插入新增 for user in insert_users_list: obj, created = UserGroup.objects.get_or_create(group_name=group_name, user_name=user, defaults={'group_name':group_name, 'user_name':user}) logger.info('group members insert obj: %s created: %s'%(obj, created)) logger.info('group members insert data %s'%insert_users_list) # 删除剔除 for user in del_users_list: UserGroup.objects.filter(group_name=group_name, user_name=user).delete() logger.info('group members delete data %s' % del_users_list) # 获取项目资源 @csrf_exempt def projectresource(request): currentMenu = 'projectresource' context = {'status': 1, 'msg': '', 'data': {}} # 1是成功,0是失败 # 获取用户信息 loginUser = request.session.get('login_username', False) loginUserOb = users.objects.get(username=loginUser) # 获取项目集群 listAllCluster = slave_config.objects.all().order_by('cluster_name') listAllClusterName = [ str(cluster.cluster_name) for cluster in listAllCluster ] if request.session.get('resource_status', 0) == 0: logger.debug('异步整合现网表资源信息中...') # 采取异步回调的方式进行资源整合,防止出现持续执行中的异常 t = Thread(target=integration_resource, args=(listAllClusterName,)) t.start() request.session['resource_status'] = 1 # 获取当前用户所管理的项目列表 if loginUserOb.is_superuser: user_project_list = [ group["group_name"] for group in Group.objects.all().values("group_name").distinct() ] else: user_project_list = [ group["group_name"] for group in Group.objects.filter(group_leader=loginUser).values("group_name").distinct() ] if request.method == "POST": limitStart = 
int(request.POST.get('offset',0)) pageSize = int(request.POST.get('pageSize',0)) project_name = request.POST.get('project_name',None) cluster_name = request.POST.get('cluster_name',None) db_name = request.POST.get('db_name',None) search = request.POST.get('search',None) config_type = request.POST.get('config_type',None) if config_type == "change_cluster": listDatabase = [] if cluster_name: # 获取实列所有库信息 listDatabase = [ row['db_name'] for row in list(ProjectResource.objects.filter(cluster_name=cluster_name).values('db_name').distinct()) ] return HttpResponse(json.dumps(listDatabase), content_type="application/x-www-form-urlencoded") elif config_type == "get_resource": resource_id = request.POST.get('resource_id',None) project_name = request.POST.get('project_name',None) if not project_name or len(project_name) == 0: context['status'] = 0 context['msg'] = '请选择需要获取权限的项目' else: try: group_list_str = ProjectResource.objects.get(id=resource_id).group_list if len(group_list_str) > 0: group_list_tmp = group_list_str.split(",") else: group_list_tmp = [] group_list_tmp.append(project_name) group_list = ','.join(group_list_tmp) # 更新资源列表信息 ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list':group_list}) context['status'] = 1 context['data'] = group_list logger.info('Get resource ID %s is success.'%resource_id) except Exception as e: context['status'] = 0 context['msg'] = '资源获取失败' logger.error('Get resource ID %s is filed. 
{ %s }' %(resource_id, e)) return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded") elif config_type == "get_db_all_resource": group_name = request.POST.get('group_name', None) cluster_name = request.POST.get('cluster_name',None) db_name = request.POST.get('db_name', None) if not group_name or len(group_name) == 0: context['status'] = 0 context['msg'] = '请选择项目组' elif not cluster_name or len(cluster_name) == 0: context['status'] = 0 context['msg'] = '请选择数据库实例' elif not db_name or len(db_name) == 0: context['status'] = 0 context['msg'] = '请选择数据库' else: try: group_info_list = list(ProjectResource.objects.filter(cluster_name=cluster_name, db_name=db_name).values('id', 'group_list')) for group_info in group_info_list: resource_id = group_info['id'] group_list_str = group_info['group_list'] if len(group_list_str) > 0: group_list_tmp = group_list_str.split(",") else: group_list_tmp = [] if group_name not in group_list_tmp: group_list_tmp.append(group_name) group_list = ','.join(group_list_tmp) # 更新资源列表信息 ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list':group_list}) context['status'] = 1 context['data'] = group_list logger.info('Get resource ID %s is success.'%resource_id) logger.info('Get whole database %s resource is success.' % db_name) except Exception as e: context['status'] = 0 context['msg'] = '整库资源获取失败' logger.error('Get whole database %s resource is filed. 
{ %s }' %(db_name, e)) return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded") elif config_type == "del_resource": resource_id = request.POST.get('resource_id',None) project_name = request.POST.get('project_name',None) if not project_name or len(project_name) == 0: context['status'] = 0 context['msg'] = '请先选择项目' else: try: group_list_tmp = (ProjectResource.objects.get(id=resource_id).group_list).split(",") group_list_tmp.remove(project_name) group_list = ','.join(group_list_tmp) ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list':group_list}) context['status'] = 1 context['data'] = group_list logger.info('Delete resource ID %s is success.'%resource_id) except Exception as e: context['status'] = 0 context['msg'] = '资源清除失败' logger.error('Delete resource ID %s is filed. { %s }' %(resource_id, e)) return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded") elif config_type == "del_db_all_resource": group_name = request.POST.get('group_name', None) cluster_name = request.POST.get('cluster_name', None) db_name = request.POST.get('db_name', None) if not group_name or len(group_name) == 0: context['status'] = 0 context['msg'] = '请选择项目组' elif not cluster_name or len(cluster_name) == 0: context['status'] = 0 context['msg'] = '请选择数据库实例' elif not db_name or len(db_name) == 0: context['status'] = 0 context['msg'] = '请选择数据库' else: try: group_info_list = list(ProjectResource.objects.filter(cluster_name=cluster_name, db_name=db_name).values('id','group_list')) for group_info in group_info_list: resource_id = group_info['id'] group_list_str = group_info['group_list'] if len(group_list_str) > 0: group_list_tmp = group_list_str.split(",") else: group_list_tmp = [] if group_name in group_list_tmp: group_list_tmp.remove(group_name) group_list = ','.join(group_list_tmp) # 更新资源列表信息 ProjectResource.objects.update_or_create(id=resource_id, defaults={'group_list': group_list}) context['status'] = 1 
context['data'] = group_list logger.info('Delete resource ID %s is success.' % resource_id) logger.info('Delete whole database %s resource is success.' % db_name) except Exception as e: context['status'] = 0 context['msg'] = '整库资源清除失败' logger.error('Delete whole database %s resource is filed. { %s }' % (db_name, e)) return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded") else: where_list = ['1=1'] if cluster_name: where_list.append('AND cluster_name="%s"'%cluster_name) if db_name: where_list.append('AND db_name="%s"'%db_name) if search: where_list.append('AND ( table_name LIKE "%%%s%%" OR group_list LIKE "%%%s%%" )'%(search, search)) if len(where_list) > 0: where_value = ' '.join(where_list) table = 'project_resource' count_sql = "SELECT COUNT(1) AS rowcount FROM %s WHERE %s;"%(table, where_value) row_sql = "SELECT id,cluster_name,db_name,table_name,group_list FROM %s WHERE %s ORDER by id ASC LIMIT %s,%s;"%(table, where_value, limitStart, pageSize) # 获取资源信息 resource_data = get_resource(count_sql, row_sql, project_name) else: table = 'project_resource' count_sql = "SELECT COUNT(1) AS rowcount FROM %s;"%(table) row_sql = "SELECT id,cluster_name,db_name,table_name,group_list FROM %s ORDER by id ASC LIMIT %s,%s;"%(table, limitStart, pageSize) # 获取资源信息 resource_data = get_resource(count_sql, row_sql , project_name) return HttpResponse(json.dumps(resource_data), content_type="application/x-www-form-urlencoded") group_list = Group.objects.all().annotate(id=F('group_id'), name=F('group_name'), parent=F('group_parent_id'), level=F('group_level') ).values('id', 'name', 'parent', 'level') group_list = [group for group in group_list] return render(request, 'project_config/get_project_group_resource.html', locals()) # 设置项目组的查询权限 @csrf_exempt def groupQueryPermission(request): currentMenu = 'projectresource' context = {'status': 1, 'msg': '', 'data': {}} # 1是成功,0是失败 # 获取用户信息 loginUser = request.session.get('login_username', False) loginUserOb = 
users.objects.get(username=loginUser) # 获取项目集群 listAllCluster = slave_config.objects.all().order_by('cluster_name') listAllClusterName = [ str(cluster.cluster_name) for cluster in listAllCluster ] # 获取当前用户所管理的项目列表 if loginUserOb.is_superuser: user_group_list = [ group["group_name"] for group in Group.objects.all().values("group_name").distinct() ] else: user_group_list = [ group["group_name"] for group in Group.objects.filter(group_leader=loginUser).values("group_name").distinct() ] if request.method == "POST": limitStart = int(request.POST.get('offset',0)) pageSize = int(request.POST.get('pageSize',0)) group_name = request.POST.get('group_name',None) cluster_name = request.POST.get('cluster_name',None) db_name = request.POST.get('db_name',None) search = request.POST.get('search',None) user_group_text = '\"' + '\",\"'.join(user_group_list) + '\"' where_list = ['1=1'] if group_name: where_list.append('AND group_name="%s"' % group_name) else: where_list.append('AND group_name IN (%s)' % user_group_text) if cluster_name: where_list.append('AND cluster_name="%s"' % cluster_name) if db_name: where_list.append('AND db_name="%s"' % db_name) if search: where_list.append('AND ( table_name LIKE "%%%s%%" OR group_name LIKE "%%%s%%" )' % (search, search)) where_value = ' '.join(where_list) table = 'group_query_privileges' count_sql = "SELECT COUNT(1) AS rowcount FROM %s WHERE %s;" % (table, where_value) row_sql = "SELECT privilege_id,group_name,cluster_name,db_name,table_name,valid_date,limit_num FROM %s WHERE %s ORDER by privilege_id ASC LIMIT %s,%s;" % ( table, where_value, limitStart, pageSize) # 获取资源信息 resource_data = get_query_permisshion(count_sql, row_sql) # logger.debug('获取权限资源信息:%s.'%resource_data) return HttpResponse(json.dumps(resource_data), content_type="application/x-www-form-urlencoded") return render(request, 'project_config/set_group_query_permission.html', locals()) # 设置项目组的查询权限 @csrf_exempt def getGroupQueryPermission(request): context = {'status': 1, 'msg': 
'', 'data': {}} # 1是成功,0是失败 group_name = request.POST.get('group_name', None) cluster_name = request.POST.get('cluster_name', None) db_name = request.POST.get('db_name', None) operation_type = request.POST.get('operation_type', None) valid_date = request.POST.get('valid_date', None) limit_num = request.POST.get('limit_num', 1000) table_resource_list = [ table['table_name'] for table in ProjectResource.objects.filter(cluster_name=cluster_name,db_name=db_name).values('table_name') ] permission_table_list = [ table['table_name'] for table in GroupQueryPrivileges.objects.filter(group_name=group_name,cluster_name=cluster_name,db_name=db_name).values('table_name') ] no_permission_table_list = [ table_name for table_name in table_resource_list if table_name not in permission_table_list ] if operation_type == 'resource_save': try: if not group_name or len(group_name) == 0: msg = u'请选择项目组' raise ServerError(msg) elif not cluster_name or len(cluster_name) == 0: msg = u'请选择数据库实列' raise ServerError(msg) elif not db_name or len(db_name) == 0: msg = u'请选择数据库' raise ServerError(msg) elif not valid_date or len(valid_date) == 0: msg = u'请选择授权时间' raise ServerError(msg) elif not limit_num or len(limit_num) == 0: msg = u'请选择查询限制行数' raise ServerError(msg) except ServerError as e: context['status'] = 0 context['msg'] = e.message logger.error('Group premission set error:%s' % e.message) else: try: web_permission_table_list = request.POST.getlist('tables_selected', []) new_permission_table_list = [ table_name for table_name in web_permission_table_list if table_name not in permission_table_list ] del_permission_table_list = [ table_name for table_name in permission_table_list if table_name not in web_permission_table_list ] defaults_data = {'group_name': group_name, 'cluster_name': cluster_name, 'db_name': db_name, 'valid_date': valid_date, 'limit_num': limit_num} # 添加新增数据 for table_name in new_permission_table_list: defaults_data['table_name'] = table_name # 插入数据 
GroupQueryPrivileges.objects.create(**defaults_data) logger.debug('Insert group query permission %s.' % new_permission_table_list) # 删除排除的数据 for table_name in del_permission_table_list: # 删除数据 GroupQueryPrivileges.objects.filter(group_name=group_name,cluster_name=cluster_name,db_name=db_name,table_name=table_name).delete() logger.debug('Delete group query permission %s.' % del_permission_table_list) logger.debug('Save group query permission success.%s'%web_permission_table_list) except Exception as e: context['status'] = 0 context['msg'] = e logger.error('Save group query permission error {%s}.'%e) elif operation_type == 'del_premission': privilege_id = request.POST.get('privilege_id', None) try: # 删除对应权限数据 GroupQueryPrivileges.objects.filter(privilege_id=privilege_id).delete() logger.info("Delete group query permission sucdess.") except Exception as e: context['status'] = 0 context['msg'] = e logger.error('Group premission delete error,:%s' % e) table_resource = {} table_resource['permission_table_list'] = permission_table_list table_resource['no_permission_table_list'] = no_permission_table_list context['data'] = table_resource return HttpResponse(json.dumps(context), content_type="application/x-www-form-urlencoded")
run.py
#! /usr/bin/env python
"""Isolated job runner.

Daemonizes itself, launches a job subprocess under pexpect (optionally wrapped
in an ssh-agent), answers password prompts, enforces idle/job timeouts, and
writes status/rc artifacts for the controlling node to pick up.
"""
import io
import codecs
import base64
import argparse
import collections
import logging
import json
import os
import stat
import signal
import sys
import threading
import time

import pexpect

# psutil is optional: handle_termination() falls back to SIGKILL-ing only the
# parent pid when it is missing. The previous unconditional import made the
# `if not psutil` fallback unreachable (an import failure killed the script).
try:
    import psutil
except ImportError:
    psutil = None

logger = logging.getLogger('cyborgbackup.main.utils.expect')


def args2cmdline(*args):
    """Join subprocess-style arguments into a single command-line string."""
    return ' '.join(args)


def wrap_args_with_ssh_agent(args, ssh_key_path, ssh_auth_sock=None, silence_ssh_add=False):
    """Wrap *args* in an ``ssh-agent sh -c '...'`` invocation.

    The generated shell line ssh-adds each key file, deletes it afterwards,
    then runs the original command. ``ssh_key_path`` may be a single path or a
    list of paths; ``ssh_auth_sock`` pins the agent socket location;
    ``silence_ssh_add`` redirects ssh-add's stderr to /dev/null.
    """
    logger.debug(' '.join(args))
    if ssh_key_path:
        if isinstance(ssh_key_path, list):
            listcmd = []
            for key in ssh_key_path:
                ssh_add_command = args2cmdline('ssh-add', key)
                if silence_ssh_add:
                    ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
                listcmd.append(ssh_add_command)
            # Remove the key material once it has been loaded into the agent.
            listcmd.append(args2cmdline('rm', '-f', key))
            listcmd.append(args2cmdline(*args))
            cmd = ' && '.join(listcmd)
        else:
            ssh_add_command = args2cmdline('ssh-add', ssh_key_path)
            if silence_ssh_add:
                ssh_add_command = ' '.join([ssh_add_command, '2>/dev/null'])
            cmd = ' && '.join([ssh_add_command,
                               args2cmdline('rm', '-f', ssh_key_path),
                               args2cmdline(*args)])
    args = ['ssh-agent']
    if ssh_auth_sock:
        args.extend(['-a', ssh_auth_sock])
    args.extend(['sh', '-c', cmd])
    return args


def open_fifo_write(path, data):
    '''open_fifo_write opens the fifo named pipe in a new thread.
    This blocks the thread until an external process (such as ssh-agent)
    reads data from the pipe.
    '''
    os.mkfifo(path, 0o600)
    threading.Thread(target=lambda p, d: open(p, 'w').write(d),
                     args=(path, data)).start()


def run_pexpect(args, cwd, env, logfile,
                cancelled_callback=None, expect_passwords=None,
                extra_update_fields=None, idle_timeout=None, job_timeout=0,
                pexpect_timeout=5, proot_cmd='bwrap'):
    '''
    Run the given command using pexpect to capture output and provide
    passwords when requested.

    :param args: a list of `subprocess.call`-style arguments representing a
                 subprocess e.g., ['ls', '-la']
    :param cwd: the directory in which the subprocess should run
    :param env: a dict containing environment variables for the subprocess,
                ala `os.environ`
    :param logfile: a file-like object for capturing stdout
    :param cancelled_callback: a callable - which returns `True` or `False`
                               - signifying if the job has been prematurely
                               cancelled
    :param expect_passwords: a dict of regular expression password prompts to
                             input values, i.e., {r'Password:\s*?$':
                             'some_password'}
    :param extra_update_fields: a dict used to specify DB fields which should
                                be updated on the underlying model object
                                after execution completes
    :param idle_timeout: a timeout (in seconds); if new output is not sent to
                         stdout in this interval, the process will be
                         terminated
    :param job_timeout: a timeout (in seconds); if the total job runtime
                        exceeds this, the process will be killed
    :param pexpect_timeout: a timeout (in seconds) to wait on
                            `pexpect.spawn().expect()` calls
    :param proot_cmd: the command used to isolate processes, `bwrap`

    Returns a tuple (status, return_code) i.e., `('successful', 0)`
    '''
    # Copy into an OrderedDict so that (a) the TIMEOUT/EOF sentinels added
    # below never leak into the caller's dict, and (b) `keys()` and `values()`
    # iterate in the same, stable order. The previous `expect_passwords={}`
    # mutable default shared (and accumulated sentinels in) one dict across
    # every call.
    expect_passwords = collections.OrderedDict(expect_passwords or {})
    expect_passwords[pexpect.TIMEOUT] = None
    expect_passwords[pexpect.EOF] = None
    password_patterns = list(expect_passwords.keys())
    password_values = list(expect_passwords.values())

    logger.debug('Launch Command')
    logger.debug(args)
    logger.debug('With Environment')
    logger.debug(env)
    child = pexpect.spawn(
        args[0], args[1:], cwd=cwd, env=env, ignore_sighup=True,
        encoding='utf-8', echo=False,
    )
    child.logfile_read = logfile
    canceled = False
    timed_out = False
    errored = False
    last_stdout_update = time.time()
    job_start = time.time()
    while child.isalive():
        # Wait for either a known password prompt, a timeout, or EOF.
        result_id = child.expect(password_patterns, timeout=pexpect_timeout,
                                 searchwindowsize=200)
        password = password_values[result_id]
        if password is not None:
            child.sendline(password)
            last_stdout_update = time.time()
        if cancelled_callback:
            try:
                canceled = cancelled_callback()
            except Exception:
                # Fail safe: treat a broken cancel callback as a fatal error.
                logger.exception('Could not check cancel callback - canceling immediately')
                if isinstance(extra_update_fields, dict):
                    extra_update_fields['job_explanation'] = "System error during job execution, check system logs"
                errored = True
        else:
            canceled = False
        if not canceled and job_timeout != 0 and (time.time() - job_start) > job_timeout:
            timed_out = True
            if isinstance(extra_update_fields, dict):
                extra_update_fields['job_explanation'] = "Job terminated due to timeout"
        if canceled or timed_out or errored:
            handle_termination(child.pid, child.args, proot_cmd, is_cancel=canceled)
        if idle_timeout and (time.time() - last_stdout_update) > idle_timeout:
            child.close(True)
            canceled = True
    if errored:
        return 'error', child.exitstatus
    elif canceled:
        return 'canceled', child.exitstatus
    elif child.exitstatus == 0 and not timed_out:
        return 'successful', child.exitstatus
    else:
        return 'failed', child.exitstatus


def handle_termination(pid, args, proot_cmd, is_cancel=True):
    '''
    Terminate a subprocess spawned by `pexpect`.

    :param pid: the process id of the running job.
    :param args: the args for the job, i.e., ['python', 'test.py']
    :param proot_cmd: the command used to isolate processes, i.e., `bwrap`
    :param is_cancel: flag showing whether this termination is caused by
                      instance's cancel_flag.
    '''
    try:
        # `args` is a list of strings (e.g. loaded from the saved `args` JSON
        # file); joining it directly fixes the old `args.decode('utf-8')`
        # call, which raised AttributeError on a list.
        if proot_cmd in ' '.join(args):
            if not psutil:
                # No psutil available: best effort, kill the parent only.
                os.kill(pid, signal.SIGKILL)
            else:
                # Kill the whole process tree, children first.
                try:
                    main_proc = psutil.Process(pid=pid)
                    child_procs = main_proc.children(recursive=True)
                    for child_proc in child_procs:
                        os.kill(child_proc.pid, signal.SIGKILL)
                    os.kill(main_proc.pid, signal.SIGKILL)
                except (TypeError, psutil.Error):
                    os.kill(pid, signal.SIGKILL)
        else:
            os.kill(pid, signal.SIGTERM)
            time.sleep(3)
    except OSError:
        keyword = 'cancel' if is_cancel else 'timeout'
        logger.warning("Attempted to %s already finished job, ignoring" % keyword)


def __run__(private_data_dir):
    """Read the base64-encoded job payload from ``<dir>/env``, run the job,
    and write ``status`` and ``rc`` artifact files."""
    buff = io.StringIO()
    with open(os.path.join(private_data_dir, 'env'), 'r') as f:
        for line in f:
            buff.write(line)

    artifacts_dir = os.path.join(private_data_dir, 'artifacts')
    # Standard out directed to pickup location without event filtering applied
    stdout_filename = os.path.join(artifacts_dir, 'stdout')
    os.mknod(stdout_filename, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
    stdout_handle = codecs.open(stdout_filename, 'w', encoding='utf-8')

    # NOTE(review): run_isolated_job is neither defined nor imported in this
    # file -- presumably injected/patched by the deployment; confirm.
    status, rc = run_isolated_job(
        private_data_dir,
        json.loads(base64.b64decode(buff.getvalue())),
        stdout_handle
    )
    for filename, data in [
        ('status', status),
        ('rc', rc),
    ]:
        artifact_path = os.path.join(private_data_dir, 'artifacts', filename)
        os.mknod(artifact_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
        with open(artifact_path, 'w') as f:
            f.write(str(data))


if __name__ == '__main__':
    import cyborgbackup
    __version__ = cyborgbackup.__version__
    parser = argparse.ArgumentParser(description='manage a daemonized task')
    parser.add_argument('--version', action='version',
                        version=__version__ + '-isolated')
    parser.add_argument('command', choices=['start', 'stop', 'is-alive'])
    parser.add_argument('private_data_dir')
    args = parser.parse_args()

    private_data_dir = args.private_data_dir
    pidfile = os.path.join(private_data_dir, 'pid')

    if args.command == 'start':
        # create a file to log stderr in case the daemonized process throws
        # an exception before it gets to `pexpect.spawn`
        stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
        if not os.path.exists(stderr_path):
            os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
        stderr = open(stderr_path, 'w+')

        import daemon
        from daemon.pidfile import TimeoutPIDLockFile
        context = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pidfile),
            stderr=stderr
        )
        with context:
            __run__(private_data_dir)
        sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            # The saved `args` file holds the job's argv as a JSON list.
            with open(os.path.join(private_data_dir, 'args'), 'r') as args_file:
                handle_termination(pid, json.load(args_file), 'bwrap')
        except IOError:
            handle_termination(pid, [], 'bwrap')
    elif args.command == 'is-alive':
        try:
            # Signal 0 (SIG_DFL) only checks that the pid exists.
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
testing.py
##############################################################################
#
# Copyright (c) 2011 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Mongo Persistence Testing Support"""
import doctest
import io
import logging
import os
import psycopg2
import psycopg2.extras
import psycopg2.pool
import re
import sys
import threading
import transaction
import unittest
from pprint import pprint

import zope.component
# Explicitly imported: @zope.interface.implementer is used below and
# previously worked only because zope.component imports zope.interface itself.
import zope.interface
from zope.testing import module, renormalizing

from pjpersist import datamanager, serialize, interfaces

# Output normalizers that make Python 2 doctest output match Python 3.
py3checkers = [
    # Mangle unicode strings
    (re.compile("u('.*?')"), r"\1"),
    (re.compile('u(".*?")'), r"\1"),
    # Mangle long ints
    (re.compile('([0-9]+)L$'), r"\1"),
    (re.compile('__builtin__'), 'builtins'),
    (re.compile('pjpersist.interfaces.CircularReferenceError'),
     'CircularReferenceError'),
]

checker = renormalizing.RENormalizing([
    # Date/Time objects
    (re.compile(r'datetime.datetime\(.*\)'),
     'datetime.datetime(2011, 10, 1, 9, 45)'),
    # IDs
    (re.compile(r"'[0-9a-f]{24}'"), "'0001020304050607080a0b0c0'"),
] + py3checkers)

OPTIONFLAGS = (
    doctest.NORMALIZE_WHITESPACE|
    doctest.ELLIPSIS|
    doctest.REPORT_ONLY_FIRST_FAILURE
    #|doctest.REPORT_NDIFF
    )

DBNAME = 'pjpersist_test'
DBNAME_OTHER = 'pjpersist_test_other'


class DummyConnectionPool:
    """Single-connection stand-in for a psycopg2 pool.

    Hands out exactly one connection; getconn() while it is taken raises
    PoolError, mirroring an exhausted real pool.
    """

    def __init__(self, conn):
        self._available = conn
        self._taken = None

    def getconn(self):
        if self._available is None:
            raise psycopg2.pool.PoolError("Connection is already taken")
        self._available.reset()
        self._available.set_isolation_level(
            psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
        self._taken = self._available
        self._available = None
        return self._taken

    def putconn(self, conn, key=None, close=False):
        assert conn is self._taken
        self._available = self._taken
        self._taken = None

    def isTaken(self):
        return self._taken is not None


@zope.interface.implementer(interfaces.IPJDataManagerProvider)
class SimpleDataManagerProvider(object):
    """Maps database names to data managers, with an optional default
    returned for a ``None`` lookup."""

    def __init__(self, dms, default=None):
        self.idx = {dm.database: dm for dm in dms}
        self.idx[None] = default

    def get(self, database):
        return self.idx[database]


def getConnection(database=None):
    """Open a serializable connection to the local test PostgreSQL server."""
    conn = psycopg2.connect(
        database=database or 'template1',
        host='localhost', port=5432,
        user='pjpersist', password='pjpersist')
    conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
    return conn


def createDB():
    """(Re)create both test databases from scratch."""
    dropDB()
    conn = getConnection()
    with conn.cursor() as cur:
        # End the implicit transaction: CREATE/DROP DATABASE cannot run
        # inside a transaction block.
        cur.execute('END')
        cur.execute('DROP DATABASE IF EXISTS %s' % DBNAME)
        cur.execute('CREATE DATABASE %s' % DBNAME)
        cur.execute('DROP DATABASE IF EXISTS %s' % DBNAME_OTHER)
        cur.execute('CREATE DATABASE %s' % DBNAME_OTHER)
    conn.commit()
    conn.close()


def dropDB():
    """Drop both test databases, ignoring programming errors."""
    conn = getConnection()
    with conn.cursor() as cur:
        cur.execute('END')
        try:
            cur.execute('DROP DATABASE IF EXISTS %s' % DBNAME_OTHER)
            cur.execute('DROP DATABASE IF EXISTS %s' % DBNAME)
        except psycopg2.ProgrammingError:
            pass
    conn.commit()
    conn.close()


def cleanDB(conn=None):
    """Drop every non-system table in the given (or a fresh) connection."""
    if conn is None:
        conn = getConnection(DBNAME)
    conn.rollback()
    with conn.cursor() as cur:
        cur.execute("""SELECT tablename FROM pg_tables""")
        for res in cur.fetchall():
            if not res[0].startswith('pg_') and not res[0].startswith('sql_'):
                cur.execute('DROP TABLE ' + res[0])
    conn.commit()


def setUpSerializers(test):
    serialize.SERIALIZERS = []


def tearDownSerializers(test):
    del serialize.SERIALIZERS[:]


def setUp(test):
    """Doctest setUp: connections, data managers, and a dumpTable helper."""
    module.setUp(test)
    setUpSerializers(test)
    g = test.globs
    g['conn'] = getConnection(DBNAME)
    g['conn_other'] = getConnection(DBNAME_OTHER)
    cleanDB(g['conn'])
    cleanDB(g['conn_other'])
    g['commit'] = transaction.commit
    g['dm'] = datamanager.PJDataManager(DummyConnectionPool(g['conn']))
    g['dm_other'] = datamanager.PJDataManager(DummyConnectionPool(g['conn_other']))

    def dumpTable(table, flush=True, isolate=False):
        """Pretty-print all rows of *table*; isolate=True uses a fresh
        connection so uncommitted state is not visible."""
        if isolate:
            conn = getConnection(database=DBNAME)
        else:
            conn = g['conn']
        with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
            try:
                cur.execute('SELECT * FROM ' + table)
            except psycopg2.ProgrammingError as err:
                print(err)
            else:
                pprint([dict(e) for e in cur.fetchall()])
        if isolate:
            conn.close()
    g['dumpTable'] = dumpTable

    dmp = SimpleDataManagerProvider([g['dm'], g['dm_other']], g['dm'])
    zope.component.provideUtility(dmp)


def tearDown(test):
    """Doctest tearDown: abort, clean databases, close connections."""
    module.tearDown(test)
    tearDownSerializers(test)
    transaction.abort()
    cleanDB(test.globs['conn'])
    cleanDB(test.globs['conn_other'])
    test.globs['conn'].close()
    test.globs['conn_other'].close()
    resetCaches()


class DatabaseLayer(object):
    """zope.testrunner layer that creates/drops the test databases and
    optionally mirrors SQL logging to stdout (SHOW_SQL env var)."""

    __bases__ = ()

    def __init__(self, name):
        self.__name__ = name

    def setUp(self):
        createDB()
        self.setUpSqlLogging()

    def tearDown(self):
        self.tearDownSqlLogging()
        dropDB()

    def setUpSqlLogging(self):
        if "SHOW_SQL" not in os.environ:
            return
        self.save_PJ_ACCESS_LOGGING = datamanager.PJ_ACCESS_LOGGING
        datamanager.PJ_ACCESS_LOGGING = True
        setUpLogging(datamanager.TABLE_LOG, copy_to_stdout=True)
        setUpLogging(datamanager.LOG, copy_to_stdout=True)

    def tearDownSqlLogging(self):
        if "SHOW_SQL" not in os.environ:
            return
        tearDownLogging(datamanager.LOG)
        tearDownLogging(datamanager.TABLE_LOG)
        datamanager.PJ_ACCESS_LOGGING = self.save_PJ_ACCESS_LOGGING


db_layer = DatabaseLayer("db_layer")


class PJTestCase(unittest.TestCase):
    """Base TestCase wiring a clean DB + data manager around each test."""

    layer = db_layer

    def setUp(self):
        setUpSerializers(self)
        self.conn = getConnection(DBNAME)
        cleanDB(self.conn)
        self.dm = datamanager.PJDataManager(DummyConnectionPool(self.conn))

    def tearDown(self):
        datamanager.CONFLICT_TRACEBACK_INFO.traceback = None
        tearDownSerializers(self)
        transaction.abort()
        cleanDB(self.conn)
        self.conn.close()
        resetCaches()


def resetCaches():
    """Reset pjpersist's module-level serializer caches between tests."""
    serialize.AVAILABLE_NAME_MAPPINGS.__init__()
    serialize.PATH_RESOLVE_CACHE = {}
    serialize.TABLE_KLASS_MAP = {}


def log_sql_to_file(fname, add_tb=True, tb_limit=15):
    """Debug helper: route pjpersist SQL logging (with tracebacks) to *fname*."""
    datamanager.PJ_ENABLE_QUERY_STATS = True
    datamanager.PJ_ACCESS_LOGGING = True
    datamanager.TABLE_LOG.setLevel(logging.DEBUG)
    datamanager.PJPersistCursor.TB_LIMIT = tb_limit

    fh = logging.FileHandler(fname)
    fh.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    datamanager.TABLE_LOG.addHandler(fh)


class StdoutHandler(logging.StreamHandler):
    """Logging handler that follows the current binding of sys.stdout."""

    def __init__(self):
        # skip logging.StreamHandler.__init__()
        logging.Handler.__init__(self)

    @property
    def stream(self):
        return sys.stdout


def setUpLogging(logger, level=logging.DEBUG, format='%(message)s',
                 copy_to_stdout=False):
    """Capture *logger*'s output into a StringIO (returned); optionally also
    mirror it to the live sys.stdout for doctests."""
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    buf = io.StringIO()
    handler = logging.StreamHandler(buf)
    handler._added_by_tests_ = True
    handler._old_propagate_ = logger.propagate
    handler._old_level_ = logger.level
    handler.setFormatter(logging.Formatter(format))
    logger.addHandler(handler)
    if copy_to_stdout:
        # can't use logging.StreamHandler(sys.stdout) because sys.stdout might
        # be changed latter to a StringIO, and we want messages to be seen
        # by doctests.
        handler = StdoutHandler()
        handler._added_by_tests_ = True
        handler._old_propagate_ = logger.propagate
        handler._old_level_ = logger.level
        handler.setFormatter(logging.Formatter(format))
        logger.addHandler(handler)
    logger.propagate = False
    logger.setLevel(level)
    return buf


def tearDownLogging(logger):
    """Remove the handlers installed by setUpLogging and restore state."""
    if isinstance(logger, str):
        logger = logging.getLogger(logger)
    for handler in list(logger.handlers):
        if hasattr(handler, '_added_by_tests_'):
            logger.removeHandler(handler)
            logger.propagate = handler._old_propagate_
            logger.setLevel(handler._old_level_)


#TO_JOIN = []

def run_in_thread(func):
    """Run *func* in a daemon thread and return the started Thread."""
    t = threading.Thread(target=func)
    # `Thread.setDaemon()` is deprecated; assign the attribute directly.
    t.daemon = True
    t.start()
    #TO_JOIN.append(t)
    return t
extraction_line_pyscript.py
# =============================================================================== # Copyright 2011 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== import inspect import re import time from threading import Event from threading import Thread from traits.api import List, Dict from pychron.core.ramper import Ramper from pychron.external_pipette.protocol import IPipetteManager from pychron.furnace.ifurnace_manager import IFurnaceManager from pychron.hardware.core.exceptions import TimeoutError from pychron.hardware.core.i_core_device import ICoreDevice from pychron.lasers.laser_managers.ilaser_manager import ILaserManager from pychron.pychron_constants import EXTRACTION_COLOR, LINE_STR, NULL_STR, EL_PROTOCOL, PATTERN, POSTCLEANUP, \ PRECLEANUP, CLEANUP, DURATION, CRYO_TEMP from pychron.pyscripts.context_managers import RecordingCTX, LightingCTX, GrainPolygonCTX from pychron.pyscripts.decorators import verbose_skip, makeRegistry, calculate_duration from pychron.pyscripts.valve_pyscript import ValvePyScript COMPRE = re.compile(r'[A-Za-z]*') # make a registry to hold all the commands exposed by ExtractionPyScript # used when building the context # see PyScript.get_context and get_command_register command_register = makeRegistry() class ExtractionPyScript(ValvePyScript): """ The ExtractionPyScript is used to program the extraction and gettering of sample gas. 
""" automated_run = None _resource_flag = None info_color = EXTRACTION_COLOR snapshots = List videos = List extraction_context = Dict _extraction_positions = List _grain_polygons = List def set_load_identifier(self, v): self.setup_context(load_identifier=v) def set_run_identifier(self, v): self.setup_context(run_identifier=v) def get_extraction_positions(self, clear=True): """ Returns a list of x,y,z tuples each tuple represents where the extraction occurred if clear is True (default) ``self._extraction_positions`` set to an empty list :return: list of x,y,z tuples :rtype: list of tuples """ ret = self._extraction_positions if clear: self._extraction_positions = [] return ret def get_grain_polygons(self): m = self._grain_polygons if not m: m = self._extraction_action(('get_grain_polygon_blob', (), {})) return m def get_response_blob(self): """ Get the extraction device's response blob :return: response blob. binary string representing time v measured output :rtype: str """ result = self._extraction_action(('get_response_blob', (), {})) return result or b'' def get_output_blob(self): """ Get the extraction device's output blob :return: output blob: binary string representing time v percent output :rtype: str """ result = self._extraction_action(('get_output_blob', (), {})) return result or b'' def get_setpoint_blob(self): """ Get the extraction device's setpoint blob :return: setpoint blob: binary string representing time v requested setpoint :rtype: str """ result = self._extraction_action(('get_setpoint_blob', (), {})) return result or b'' def output_achieved(self): """ Return a formated string with the extraction "heating" results:: Requested Output= 100.000 Achieved Output= 99.012 :return: Formatted string with results :rtype: str """ request = self.extract ach = self._extraction_action(('get_achieved_output', (), {})) try: request = float(request) except (ValueError, TypeError): request = 0 try: ach = float(ach) except (ValueError, TypeError): ach = 0 return 
ach, request def get_active_pid_parameters(self): result = self._extraction_action(('get_active_pid_parameters', (), {})) return result or '' def get_command_register(self): cm = super(ExtractionPyScript, self).get_command_register() return list(command_register.commands.items()) + cm def set_default_context(self, **kw): """ provide default values for all the properties exposed in the script """ for attr in ('analysis_type', 'extract_device'): if attr not in kw: kw[attr] = '' self.setup_context(position='', pattern='', extract_value=0, extract_units='', tray='', ramp_rate=0, ramp_duration=0, duration=0, delay_after=0, cleanup=0, pre_cleanup=0, post_cleanup=0, cryo_temperature=0, light_value=0, beam_diameter=None, load_identifier='default_load', run_identifier='default_runid', **kw) # ========================================================================== # commands # ========================================================================== @verbose_skip @command_register def store_manometer_pressure(self, idx=0): result = self._manager_action(('get_manometer_pressure', (), {'idx': idx}), protocol=EL_PROTOCOL) self.extraction_context['manometer_pressure{}'.format(idx)] = result return result @verbose_skip @command_register def get_manometer_pressure(self, idx=0): result = self._manager_action(('get_manometer_pressure', (), {'idx': idx}), protocol=EL_PROTOCOL) return result @verbose_skip @command_register def get_pressure(self, controller, gauge): result = self._manager_action(('get_pressure', (controller, gauge), {}), protocol=EL_PROTOCOL) return result @verbose_skip @command_register def set_cryo(self, value): result = self._manager_action(('set_cryo', (value,), {}), protocol=EL_PROTOCOL) self.debug('set cyro result={}'.format(result)) return result @verbose_skip @command_register def get_cryo_temp(self, value): result = self._manager_action(('get_cryo_temp', (value,), {}), protocol=EL_PROTOCOL) return result @calculate_duration @command_register def 
begin_heating_interval(self, duration, min_rise_rate=None, check_time=60, check_delay=60, check_period=1, temperature=None, timeout=300, tol=10, name=None, calc_time=False): duration = float(duration) if calc_time: self._estimated_duration += duration if self._cancel: return def wait(dur, flag, n): if not min_rise_rate: self._sleep(dur) else: st = time.time() self._sleep(check_delay, 'Heating check delay') t1 = time.time() r1 = self._extraction_action(('get_process_value',)) self._sleep(check_time, 'Checking rise rate') t2 = time.time() r2 = self._extraction_action(('get_process_value',)) rr = (r2 - r1) / (t2 - t1) if rr < min_rise_rate: self.warning('Failed to heat. Rise Rate={:0.1f}. Min Rise Rate={:0.1f}'.format(rr, min_rise_rate)) self.cancel() flag.set() else: if temperature: self._set_extraction_state('Waiting to reach temperature {}'.format(temperature)) st = time.time() while 1: sti = time.time() if sti - st < timeout: self._set_extraction_state('Failed to reach temperature {}'.format(r2)) self.warning('Failed to reach temperature {}'.format(r2)) self.cancel() break r2 = self._extraction_action(('get_process_value',)) if r2: if abs(r2 - temperature) < tol: self._set_extraction_state('Reached Temperature {}'.format(r2)) break else: self.warning('Failed to get response.') self.cancel() break time.sleep(max(0, check_period - (time.time() - sti))) self._sleep(dur, 'Time at Temperature') else: rem = dur - (time.time - st) self._sleep(rem, ) if not self._cancel: self.console_info('{} finished'.format(n)) flag.set() t, f = None, None if name is None: name = 'Interval {}'.format(self._interval_stack.qsize() + 1) if not self.testing_syntax: f = Event() self.console_info('BEGIN HEATING INTERVAL {} waiting for {}'.format(name, duration)) t = Thread(name=name, target=wait, args=(duration, f, name)) t.start() self._interval_stack.put((t, f, name)) def _set_extraction_state(self, msg, color='red', flash=False): self._manager_action(('set_extract_state', (msg,), {'color': 
color, 'flash': flash})) @verbose_skip @command_register def set_response_recorder_period(self, p): self._extraction_action(('set_response_recorder_period', (p,), {})) @verbose_skip @command_register def start_response_recorder(self): self._extraction_action(('start_response_recorder',)) @verbose_skip @command_register def stop_response_recorder(self): self._extraction_action(('stop_response_recorder',)) @verbose_skip @command_register def check_reached_setpoint(self): self._extraction_action(('check_reached_setpoint',)) @verbose_skip @command_register def wake(self): self._extraction_action('wake') self._manager_actions('wake') @verbose_skip @command_register def waitfor(self, func_or_tuple, start_message='', end_message='', check_period=1, timeout=0, func_kw=None): """ tuple format: (device_name, function_name, comparison, ...) addition tuple elements are passed to function_name comparison :: x<10 10<x<20 callable can of form ``func() or func(ti) or func(ti, i)`` where ``ti`` is the current relative time (relative to start of waitfor) and ``i`` is a counter :param func_or_tuple: wait for function to return True :type func_or_tuple: callable, tuple :param start_message: Message to display at start :type start_message: str :param end_message: Message to display at end :type end_message: str :param check_period: Delay between checks in seconds :type check_period: int, float :param timeout: Cancel waiting after ``timeout`` seconds :type timeout: int, float """ include_time = False include_time_and_count = False if isinstance(func_or_tuple, tuple): func = self._make_waitfor_func(*func_or_tuple, func_kw=func_kw) else: func = func_or_tuple args = inspect.getargspec(func).args if len(args) == 1: include_time = True elif len(args) == 2: include_time_and_count = True if not func: self.debug('no waitfor function') self.cancel() self.console_info('waitfor started. 
{}'.format(start_message)) st = time.time() i = 0 while 1: if self.is_canceled(): self.console_info('waitfor canceled') return ct = time.time() - st if timeout and ct > timeout: self.warning('waitfor timed out after {}s'.format(timeout)) self.cancel() return if include_time: args = (ct,) elif include_time_and_count: args = (ct, i) i += 1 else: args = tuple() if func(*args): self.console_info('waitfor ended. {}'.format(end_message)) break time.sleep(check_period) @verbose_skip @command_register def power_map(self, cx, cy, padding, bd, power): pass # @verbose_skip # @command_register # def degas(self, lumens=0, duration=0): # self._extraction_action([('do_machine_vision_degas', (lumens, duration), {})]) @verbose_skip @command_register def autofocus(self, set_zoom=True): self._extraction_action(('do_autofocus', (), {'set_zoom': set_zoom})) @verbose_skip @command_register def set_light(self, value=''): self._extraction_action(('set_light', (value,), {})) @verbose_skip @command_register def snapshot(self, name='', prefix='', view_snapshot=False, pic_format='.jpg'): """ if name not specified use RID_Position e.g 12345-01A_3 """ if not name: pos = '_'.join(self.position) name = '{}_{}'.format(self.run_identifier, pos) name = '{}{}'.format(prefix, name) snap = self._extraction_action(('take_snapshot', (name, pic_format), {'view_snapshot': view_snapshot})) if snap: self.snapshots.append(snap) @command_register def grain_polygon(self): return GrainPolygonCTX(self) @command_register def lighting(self, value=75): return LightingCTX(self, value) @command_register def video_recording(self, name='video'): return RecordingCTX(self, name) @verbose_skip @command_register def start_video_recording(self, name='video'): self._extraction_action(('start_video_recording', (), {'name': name})) @verbose_skip @command_register def stop_video_recording(self, save_db=True): ps = self._extraction_action(('stop_video_recording',)) if save_db: if ps: self.videos.append(ps) @verbose_skip 
@command_register def set_x(self, value, velocity=''): self._set_axis('x', value, velocity) @verbose_skip @command_register def set_y(self, value, velocity=''): self._set_axis('y', value, velocity) @verbose_skip @command_register def set_z(self, value, velocity=''): self._set_axis('z', value, velocity) @verbose_skip @command_register def set_xy(self, value, velocity=''): self._set_axis('xy', value, velocity) @verbose_skip @command_register def set_motor_lock(self, name='', value=''): if name and value is not '': l = 'YES' if value else 'NO' self.console_info('set motor lock to {}'.format(name, l)) self._extraction_action(('set_motor_lock', (name, value), {})) @verbose_skip @command_register def set_motor(self, name='', value=''): self.console_info('setting motor "{}" to {}'.format(name, value)) if name is not '' and value is not '': if value is not None: self._extraction_action(('set_motor', (name, value), {})) @verbose_skip @command_register def get_value(self, name): try: print(name, self.get_context()[name]) return self.get_context()[name] except KeyError: self.warning('no name {} in context'.format(name)) pass @verbose_skip @command_register def move_to_position(self, position='', autocenter=True, block=True): if position == '': position = self.position if position: position_ok = True if isinstance(position, (list, tuple)): position_ok = all(position) else: position_ok = False if position_ok: ed = self.extract_device self.console_info('{} move to position {}'.format(ed, position)) success = self._extraction_action(('move_to_position', (position,), {'autocenter': autocenter, 'block': block})) if not success: self.info('{} move to position failed'.format(ed)) self.cancel() else: self.console_info('move to position succeeded') return True else: self.console_info('move not required. 
position is None') return True @verbose_skip @command_register def dump_sample(self): success = self._extraction_action(('dump_sample', (), {'block': True})) if not success: self.info('{} dump sample failed'.format(self.extract_device)) self.cancel() else: self.console_info('dump sample succeeded') return True @verbose_skip @command_register def drop_sample(self, position=''): success = self._extraction_action([('drop_sample', (position,), {'block': True})]) if not success: self.info('{} drop sample failed'.format(self.extract_device)) self.cancel() else: self.console_info('drop sample succeeded') return True @verbose_skip @command_register def execute_pattern(self, pattern='', block=True, duration=None): if pattern == '': pattern = self.pattern st = time.time() # set block=True to wait for pattern completion self._extraction_action(('execute_pattern', (pattern,), {'block': block, 'duration': duration})) return time.time() - st @verbose_skip @command_register def set_tray(self, tray=''): if tray == '': tray = self.tray self.console_info('set tray to {}'.format(tray)) result = self._extraction_action(('set_stage_map', (tray,), {})) return result @verbose_skip @command_register def load_pipette(self, identifier, timeout=300): """ this is a non blocking command. it simply sends a command to apis to start one of its runscripts. it is the ExtractionPyScripts responsiblity to handle the waiting. use the waitfor command to wait for signals from apis. 
""" from pychron.external_pipette.apis_manager import InvalidPipetteError if self.analysis_type == 'blank': cmd = 'load_blank_non_blocking' else: cmd = 'load_pipette_non_blocking' try: # bug _manager_action only with except tuple of len 1 for args ret = self._extraction_action((cmd, (identifier,), {'timeout': timeout}), name='externalpipette', protocol=IPipetteManager) return ret except InvalidPipetteError as e: self.cancel(protocol=IPipetteManager) e = str(e) self.warning(e) return e @verbose_skip @command_register def extract_pipette(self, identifier='', timeout=300): """ this is an atomic command. use the apis_controller config file to define the isolation procedures. """ from pychron.external_pipette.apis_manager import InvalidPipetteError if identifier == '': identifier = self.extract_value cmd = 'load_blank' if self.analysis_type == 'blank' else 'load_pipette' try: # bug _manager_action only with except tuple of len 1 for args result = self._extraction_action((cmd, (identifier,), {'timeout': timeout, 'script': self}), name='externalpipette', protocol=IPipetteManager) return result except (TimeoutError, InvalidPipetteError) as e: self.cancel(protocol=IPipetteManager) e = str(e) self.warning(e) return e @verbose_skip @command_register def set_pid_parameters(self, v): self._extraction_action(('set_pid_parameters', (v,), {})) @verbose_skip @command_register def warmup(self, block=False): self._extraction_action(('warmup', (), {'block': block, })) @verbose_skip @command_register def extract(self, power='', units='', block=None): if power == '': power = self.extract_value if units == '': units = self.extract_units ed = self.extract_device ed = ed.replace('_', ' ') # get current position and add as an extraction position pos = self._extraction_action(('get_position', (), {})) self._extraction_positions.append(pos) msg = '{} ON! 
{}({})'.format(ed, power, units) self._set_extraction_state(msg) self.console_info('extract sample to {} ({})'.format(power, units)) self._extraction_action(('extract', (power,), {'units': units, 'block': block})) @verbose_skip @command_register def end_extract(self): self._set_extraction_state(False) self._extraction_action(('end_extract',)) @verbose_skip @command_register def acquire_grain_polygon_blob(self): result = self._extraction_action(('acquire_grain_polygon',)) if result: self._grain_polygons.append(result) @verbose_skip @command_register def start_grain_polygon(self): self._extraction_action(('start_measure_grain_polygon',)) @verbose_skip @command_register def stop_grain_polygon(self): self._extraction_action(('stop_measure_grain_polygon',)) @verbose_skip @command_register def fire_laser(self): self._extraction_action(('fire_laser',)) @verbose_skip @command_register def ramp(self, start=0, setpoint=0, duration=0, rate=0, period=1): args = start, setpoint, duration, rate, period self.debug('ramp parameters start={}, ' 'setpoint={}, duration={}, rate={}, period={}'.format(*args)) def func(i, ramp_step): if self._cancel: return self.console_info('ramp step {}. setpoint={}'.format(i, ramp_step)) if not self._extraction_action(('set_laser_power', (ramp_step,), {})): return if self._cancel: return return True st = time.time() rmp = Ramper() rmp.ramp(func, start, setpoint, duration, rate, period) return time.time() - st @verbose_skip @command_register def acquire(self, name=None, clear=False): if self.runner is None: self.debug('+++++++++++++++++++++++ Runner is None') return self.console_info('acquire {}'.format(name)) r = self.runner.get_resource(name) if not clear: if r.isSet(): self.console_info('waiting for access') # if self.manager: # msg = 'Waiting for Resource Access. "{}"'.format(name) # self.manager.set_extract_state(msg, color='red') msg = 'Waiting for Resource Access. 
"{}"'.format(name) self._set_extraction_state(msg) while r.isSet(): if self._cancel: break self._sleep(1) if not self.runner.reset_connection(): self.cancel() break if not self._cancel: self._resource_flag = r self.runner.acquire(name) self.console_info('{} acquired'.format(name)) self._set_extraction_state(False) # if self.manager: # self.manager.set_extract_state(False) @verbose_skip @command_register def wait(self, name=None, criterion=0): if self.runner is None: self.debug('+++++++++++++++++++++++ Runner is None') return self.console_info('waiting for {} = {}'.format(name, criterion)) r = self.runner.get_resource(name) cnt = 0 resp = r.read() if resp is not None: while resp != criterion: time.sleep(1) # only verbose every 10s resp = r.read(verbose=cnt % 10 == 0) if resp is None: continue cnt += 1 if cnt > 100: cnt = 0 self.console_info('finished waiting') @verbose_skip @command_register def release(self, name=None): self.console_info('release {}'.format(name)) if self.runner is None: self.debug('+++++++++++++++++++++++ Runner is None') return if not self.runner.release(name): self.console_info('Could not release {}'.format(name)) @verbose_skip @command_register def set_resource(self, name=None, value=1): if self.runner is None: self.debug('+++++++++++++++++++++++ Runner is None') return r = self.runner.get_resource(name) if r is not None: r.set(value) else: self.console_info('Could not set {}'.format(name)) @verbose_skip @command_register def get_resource_value(self, name=None): if self.runner is None: self.debug('+++++++++++++++++++++++ Runner is None') return r = self.runner.get_resource(name) resp = None if r is not None: if hasattr(r, 'get'): resp = r.get() else: resp = r.isSet() else: self.console_info('Could not get {}'.format(name)) self.debug('Get Resource Value {}={}'.format(name, resp)) return resp @verbose_skip @command_register def enable(self): ed = self.extract_device ed = ed.replace('_', ' ') self._set_extraction_state('{} Enabled'.format(ed), 
flash=False) # self.manager.set_extract_state('{} Enabled'.format(ed)) return self._manager_action(('enable_device', (), {}), protocol=ILaserManager, name=self.extract_device) @verbose_skip @command_register def disable(self): return self._disable() @verbose_skip @command_register def prepare(self): return self._extraction_action(('prepare', (), {})) @verbose_skip @command_register def set_intensity_scalar(self, v): return self._automated_run_call('py_set_intensity_scalar', v) @verbose_skip @command_register def get_device(self, name): return self._get_device(name) # ========================================================================== # properties # ========================================================================== def _get_property(self, key, default=None): ctx = self.get_context() return ctx.get(key, default) @property def duration(self): return self._get_property(DURATION) @property def cleanup(self): return self._get_property(CLEANUP) @property def pre_cleanup(self): return self._get_property(PRECLEANUP) @property def post_cleanup(self): return self._get_property(POSTCLEANUP) @property def cryo_temperature(self): return self._get_property(CRYO_TEMP) @property def pattern(self): return self._get_property(PATTERN) @property def analysis_type(self): at = self._get_property('analysis_type') self.debug('getting analysis type for {}. 
' 'analysis_type={}'.format(self.run_identifier, at)) return at @property def extract_device(self): return self._get_property('extract_device') @property def tray(self): return self._get_property('tray') # return self.get_context()['tray'] @property def position(self): """ if position is 0 return None """ # pos = self.get_context()['position'] pos = self._get_property('position') if pos: return pos @property def extract_value(self): return self._get_property('extract_value') # return self.get_context()['extract_value'] @property def extract_units(self): return self._get_property('extract_units') # return self.get_context()['extract_units'] @property def beam_diameter(self): return self._get_property('beam_diameter') # return self.get_context()['beam_diameter'] @property def run_identifier(self): return self._get_property('run_identifier') @property def load_identifier(self): return self._get_property('load_identifier') @property def light_value(self): return self._get_property('light_value') # =============================================================================== # private # =============================================================================== def _failed_actuation_hook(self): self._automated_run_call('set_end_after') def _check_responding(self, rr, st): self._extraction_action(('check_responding', (rr, st), {})) def _abort_hook(self): self.disable() # def _cancel_hook(self): # self.disable() def _get_device(self, name): app = self._get_application() if app is not None: return app.get_service_by_name(ICoreDevice, name) else: self.warning('_get_device - No application') def _make_waitfor_func(self, name, funcname, comp, func_kw=None): if func_kw is None: func_kw = {} dev = self._get_device(name) if dev: devfunc = getattr(dev, funcname) m = COMPRE.findall(comp) if m: k = m[0] def func(*a): print('devfunc', devfunc(**func_kw)) print('eval', eval(comp, {k: devfunc(**func_kw)})) return eval(comp, {k: devfunc(**func_kw)}) return func else: 
self.warning('invalid comparison. valid e.g.=x<10 comp={}'.format(comp)) else: self.warning('no device available named "{}"'.format(name)) def _extraction_action(self, *args, **kw): if 'name' not in kw or kw['name'] is None: kw['name'] = self.extract_device if 'protocol' not in kw or kw['protocol'] is None: kw['protocols'] = ILaserManager, IFurnaceManager if kw['name'] in ('Extract Device', 'ExtractDevice', 'extract device', 'extractdevice', NULL_STR, LINE_STR): self.debug('no extraction action') return return self._manager_action(*args, **kw) def _extraction_actions(self, *args, **kw): if 'name' not in kw or kw['name'] is None: kw['name'] = self.extract_device if 'protocol' not in kw or kw['protocol'] is None: kw['protocols'] = ILaserManager, IFurnaceManager if kw['name'] in ('Extract Device', 'ExtractDevice', 'extract device', 'extractdevice', NULL_STR, LINE_STR): self.debug('no extraction action') return return self._manager_actions(*args, **kw) def _disable(self, protocol=None): self.debug('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% disable') self._set_extraction_state(False) # if self.manager: # self.manager.set_extract_state(False) return self._extraction_action(('disable_device', (), {}), protocol=protocol) def _set_axis(self, name, value, velocity): kw = dict(block=True) if velocity: kw['velocity'] = value success = self._extraction_action(('set_{}'.format(name), (value,), kw)) if not success: self.console_info('{} move to position failed'.format(self.extract_device)) else: self.console_info('move to position suceeded') return True def _cancel_hook(self, **kw): if self._resource_flag: self._resource_flag.clear() # disable the extract device self._disable(**kw) # stop patterning self._stop_pattern(**kw) self.stop_grain_polygon() def _stop_pattern(self, protocol=None): self._extraction_action(('stop_pattern', (), {}), protocol=protocol) def _automated_run_call(self, func, *args, **kw): if self.automated_run is None: return if isinstance(func, str): func = 
getattr(self.automated_run, func) return func(*args, **kw) # ============= EOF ====================================
daemon.py
from multiprocessing import Process
from multiprocessing.sharedctypes import RawValue
from time import perf_counter,sleep
from logging import info
import numpy as np
from unitreepy.utils.exception_parser import parse_exception
from unitreepy.interfaces.shared_state import SharedState,SHM_IMPORTED


class Daemon:
    '''Run ``action()`` repeatedly in a separate (daemonized) child process.

    Subclasses override the virtual hooks as needed:

    * ``process_init()`` -- runs in the child process, once, before the loop.
    * ``action()`` -- the periodic work; returning a falsy value stops the loop.
    * ``on_start()`` -- runs in the parent after the child has started.
    * ``on_stop()`` -- runs in the parent just before the child is stopped.

    A shared ``RawValue`` byte flag is used both as the child's run/stop
    switch and as the parent's "child is alive" signal.
    '''
    def __init__(self,update_rate=-1,name="Unnamed daemon"):
        """Create the daemon.

        :param update_rate: target calls of ``action()`` per second; a
            negative value means "run as fast as possible".
            NOTE(review): ``update_rate == 0`` would raise ZeroDivisionError
            in ``handler()`` -- presumably 0 is never passed; confirm.
        :param name: label used in log messages.
        """
        self.name = name
        # daemon=True so the child cannot outlive the parent process
        self.handler_proc = Process(target=self.handler,daemon=True)
        self.update_rate = update_rate
        self.has_shared_state = False
        # shared byte flag: 1 while the child loop should keep running;
        # also observed by start() to detect that the child came up
        self.__sh_process_running = RawValue("b",0)

    def handler(self):
        """Child-process entry point: init, then the timed action loop."""
        self.process_init()
        try:
            # signal the parent (busy-waiting in start()) that we are live
            self.__sh_process_running.value = 1
            initial_time = perf_counter()
            tick = 0
            while self.__sh_process_running.value:
                actual_time = perf_counter() - initial_time
                # fire when the period elapsed, or always for negative rates;
                # the `or` short-circuit keeps 1/update_rate safe for rate=-1
                if actual_time - tick >= 1/self.update_rate or self.update_rate<0:
                    result = self.action()
                    tick = actual_time
                    if not result:
                        info(f"Process {self.name} terminated due to the incorrect action() result")
                        break
        except KeyboardInterrupt:
            info(f"Process {self.name} was interrupted")
        except Exception as e:
            info(f"Daemon process {self.name} was interrupted by an exception inside the handler \n \
                Exception: \n {parse_exception(e)}")

    def init_shared_state_array(self,size,name,data_type=np.float32):
        """Register a zero-initialized shared-memory array named *name*.

        The buffer is exposed as ``self.raw_state_buffer`` for in-place
        updates visible across processes.
        """
        self.has_shared_state = True
        self.shared_state = SharedState()
        self.shared_state.register_shared_memory(name,size,data_type)
        data = np.zeros(size,dtype=data_type)
        self.raw_state_buffer = self.shared_state[name]
        np.copyto(self.raw_state_buffer, data)

    def start(self):
        """Start the child process and block until it reports ready."""
        self.pre_start()
        self.handler_proc.start()
        # NOTE(review): if the child dies before setting the flag (e.g. an
        # exception inside process_init), this loop spins forever -- confirm
        # whether a timeout is needed here.
        while self.__sh_process_running.value == 0:
            sleep(0.01)
        info(f"Process {self.name} has started")
        self.on_start()

    def pre_start(self):
        # hook: runs in the parent immediately before the child is spawned
        pass

    def stop(self):
        """Ask the child loop to exit, wait briefly, then release resources."""
        self.on_stop()
        # clearing the shared flag makes the child's while-loop terminate
        self.__sh_process_running.value = 0
        # bounded join: a stuck child is abandoned after 1 s (it is a
        # daemon process, so it dies with the parent anyway)
        self.handler_proc.join(timeout=1)
        self.cleanup()
        info(f"Process {self.name} terminated")

    def cleanup(self):
        # release shared-memory segments if any were registered
        if self.has_shared_state:
            self.shared_state.cleanup()

    def init(self):
        # hook: not invoked by this class itself; presumably for subclasses
        pass

    def process_init(self):
        # hook: runs in the child process before the action loop
        pass

    def action(self):
        # hook: periodic work; must return a truthy value to keep looping
        pass

    def on_start(self):
        # hook: runs in the parent after start-up is confirmed
        pass

    def on_stop(self):
        # hook: runs in the parent at the beginning of stop()
        pass
test.py
import requests
import json
import cv2
import time
from threading import Thread


def test():
    """Repeatedly POST the sample images to the remote endpoint and print
    the running average latency per sample.

    Performs network I/O only; relies on the image files being present in
    the working directory.
    """
    imagelist = ["apple.jpeg", "tomato2.jpeg", "mango.jpeg", "tomato.jpeg", "apple2.jpeg"]
    counter = 0
    start_time = time.time()
    # Loop-invariant request setup hoisted out of the loops.
    addr = 'http://app-hyperpigmented-strontian.mybluemix.net'
    test_url = addr + '/api/test'
    # prepare headers for http request
    content_type = 'image/jpeg'
    headers = {'content-type': content_type}
    for i in range(10):
        for imgs in imagelist:
            img = cv2.imread(imgs)
            # encode image as jpeg
            _, img_encoded = cv2.imencode('.jpg', img)
            # send http request with image and receive response
            start = time.time()
            # tobytes() replaces the deprecated ndarray.tostring()
            response = requests.post(test_url, data=img_encoded.tobytes(), headers=headers)
            # BUGFIX: the original `end=(time.time()-start,1)` built a tuple;
            # round() was clearly intended (per-request latency in seconds).
            end = round(time.time() - start, 1)
            # decode response
            # print (json.loads(response.text))
            counter = counter + 1
            end_time = time.time()
            duration = round((end_time - start_time) / counter, 2)
            print(str(duration) + " seconds per sample " + str(counter) + " Samples")
            # expected output: {u'message': u'image received. size=124x124'}


print("Five Clients request concurrently ")
# Spawn five concurrent clients (typo fixed; threads built in a loop
# instead of five numbered variables).
threads = [Thread(target=test) for _ in range(5)]
for t in threads:
    t.start()
rdd.py
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil

# Python 2/3 compatibility shims: on py3, map/filter are already lazy and
# basestring/unicode do not exist.
if sys.version > '3':
    basestring = unicode = str
else:
    from itertools import imap as map, ifilter as filter

from pyspark.java_gateway import do_server_auth
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
    BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
    PickleSerializer, pack_long, AutoBatchedSerializer, write_with_length, \
    UTF8Deserializer
from pyspark.join import python_join, python_left_outer_join, \
    python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
    get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration

# Only RDD is exported as public API from this module.
__all__ = ["RDD"]


class PythonEvalType(object):
    """
    Evaluation type of python rdd.

    These values are internal to PySpark.

    These values should match values in org.apache.spark.api.python.PythonEvalType.
    """
    NON_UDF = 0
    SQL_BATCHED_UDF = 100
    SQL_SCALAR_PANDAS_UDF = 200
    SQL_GROUPED_MAP_PANDAS_UDF = 201
    SQL_GROUPED_AGG_PANDAS_UDF = 202
    SQL_WINDOW_AGG_PANDAS_UDF = 203


def portable_hash(x):
    """
    This function returns consistent hash code for builtin types, especially
    for None and tuple with None.

    The algorithm is similar to that one used by CPython 2.7

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """

    # Python >= 3.2.3 randomizes str hashes per process unless
    # PYTHONHASHSEED is fixed, which would break cross-worker partitioning.
    if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")

    if x is None:
        return 0
    if isinstance(x, tuple):
        h = 0x345678
        for i in x:
            h ^= portable_hash(i)
            h *= 1000003
            h &= sys.maxsize
        h ^= len(x)
        if h == -1:
            h = -2
        return int(h)
    return hash(x)


class BoundedFloat(float):
    """
    Bounded value is generated by approximate job, with confidence and low
    bound and high bound.

    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """
    def __new__(cls, mean, confidence, low, high):
        obj = float.__new__(cls, mean)
        obj.confidence = confidence
        obj.low = low
        obj.high = high
        return obj


def _parse_memory(s):
    """
    Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
    return the value in MB

    >>> _parse_memory("256m")
    256
    >>> _parse_memory("2g")
    2048
    """
    units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
    if s[-1].lower() not in units:
        raise ValueError("invalid format: " + s)
    return int(float(s[:-1]) * units[s[-1].lower()])


def _load_from_socket(sock_info, serializer):
    # Connect back to the driver's local result-serving socket and stream
    # deserialized records from it.
    port, auth_secret = sock_info
    sock = None
    # Support for both IPv4 and IPv6.
    # On most of IPv6-ready systems, IPv6 will take precedence.
    for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = socket.socket(af, socktype, proto)
        try:
            sock.settimeout(15)
            sock.connect(sa)
        except socket.error:
            sock.close()
            sock = None
            continue
        break
    if not sock:
        raise Exception("could not open socket")
    # The RDD materialization time is unpredicable, if we set a timeout for socket reading
    # operation, it will very possibly fail. See SPARK-18281.
    sock.settimeout(None)
    sockfile = sock.makefile("rwb", 65536)
    do_server_auth(sockfile, auth_secret)
    # The socket will be automatically closed when garbage-collected.
    return serializer.load_stream(sockfile)


def ignore_unicode_prefix(f):
    """
    Ignore the 'u' prefix of string in doc tests, to make it works
    in both python 2 and 3
    """
    if sys.version >= '3':
        # the representation of unicode string in Python 3 does not have prefix 'u',
        # so remove the prefix 'u' for doc tests
        literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
        f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
    return f


class Partitioner(object):
    # Pairs a partition count with the function mapping a key to a partition.
    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions


class RDD(object):

    """
    A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
    Represents an immutable, partitioned collection of elements that can be
    operated on in parallel.
""" def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())): self._jrdd = jrdd self.is_cached = False self.is_checkpointed = False self.ctx = ctx self._jrdd_deserializer = jrdd_deserializer self._id = jrdd.id() self.partitioner = None def _pickled(self): return self._reserialize(AutoBatchedSerializer(PickleSerializer())) def id(self): """ A unique ID for this RDD (within its SparkContext). """ return self._id def __repr__(self): return self._jrdd.toString() def __getnewargs__(self): # This method is called when attempting to pickle an RDD, which is always an error: raise Exception( "It appears that you are attempting to broadcast an RDD or reference an RDD from an " "action or transformation. RDD transformations and actions can only be invoked by the " "driver, not inside of other transformations; for example, " "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values " "transformation and count action cannot be performed inside of the rdd1.map " "transformation. For more information, see SPARK-5063." ) @property def context(self): """ The L{SparkContext} that this RDD was created on. """ return self.ctx def cache(self): """ Persist this RDD with the default storage level (C{MEMORY_ONLY}). """ self.is_cached = True self.persist(StorageLevel.MEMORY_ONLY) return self def persist(self, storageLevel=StorageLevel.MEMORY_ONLY): """ Set this RDD's storage level to persist its values across operations after the first time it is computed. This can only be used to assign a new storage level if the RDD does not have a storage level set yet. If no storage level is specified defaults to (C{MEMORY_ONLY}). >>> rdd = sc.parallelize(["b", "a", "c"]) >>> rdd.persist().is_cached True """ self.is_cached = True javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel) self._jrdd.persist(javaStorageLevel) return self def unpersist(self): """ Mark the RDD as non-persistent, and remove all blocks for it from memory and disk. 
""" self.is_cached = False self._jrdd.unpersist() return self def checkpoint(self): """ Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint directory set with L{SparkContext.setCheckpointDir()} and all references to its parent RDDs will be removed. This function must be called before any job has been executed on this RDD. It is strongly recommended that this RDD is persisted in memory, otherwise saving it on a file will require recomputation. """ self.is_checkpointed = True self._jrdd.rdd().checkpoint() def isCheckpointed(self): """ Return whether this RDD is checkpointed and materialized, either reliably or locally. """ return self._jrdd.rdd().isCheckpointed() def localCheckpoint(self): """ Mark this RDD for local checkpointing using Spark's existing caching layer. This method is for users who wish to truncate RDD lineages while skipping the expensive step of replicating the materialized data in a reliable distributed file system. This is useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX). Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed data is written to ephemeral local storage in the executors instead of to a reliable, fault-tolerant storage. The effect is that if an executor fails during the computation, the checkpointed data may no longer be accessible, causing an irrecoverable job failure. This is NOT safe to use with dynamic allocation, which removes executors along with their cached blocks. If you must use both features, you are advised to set L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value. The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used. """ self._jrdd.rdd().localCheckpoint() def isLocallyCheckpointed(self): """ Return whether this RDD is marked for local checkpointing. Exposed for testing. 
""" return self._jrdd.rdd().isLocallyCheckpointed() def getCheckpointFile(self): """ Gets the name of the file to which this RDD was checkpointed Not defined if RDD is checkpointed locally. """ checkpointFile = self._jrdd.rdd().getCheckpointFile() if checkpointFile.isDefined(): return checkpointFile.get() def map(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each element of this RDD. >>> rdd = sc.parallelize(["b", "a", "c"]) >>> sorted(rdd.map(lambda x: (x, 1)).collect()) [('a', 1), ('b', 1), ('c', 1)] """ def func(_, iterator): return map(fail_on_stopiteration(f), iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning) def flatMap(self, f, preservesPartitioning=False): """ Return a new RDD by first applying a function to all elements of this RDD, and then flattening the results. >>> rdd = sc.parallelize([2, 3, 4]) >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect()) [1, 1, 1, 2, 2, 3] >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect()) [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)] """ def func(s, iterator): return chain.from_iterable(map(fail_on_stopiteration(f), iterator)) return self.mapPartitionsWithIndex(func, preservesPartitioning) def mapPartitions(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD. >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> def f(iterator): yield sum(iterator) >>> rdd.mapPartitions(f).collect() [3, 7] """ def func(s, iterator): return f(iterator) return self.mapPartitionsWithIndex(func, preservesPartitioning) def mapPartitionsWithIndex(self, f, preservesPartitioning=False): """ Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. 
>>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithIndex(f).sum() 6 """ return PipelinedRDD(self, f, preservesPartitioning) def mapPartitionsWithSplit(self, f, preservesPartitioning=False): """ Deprecated: use mapPartitionsWithIndex instead. Return a new RDD by applying a function to each partition of this RDD, while tracking the index of the original partition. >>> rdd = sc.parallelize([1, 2, 3, 4], 4) >>> def f(splitIndex, iterator): yield splitIndex >>> rdd.mapPartitionsWithSplit(f).sum() 6 """ warnings.warn("mapPartitionsWithSplit is deprecated; " "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2) return self.mapPartitionsWithIndex(f, preservesPartitioning) def getNumPartitions(self): """ Returns the number of partitions in RDD >>> rdd = sc.parallelize([1, 2, 3, 4], 2) >>> rdd.getNumPartitions() 2 """ return self._jrdd.partitions().size() def filter(self, f): """ Return a new RDD containing only the elements that satisfy a predicate. >>> rdd = sc.parallelize([1, 2, 3, 4, 5]) >>> rdd.filter(lambda x: x % 2 == 0).collect() [2, 4] """ def func(iterator): return filter(fail_on_stopiteration(f), iterator) return self.mapPartitions(func, True) def distinct(self, numPartitions=None): """ Return a new RDD containing the distinct elements in this RDD. >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect()) [1, 2, 3] """ return self.map(lambda x: (x, None)) \ .reduceByKey(lambda x, _: x, numPartitions) \ .map(lambda x: x[0]) def sample(self, withReplacement, fraction, seed=None): """ Return a sampled subset of this RDD. 
:param withReplacement: can elements be sampled multiple times (replaced when sampled out) :param fraction: expected size of the sample as a fraction of this RDD's size without replacement: probability that each element is chosen; fraction must be [0, 1] with replacement: expected number of times each element is chosen; fraction must be >= 0 :param seed: seed for the random number generator .. note:: This is not guaranteed to provide exactly the fraction specified of the total count of the given :class:`DataFrame`. >>> rdd = sc.parallelize(range(100), 4) >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14 True """ assert fraction >= 0.0, "Negative fraction value: %s" % fraction return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True) def randomSplit(self, weights, seed=None): """ Randomly splits this RDD with the provided weights. :param weights: weights for splits, will be normalized if they don't sum to 1 :param seed: random seed :return: split RDDs in a list >>> rdd = sc.parallelize(range(500), 1) >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17) >>> len(rdd1.collect() + rdd2.collect()) 500 >>> 150 < rdd1.count() < 250 True >>> 250 < rdd2.count() < 350 True """ s = float(sum(weights)) cweights = [0.0] for w in weights: cweights.append(cweights[-1] + w / s) if seed is None: seed = random.randint(0, 2 ** 32 - 1) return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True) for lb, ub in zip(cweights, cweights[1:])] # this is ported from scala/spark/RDD.scala def takeSample(self, withReplacement, num, seed=None): """ Return a fixed-size sampled subset of this RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. 
        >>> rdd = sc.parallelize(range(0, 10))
        >>> len(rdd.takeSample(True, 20, 1))
        20
        >>> len(rdd.takeSample(False, 5, 2))
        5
        >>> len(rdd.takeSample(False, 15, 3))
        10
        """
        numStDev = 10.0

        if num < 0:
            raise ValueError("Sample size cannot be negative.")
        elif num == 0:
            return []

        initialCount = self.count()
        if initialCount == 0:
            return []

        rand = random.Random(seed)

        if (not withReplacement) and num >= initialCount:
            # shuffle current RDD and return
            samples = self.collect()
            rand.shuffle(samples)
            return samples

        maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
        if num > maxSampleSize:
            raise ValueError(
                "Sample size cannot be greater than %d." % maxSampleSize)

        fraction = RDD._computeFractionForSampleSize(
            num, initialCount, withReplacement)
        samples = self.sample(withReplacement, fraction, seed).collect()

        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < num:
            # TODO: add log warning for when more than one iteration was run
            seed = rand.randint(0, sys.maxsize)
            samples = self.sample(withReplacement, fraction, seed).collect()

        rand.shuffle(samples)

        return samples[0:num]

    @staticmethod
    def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
        """
        Returns a sampling rate that guarantees a sample of
        size >= sampleSizeLowerBound 99.99% of the time.

        How the sampling rate is determined:
        Let p = num / total, where num is the sample size and total is the
        total number of data points in the RDD. We're trying to compute
        q > p such that
          - when sampling with replacement, we're drawing each data point
            with prob_i ~ Pois(q), where we want to guarantee
            Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
            total), i.e. the failure rate of not having a sufficiently large
            sample < 0.0001.
            Setting q = p + 5 * sqrt(p/total) is sufficient to guarantee
            0.9999 success rate for num > 12, but we need a slightly larger
            q (9 empirically determined).
          - when sampling without replacement, we're drawing each data point
            with prob_i ~ Binomial(total, fraction) and our choice of q
            guarantees 1-delta, or 0.9999 success rate, where success rate is
            defined the same as in sampling with replacement.
        """
        fraction = float(sampleSizeLowerBound) / total
        if withReplacement:
            numStDev = 5
            if (sampleSizeLowerBound < 12):
                numStDev = 9
            return fraction + numStDev * sqrt(fraction / total)
        else:
            delta = 0.00005
            gamma = - log(delta) / total
            return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))

    def union(self, other):
        """
        Return the union of this RDD and another one.

        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if self._jrdd_deserializer == other._jrdd_deserializer:
            rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                      self._jrdd_deserializer)
        else:
            # These RDDs contain data in different serialized formats, so we
            # must normalize them to the default serializer.
            self_copy = self._reserialize()
            other_copy = other._reserialize()
            rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                      self.ctx.serializer)
        # Keep the partitioner only if both sides agree and no repartitioning
        # happened as part of the union.
        if (self.partitioner == other.partitioner and
                self.getNumPartitions() == rdd.getNumPartitions()):
            rdd.partitioner = self.partitioner
        return rdd

    def intersection(self, other):
        """
        Return the intersection of this RDD and another one. The output will
        not contain any duplicate elements, even if the input RDDs did.

        .. note:: This method performs a shuffle internally.

        >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
        >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
        >>> rdd1.intersection(rdd2).collect()
        [1, 2, 3]
        """
        # Keep only keys that have non-empty value groups on BOTH sides.
        return self.map(lambda v: (v, None)) \
            .cogroup(other.map(lambda v: (v, None))) \
            .filter(lambda k_vs: all(k_vs[1])) \
            .keys()

    def _reserialize(self, serializer=None):
        serializer = serializer or self.ctx.serializer
        if self._jrdd_deserializer != serializer:
            self = self.map(lambda x: x, preservesPartitioning=True)
            self._jrdd_deserializer = serializer
        return self

    def __add__(self, other):
        """
        Return the union of this RDD and another one.

        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> (rdd + rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if not isinstance(other, RDD):
            raise TypeError
        return self.union(other)

    def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
                                           ascending=True, keyfunc=lambda x: x):
        """
        Repartition the RDD according to the given partitioner and, within each resulting partition,
        sort records by their keys.

        >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
        >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
        >>> rdd2.glom().collect()
        [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()

        memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
        serializer = self._jrdd_deserializer

        def sortPartition(iterator):
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))

        return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)

    def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey().first()
        ('1', 3)
        >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()

        memory = self._memory_limit()
        serializer = self._jrdd_deserializer

        def sortPartition(iterator):
            sort = ExternalSorter(memory * 0.9, serializer).sorted
            return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))

        if numPartitions == 1:
            if self.getNumPartitions() > 1:
                self = self.coalesce(1)
            return self.mapPartitions(sortPartition, True)

        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        rddSize = self.count()
        if not rddSize:
            return self  # empty RDD
        maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
        fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
        samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
        samples = sorted(samples, key=keyfunc)

        # we have numPartitions many parts but one of the them has
        # an implicit boundary
        bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
                  for i in range(0, numPartitions - 1)]

        def rangePartitioner(k):
            p = bisect.bisect_left(bounds, keyfunc(k))
            if ascending:
                return p
            else:
                return numPartitions - 1 - p

        return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)

    def sortBy(self, keyfunc,
               ascending=True, numPartitions=None):
        """
        Sorts this RDD by the given keyfunc

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
        [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        """
        return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()

    def glom(self):
        """
        Return an RDD created by coalescing all elements within each partition
        into a list.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> sorted(rdd.glom().collect())
        [[1, 2], [3, 4]]
        """
        def func(iterator):
            yield list(iterator)
        return self.mapPartitions(func)

    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.

        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                             other._jrdd_deserializer)
        return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)

    def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
        """
        Return an RDD of grouped items.

        >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
        >>> result = rdd.groupBy(lambda x: x % 2).collect()
        >>> sorted([(x, sorted(y)) for (x, y) in result])
        [(0, [2, 8]), (1, [1, 1, 3, 5])]
        """
        return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)

    @ignore_unicode_prefix
    def pipe(self, command, env=None, checkCode=False):
        """
        Return an RDD created by piping elements to a forked external process.

        >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
        [u'1', u'2', u'', u'3']

        :param checkCode: whether or not to check the return value of the shell command.
        """
        if env is None:
            env = dict()

        def func(iterator):
            pipe = Popen(
                shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

            def pipe_objs(out):
                for obj in iterator:
                    s = unicode(obj).rstrip('\n') + '\n'
                    out.write(s.encode('utf-8'))
                out.close()
            # Feed stdin from a background thread so reading stdout below
            # cannot deadlock on a full pipe buffer.
            Thread(target=pipe_objs, args=[pipe.stdin]).start()

            def check_return_code():
                pipe.wait()
                if checkCode and pipe.returncode:
                    raise Exception("Pipe function `%s' exited "
                                    "with error code %d" % (command, pipe.returncode))
                else:
                    # Empty generator: yields nothing, but chaining it after
                    # stdout defers the return-code check until EOF.
                    for i in range(0):
                        yield i
            return (x.rstrip(b'\n').decode('utf-8') for x in
                    chain(iter(pipe.stdout.readline, b''), check_return_code()))
        return self.mapPartitions(func)

    def foreach(self, f):
        """
        Applies a function to all elements of this RDD.

        >>> def f(x): print(x)
        >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
        """
        f = fail_on_stopiteration(f)

        def processPartition(iterator):
            for x in iterator:
                f(x)
            return iter([])
        self.mapPartitions(processPartition).count()  # Force evaluation

    def foreachPartition(self, f):
        """
        Applies a function to each partition of this RDD.

        >>> def f(iterator):
        ...     for x in iterator:
        ...          print(x)
        >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
        """
        def func(it):
            r = f(it)
            try:
                return iter(r)
            except TypeError:
                return iter([])
        self.mapPartitions(func).count()  # Force evaluation

    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.

        .. note:: This method should only be used if the resulting array is expected
            to be small, as all the data is loaded into the driver's memory.
        """
        with SCCallSiteSync(self.context) as css:
            sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
        return list(_load_from_socket(sock_info, self._jrdd_deserializer))

    def reduce(self, f):
        """
        Reduces the elements of this RDD using the specified commutative and
        associative binary operator. Currently reduces partitions locally.

        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
        15
        >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
        10
        >>> sc.parallelize([]).reduce(add)
        Traceback (most recent call last):
            ...
        ValueError: Can not reduce() empty RDD
        """
        f = fail_on_stopiteration(f)

        def func(iterator):
            iterator = iter(iterator)
            try:
                initial = next(iterator)
            except StopIteration:
                return
            yield reduce(f, iterator, initial)

        # Reduce each partition locally, then reduce the partition results
        # on the driver.
        vals = self.mapPartitions(func).collect()
        if vals:
            return reduce(f, vals)
        raise ValueError("Can not reduce() empty RDD")

    def treeReduce(self, f, depth=2):
        """
        Reduces the elements of this RDD in a multi-level tree pattern.

        :param depth: suggested depth of the tree (default: 2)

        >>> add = lambda x, y: x + y
        >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
        >>> rdd.treeReduce(add)
        -5
        >>> rdd.treeReduce(add, 1)
        -5
        >>> rdd.treeReduce(add, 2)
        -5
        >>> rdd.treeReduce(add, 5)
        -5
        >>> rdd.treeReduce(add, 10)
        -5
        """
        if depth < 1:
            raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

        zeroValue = None, True  # Use the second entry to indicate whether this is a dummy value.

        def op(x, y):
            if x[1]:
                return y
            elif y[1]:
                return x
            else:
                return f(x[0], y[0]), False

        reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
        if reduced[1]:
            raise ValueError("Cannot reduce empty RDD.")
        return reduced[0]

    def fold(self, zeroValue, op):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given associative function and a neutral "zero value."

        The function C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.

        This behaves somewhat differently from fold operations implemented
        for non-distributed collections in functional languages like Scala.
        This fold operation may be applied to partitions individually, and then
        fold those results into the final result, rather than apply the fold
        to each element sequentially in some defined ordering. For functions
        that are not commutative, the result may differ from that of a fold
        applied to a non-distributed collection.

        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
        15
        """
        op = fail_on_stopiteration(op)

        def func(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = op(acc, obj)
            yield acc
        # collecting result of mapPartitions here ensures that the copy of
        # zeroValue provided to each partition is unique from the one provided
        # to the final reduce call
        vals = self.mapPartitions(func).collect()
        return reduce(op, vals, zeroValue)

    def aggregate(self, zeroValue, seqOp, combOp):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given combine functions and a neutral "zero
        value."

        The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.

        The first function (seqOp) can return a different result type, U, than
        the type of this RDD.
Thus, we need one operation for merging a T into an U and one operation for merging two U >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1)) >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1])) >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp) (10, 4) >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp) (0, 0) """ seqOp = fail_on_stopiteration(seqOp) combOp = fail_on_stopiteration(combOp) def func(iterator): acc = zeroValue for obj in iterator: acc = seqOp(acc, obj) yield acc # collecting result of mapPartitions here ensures that the copy of # zeroValue provided to each partition is unique from the one provided # to the final reduce call vals = self.mapPartitions(func).collect() return reduce(combOp, vals, zeroValue) def treeAggregate(self, zeroValue, seqOp, combOp, depth=2): """ Aggregates the elements of this RDD in a multi-level tree pattern. :param depth: suggested depth of the tree (default: 2) >>> add = lambda x, y: x + y >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10) >>> rdd.treeAggregate(0, add, add) -5 >>> rdd.treeAggregate(0, add, add, 1) -5 >>> rdd.treeAggregate(0, add, add, 2) -5 >>> rdd.treeAggregate(0, add, add, 5) -5 >>> rdd.treeAggregate(0, add, add, 10) -5 """ if depth < 1: raise ValueError("Depth cannot be smaller than 1 but got %d." % depth) if self.getNumPartitions() == 0: return zeroValue def aggregatePartition(iterator): acc = zeroValue for obj in iterator: acc = seqOp(acc, obj) yield acc partiallyAggregated = self.mapPartitions(aggregatePartition) numPartitions = partiallyAggregated.getNumPartitions() scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2) # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree # aggregation. 
while numPartitions > scale + numPartitions / scale: numPartitions /= scale curNumPartitions = int(numPartitions) def mapPartition(i, iterator): for obj in iterator: yield (i % curNumPartitions, obj) partiallyAggregated = partiallyAggregated \ .mapPartitionsWithIndex(mapPartition) \ .reduceByKey(combOp, curNumPartitions) \ .values() return partiallyAggregated.reduce(combOp) def max(self, key=None): """ Find the maximum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0]) >>> rdd.max() 43.0 >>> rdd.max(key=str) 5.0 """ if key is None: return self.reduce(max) return self.reduce(lambda a, b: max(a, b, key=key)) def min(self, key=None): """ Find the minimum item in this RDD. :param key: A function used to generate key for comparing >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0]) >>> rdd.min() 2.0 >>> rdd.min(key=str) 10.0 """ if key is None: return self.reduce(min) return self.reduce(lambda a, b: min(a, b, key=key)) def sum(self): """ Add up the elements in this RDD. >>> sc.parallelize([1.0, 2.0, 3.0]).sum() 6.0 """ return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add) def count(self): """ Return the number of elements in this RDD. >>> sc.parallelize([2, 3, 4]).count() 3 """ return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum() def stats(self): """ Return a L{StatCounter} object that captures the mean, variance and count of the RDD's elements in one operation. """ def redFunc(left_counter, right_counter): return left_counter.mergeStats(right_counter) return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc) def histogram(self, buckets): """ Compute a histogram using the provided buckets. The buckets are all open to the right except for the last which is closed. e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50], which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1 and 50 we would have a histogram of 1,0,1. 
        If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) inseration to O(1) per
        element (where n is the number of buckets).

        Buckets must be sorted, not contain any duplicates, and have
        at least two elements.

        If `buckets` is a number, it will generate buckets which are
        evenly spaced between the minimum and maximum of the RDD. For
        example, if the min value is 0 and the max is 100, given `buckets`
        as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
        be at least 1. An exception is raised if the RDD contains infinity.
        If the elements in the RDD do not vary (max == min), a single bucket
        will be used.

        The return value is a tuple of buckets and histogram.

        >>> rdd = sc.parallelize(range(51))
        >>> rdd.histogram(2)
        ([0, 25, 50], [25, 26])
        >>> rdd.histogram([0, 5, 25, 50])
        ([0, 5, 25, 50], [5, 20, 26])
        >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
        ([0, 15, 30, 45, 60], [15, 15, 15, 6])
        >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
        >>> rdd.histogram(("a", "b", "c"))
        (('a', 'b', 'c'), [2, 2])
        """
        if isinstance(buckets, int):
            if buckets < 1:
                raise ValueError("number of buckets must be >= 1")

            # filter out non-comparable elements
            def comparable(x):
                if x is None:
                    return False
                if type(x) is float and isnan(x):
                    return False
                return True

            filtered = self.filter(comparable)

            # faster than stats()
            def minmax(a, b):
                return min(a[0], b[0]), max(a[1], b[1])
            try:
                minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
            except TypeError as e:
                if " empty " in str(e):
                    raise ValueError("can not generate buckets from empty RDD")
                raise

            if minv == maxv or buckets == 1:
                return [minv, maxv], [filtered.count()]

            try:
                inc = (maxv - minv) / buckets
            except TypeError:
                raise TypeError("Can not generate buckets with non-number in RDD")

            if isinf(inc):
                raise ValueError("Can not generate buckets with infinite value")

            # keep them as integer if possible
            inc = int(inc)
            if inc * buckets != maxv - minv:
                # integer spacing doesn't divide the range evenly; fall back
                # to float spacing
                inc = (maxv - minv) * 1.0 / buckets

            buckets = [i * inc + minv for i in range(buckets)]
            buckets.append(maxv)  # fix accumulated error
            even = True

        elif isinstance(buckets, (list, tuple)):
            if len(buckets) < 2:
                raise ValueError("buckets should have more than one value")

            if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
                raise ValueError("can not have None or NaN in buckets")

            if sorted(buckets) != list(buckets):
                raise ValueError("buckets should be sorted")

            if len(set(buckets)) != len(buckets):
                raise ValueError("buckets should not contain duplicated values")

            minv = buckets[0]
            maxv = buckets[-1]
            even = False
            inc = None
            try:
                steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
            except TypeError:
                pass  # objects in buckets do not support '-'
            else:
                if max(steps) - min(steps) < 1e-10:  # handle precision errors
                    even = True
                    inc = (maxv - minv) / (len(buckets) - 1)

        else:
            raise TypeError("buckets should be a list or tuple or number(int or long)")

        def histogram(iterator):
            counters = [0] * len(buckets)
            for i in iterator:
                # skip None/NaN and out-of-range values
                if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                    continue
                # even buckets -> O(1) index; otherwise binary search
                t = (int((i - minv) / inc) if even
                     else bisect.bisect_right(buckets, i) - 1)
                counters[t] += 1
            # add last two together: the final bucket is closed on the right
            last = counters.pop()
            counters[-1] += last
            return [counters]

        def mergeCounters(a, b):
            return [i + j for i, j in zip(a, b)]

        return buckets, self.mapPartitions(histogram).reduce(mergeCounters)

    def mean(self):
        """
        Compute the mean of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).mean()
        2.0
        """
        return self.stats().mean()

    def variance(self):
        """
        Compute the variance of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).variance()
        0.666...
        """
        return self.stats().variance()

    def stdev(self):
        """
        Compute the standard deviation of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).stdev()
        0.816...
""" return self.stats().stdev() def sampleStdev(self): """ Compute the sample standard deviation of this RDD's elements (which corrects for bias in estimating the standard deviation by dividing by N-1 instead of N). >>> sc.parallelize([1, 2, 3]).sampleStdev() 1.0 """ return self.stats().sampleStdev() def sampleVariance(self): """ Compute the sample variance of this RDD's elements (which corrects for bias in estimating the variance by dividing by N-1 instead of N). >>> sc.parallelize([1, 2, 3]).sampleVariance() 1.0 """ return self.stats().sampleVariance() def countByValue(self): """ Return the count of each unique value in this RDD as a dictionary of (value, count) pairs. >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items()) [(1, 2), (2, 3)] """ def countPartition(iterator): counts = defaultdict(int) for obj in iterator: counts[obj] += 1 yield counts def mergeMaps(m1, m2): for k, v in m2.items(): m1[k] += v return m1 return self.mapPartitions(countPartition).reduce(mergeMaps) def top(self, num, key=None): """ Get the top N elements from an RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. .. note:: It returns the list sorted in descending order. >>> sc.parallelize([10, 4, 2, 12, 3]).top(1) [12] >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2) [6, 5] >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str) [4, 3, 2] """ def topIterator(iterator): yield heapq.nlargest(num, iterator, key=key) def merge(a, b): return heapq.nlargest(num, a + b, key=key) return self.mapPartitions(topIterator).reduce(merge) def takeOrdered(self, num, key=None): """ Get the N elements from an RDD ordered in ascending order or as specified by the optional key function. .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. 
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6) [1, 2, 3, 4, 5, 6] >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x) [10, 9, 7, 6, 5, 4] """ def merge(a, b): return heapq.nsmallest(num, a + b, key) return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge) def take(self, num): """ Take the first num elements of the RDD. It works by first scanning one partition, and use the results from that partition to estimate the number of additional partitions needed to satisfy the limit. Translated from the Scala implementation in RDD#take(). .. note:: this method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2) [2, 3] >>> sc.parallelize([2, 3, 4, 5, 6]).take(10) [2, 3, 4, 5, 6] >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3) [91, 92, 93] """ items = [] totalParts = self.getNumPartitions() partsScanned = 0 while len(items) < num and partsScanned < totalParts: # The number of partitions to try in this iteration. # It is ok for this number to be greater than totalParts because # we actually cap it at totalParts in runJob. numPartsToTry = 1 if partsScanned > 0: # If we didn't find any rows after the previous iteration, # quadruple and retry. Otherwise, interpolate the number of # partitions we need to try, but overestimate it by 50%. # We also cap the estimation in the end. 
                if len(items) == 0:
                    numPartsToTry = partsScanned * 4
                else:
                    # the first parameter of max is >=1 whenever partsScanned >= 2
                    numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                    numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)

            left = num - len(items)

            def takeUpToNumLeft(iterator):
                # Pull at most `left` items; the explicit try/except avoids
                # StopIteration escaping from the generator.
                iterator = iter(iterator)
                taken = 0
                while taken < left:
                    try:
                        yield next(iterator)
                    except StopIteration:
                        return
                    taken += 1

            p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
            res = self.context.runJob(self, takeUpToNumLeft, p)

            items += res
            partsScanned += numPartsToTry

        return items[:num]

    def first(self):
        """
        Return the first element in this RDD.

        >>> sc.parallelize([2, 3, 4]).first()
        2
        >>> sc.parallelize([]).first()
        Traceback (most recent call last):
            ...
        ValueError: RDD is empty
        """
        rs = self.take(1)
        if rs:
            return rs[0]
        raise ValueError("RDD is empty")

    def isEmpty(self):
        """
        Returns true if and only if the RDD contains no elements at all.

        .. note:: an RDD may be empty even when it has at least 1 partition.

        >>> sc.parallelize([]).isEmpty()
        True
        >>> sc.parallelize([1]).isEmpty()
        False
        """
        return self.getNumPartitions() == 0 or len(self.take(1)) == 0

    def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
        converted for output using either user specified converters or, by default,
        L{org.apache.spark.api.python.JavaToWritableConverter}.

        :param conf: Hadoop job configuration, passed in as a dict
        :param keyConverter: (None by default)
        :param valueConverter: (None by default)
        """
        jconf = self.ctx._dictToJavaMap(conf)
        pickledRDD = self._pickled()
        # final True selects the new (mapreduce) OutputFormat API on the JVM side
        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
                                                    keyConverter, valueConverter, True)

    def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                               keyConverter=None, valueConverter=None, conf=None):
        """
        Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
        system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
        will be inferred if not specified. Keys and values are converted for output using either
        user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
        C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
        of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

        :param path: path to Hadoop file
        :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
               (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
        :param keyClass: fully qualified classname of key Writable class
               (e.g. "org.apache.hadoop.io.IntWritable", None by default)
        :param valueClass: fully qualified classname of value Writable class
               (e.g.
"org.apache.hadoop.io.Text", None by default) :param keyConverter: (None by default) :param valueConverter: (None by default) :param conf: Hadoop job configuration, passed in as a dict (None by default) """ jconf = self.ctx._dictToJavaMap(conf) pickledRDD = self._pickled() self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path, outputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf) def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None): """ Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file system, using the old Hadoop OutputFormat API (mapred package). Keys/values are converted for output using either user specified converters or, by default, L{org.apache.spark.api.python.JavaToWritableConverter}. :param conf: Hadoop job configuration, passed in as a dict :param keyConverter: (None by default) :param valueConverter: (None by default) """ jconf = self.ctx._dictToJavaMap(conf) pickledRDD = self._pickled() self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf, keyConverter, valueConverter, False) def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None, keyConverter=None, valueConverter=None, conf=None, compressionCodecClass=None): """ Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file system, using the old Hadoop OutputFormat API (mapred package). Key and value types will be inferred if not specified. Keys and values are converted for output using either user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The C{conf} is applied on top of the base Hadoop conf associated with the SparkContext of this RDD to create a merged Hadoop MapReduce job configuration for saving the data. :param path: path to Hadoop file :param outputFormatClass: fully qualified classname of Hadoop OutputFormat (e.g. 
"org.apache.hadoop.mapred.SequenceFileOutputFormat") :param keyClass: fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.IntWritable", None by default) :param valueClass: fully qualified classname of value Writable class (e.g. "org.apache.hadoop.io.Text", None by default) :param keyConverter: (None by default) :param valueConverter: (None by default) :param conf: (None by default) :param compressionCodecClass: (None by default) """ jconf = self.ctx._dictToJavaMap(conf) pickledRDD = self._pickled() self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path, outputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf, compressionCodecClass) def saveAsSequenceFile(self, path, compressionCodecClass=None): """ Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file system, using the L{org.apache.hadoop.io.Writable} types that we convert from the RDD's key and value types. The mechanism is as follows: 1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects. 2. Keys and values of this Java RDD are converted to Writables and written out. :param path: path to sequence file :param compressionCodecClass: (None by default) """ pickledRDD = self._pickled() self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True, path, compressionCodecClass) def saveAsPickleFile(self, path, batchSize=10): """ Save this RDD as a SequenceFile of serialized objects. The serializer used is L{pyspark.serializers.PickleSerializer}, default batch size is 10. 
        >>> tmpFile = NamedTemporaryFile(delete=True)
        >>> tmpFile.close()
        >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
        >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
        ['1', '2', 'rdd', 'spark']
        """
        # batchSize == 0 means "auto" batching; otherwise use a fixed batch.
        if batchSize == 0:
            ser = AutoBatchedSerializer(PickleSerializer())
        else:
            ser = BatchedSerializer(PickleSerializer(), batchSize)
        self._reserialize(ser)._jrdd.saveAsObjectFile(path)

    @ignore_unicode_prefix
    def saveAsTextFile(self, path, compressionCodecClass=None):
        """
        Save this RDD as a text file, using string representations of elements.

        @param path: path to text file
        @param compressionCodecClass: (None by default) string i.e.
            "org.apache.hadoop.io.compress.GzipCodec"

        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
        >>> from fileinput import input
        >>> from glob import glob
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'

        Empty lines are tolerated when saving to text files.

        >>> tempFile2 = NamedTemporaryFile(delete=True)
        >>> tempFile2.close()
        >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
        >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
        '\\n\\n\\nbar\\nfoo\\n'

        Using compressionCodecClass

        >>> tempFile3 = NamedTemporaryFile(delete=True)
        >>> tempFile3.close()
        >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
        >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
        >>> from fileinput import input, hook_compressed
        >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
        >>> b''.join(result).decode('utf-8')
        u'bar\\nfoo\\n'
        """
        def func(split, iterator):
            # Coerce every element to UTF-8 encoded bytes before writing.
            for x in iterator:
                if not isinstance(x, (unicode, bytes)):
                    x = unicode(x)
                if isinstance(x, unicode):
                    x = x.encode("utf-8")
                yield x
        keyed = self.mapPartitionsWithIndex(func)
        keyed._bypass_serializer = True
        if compressionCodecClass:
            compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
        else:
            keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)

    # Pair functions

    def collectAsMap(self):
        """
        Return the key-value pairs in this RDD to the master as a dictionary.

        .. note:: this method should only be used if the resulting data is expected
            to be small, as all the data is loaded into the driver's memory.

        >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
        >>> m[1]
        2
        >>> m[3]
        4
        """
        return dict(self.collect())

    def keys(self):
        """
        Return an RDD with the keys of each tuple.

        >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
        >>> m.collect()
        [1, 3]
        """
        return self.map(lambda x: x[0])

    def values(self):
        """
        Return an RDD with the values of each tuple.
        >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
        >>> m.collect()
        [2, 4]
        """
        return self.map(lambda x: x[1])

    def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
        """
        Merge the values for each key using an associative and commutative reduce function.

        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.

        Output will be partitioned with C{numPartitions} partitions, or
        the default parallelism level if C{numPartitions} is not specified.
        Default partitioner is hash-partition.

        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKey(add).collect())
        [('a', 2), ('b', 1)]
        """
        # reduceByKey is combineByKey where create/merge/combine are all `func`.
        return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)

    def reduceByKeyLocally(self, func):
        """
        Merge the values for each key using an associative and commutative reduce function, but
        return the results immediately to the master as a dictionary.

        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.

        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKeyLocally(add).items())
        [('a', 2), ('b', 1)]
        """
        func = fail_on_stopiteration(func)

        def reducePartition(iterator):
            m = {}
            for k, v in iterator:
                m[k] = func(m[k], v) if k in m else v
            yield m

        def mergeMaps(m1, m2):
            for k, v in m2.items():
                m1[k] = func(m1[k], v) if k in m1 else v
            return m1
        return self.mapPartitions(reducePartition).reduce(mergeMaps)

    def countByKey(self):
        """
        Count the number of elements for each key, and return the result to the
        master as a dictionary.

        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.countByKey().items())
        [('a', 2), ('b', 1)]
        """
        return self.map(lambda x: x[0]).countByValue()

    def join(self, other, numPartitions=None):
        """
        Return an RDD containing all pairs of elements with matching keys in
        C{self} and C{other}.

        Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
        (k, v1) is in C{self} and (k, v2) is in C{other}.

        Performs a hash join across the cluster.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("a", 3)])
        >>> sorted(x.join(y).collect())
        [('a', (1, 2)), ('a', (1, 3))]
        """
        return python_join(self, other, numPartitions)

    def leftOuterJoin(self, other, numPartitions=None):
        """
        Perform a left outer join of C{self} and C{other}.

        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in C{other} have key k.

        Hash-partitions the resulting RDD into the given number of partitions.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.leftOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None))]
        """
        return python_left_outer_join(self, other, numPartitions)

    def rightOuterJoin(self, other, numPartitions=None):
        """
        Perform a right outer join of C{self} and C{other}.

        For each element (k, w) in C{other}, the resulting RDD will either
        contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
        if no elements in C{self} have key k.

        Hash-partitions the resulting RDD into the given number of partitions.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(y.rightOuterJoin(x).collect())
        [('a', (2, 1)), ('b', (None, 4))]
        """
        return python_right_outer_join(self, other, numPartitions)

    def fullOuterJoin(self, other, numPartitions=None):
        """
        Perform a right outer join of C{self} and C{other}.
        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in C{other} have key k.

        Similarly, for each element (k, w) in C{other}, the resulting RDD will
        either contain all pairs (k, (v, w)) for v in C{self}, or the pair
        (k, (None, w)) if no elements in C{self} have key k.

        Hash-partitions the resulting RDD into the given number of partitions.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("c", 8)])
        >>> sorted(x.fullOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
        """
        return python_full_outer_join(self, other, numPartitions)

    # TODO: add option to control map-side combining
    # portable_hash is used as default, because builtin hash of None is different
    # cross machines.
    def partitionBy(self, numPartitions, partitionFunc=portable_hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.

        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> len(set(sets[0]).intersection(set(sets[1])))
        0
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()
        partitioner = Partitioner(numPartitions, partitionFunc)
        # Already partitioned the right way: nothing to do.
        if self.partitioner == partitioner:
            return self

        # Transferring O(n) objects to Java is too expensive.
        # Instead, we'll form the hash buckets in Python,
        # transferring O(numPartitions) objects to Java.
        # Each object is a (splitNumber, [objects]) pair.
        # In order to avoid too huge objects, the objects are
        # grouped into chunks.
        outputSerializer = self.ctx._unbatched_serializer

        limit = (_parse_memory(self.ctx._conf.get(
            "spark.python.worker.memory", "512m")) / 2)

        def add_shuffle_key(split, iterator):
            # NOTE(review): the loop variable below shadows the `split`
            # parameter; harmless here since the parameter is never read.
            buckets = defaultdict(list)
            c, batch = 0, min(10 * numPartitions, 1000)

            for k, v in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
                c += 1

                # check used memory and avg size of chunk of objects
                if (c % 1000 == 0 and get_used_memory() > limit
                        or c > batch):
                    n, size = len(buckets), 0
                    for split in list(buckets.keys()):
                        yield pack_long(split)
                        d = outputSerializer.dumps(buckets[split])
                        del buckets[split]
                        yield d
                        size += len(d)

                    avg = int(size / n) >> 20
                    # let 1M < avg < 10M
                    if avg < 1:
                        batch *= 1.5
                    elif avg > 10:
                        batch = max(int(batch / 1.5), 1)
                    c = 0

            for split, items in buckets.items():
                yield pack_long(split)
                yield outputSerializer.dumps(items)

        keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
        keyed._bypass_serializer = True
        with SCCallSiteSync(self.context) as css:
            pairRDD = self.ctx._jvm.PairwiseRDD(
                keyed._jrdd.rdd()).asJavaPairRDD()
            jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                           id(partitionFunc))
        jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        rdd.partitioner = partitioner
        return rdd

    # TODO: add control over map-side aggregation
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numPartitions=None, partitionFunc=portable_hash):
        """
        Generic function to combine the elements for each key using a custom
        set of aggregation functions.

        Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
        type" C.
        Users provide three functions:

            - C{createCombiner}, which turns a V into a C (e.g., creates
              a one-element list)
            - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
              a list)
            - C{mergeCombiners}, to combine two C's into a single one (e.g., merges
              the lists)

        To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
        modify and return their first argument instead of creating a new C.

        In addition, users can control the partitioning of the output RDD.

        .. note:: V and C can be different -- for example, one might group an RDD of type
            (Int, Int) into an RDD of type (Int, List[Int]).

        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
        >>> def to_list(a):
        ...     return [a]
        ...
        >>> def append(a, b):
        ...     a.append(b)
        ...     return a
        ...
        >>> def extend(a, b):
        ...     a.extend(b)
        ...     return a
        ...
        >>> sorted(x.combineByKey(to_list, append, extend).collect())
        [('a', [1, 2]), ('b', [1])]
        """
        if numPartitions is None:
            numPartitions = self._defaultReducePartitions()

        serializer = self.ctx.serializer
        memory = self._memory_limit()
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

        def combineLocally(iterator):
            # Map-side combine, spilling to disk when over ~90% of the budget.
            merger = ExternalMerger(agg, memory * 0.9, serializer)
            merger.mergeValues(iterator)
            return merger.items()

        locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

        def _mergeCombiners(iterator):
            # Reduce-side merge of the per-partition combiners.
            merger = ExternalMerger(agg, memory, serializer)
            merger.mergeCombiners(iterator)
            return merger.items()

        return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)

    def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
                       partitionFunc=portable_hash):
        """
        Aggregate the values of each key, using given combine functions and a neutral
        "zero value". This function can return a different result type, U, than the type
        of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's, The former operation is used for merging
        values within a partition, and the latter is used for merging values between
        partitions. To avoid memory allocation, both of these functions are
        allowed to modify and return their first argument instead of creating a new U.
        """
        def createZero():
            # Deep-copy so each key gets its own mutable zero value.
            return copy.deepcopy(zeroValue)

        return self.combineByKey(
            lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)

    def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
        """
        Merge the values for each key using an associative function "func"
        and a neutral "zeroValue" which may be added to the result an
        arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication.).

        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> from operator import add
        >>> sorted(rdd.foldByKey(0, add).collect())
        [('a', 2), ('b', 1)]
        """
        def createZero():
            return copy.deepcopy(zeroValue)

        return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
                                 partitionFunc)

    def _memory_limit(self):
        # Per-worker memory budget for external merging/spilling.
        return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))

    # TODO: support variant with custom partitioner
    def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
        """
        Group the values for each key in the RDD into a single sequence.
        Hash-partitions the resulting RDD with numPartitions partitions.

        .. note:: If you are grouping in order to perform an aggregation (such as a
            sum or average) over each key, using reduceByKey or aggregateByKey will
            provide much better performance.
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.groupByKey().mapValues(len).collect())
        [('a', 2), ('b', 1)]
        >>> sorted(rdd.groupByKey().mapValues(list).collect())
        [('a', [1, 1]), ('b', [1])]
        """
        def createCombiner(x):
            return [x]

        def mergeValue(xs, x):
            xs.append(x)
            return xs

        def mergeCombiners(a, b):
            a.extend(b)
            return a

        memory = self._memory_limit()
        serializer = self._jrdd_deserializer
        agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

        def combine(iterator):
            # Map-side grouping with spill-to-disk support.
            merger = ExternalMerger(agg, memory * 0.9, serializer)
            merger.mergeValues(iterator)
            return merger.items()

        locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
        shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

        def groupByKey(it):
            # Reduce-side grouping; ExternalGroupBy can spill very large groups.
            merger = ExternalGroupBy(agg, memory, serializer)
            merger.mergeCombiners(it)
            return merger.items()

        return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)

    def flatMapValues(self, f):
        """
        Pass each value in the key-value pair RDD through a flatMap function
        without changing the keys; this also retains the original RDD's
        partitioning.

        >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
        >>> def f(x): return x
        >>> x.flatMapValues(f).collect()
        [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
        """
        flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
        return self.flatMap(flat_map_fn, preservesPartitioning=True)

    def mapValues(self, f):
        """
        Pass each value in the key-value pair RDD through a map function
        without changing the keys; this also retains the original RDD's
        partitioning.

        >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
        >>> def f(x): return len(x)
        >>> x.mapValues(f).collect()
        [('a', 3), ('b', 1)]
        """
        map_values_fn = lambda kv: (kv[0], f(kv[1]))
        return self.map(map_values_fn, preservesPartitioning=True)

    def groupWith(self, other, *others):
        """
        Alias for cogroup but with support for multiple RDDs.

        >>> w = sc.parallelize([("a", 5), ("b", 6)])
        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> z = sc.parallelize([("b", 42)])
        >>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
        [('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]

        """
        return python_cogroup((self, other) + others, numPartitions=None)

    # TODO: add variant with custom partitioner
    def cogroup(self, other, numPartitions=None):
        """
        For each key k in C{self} or C{other}, return a resulting RDD that
        contains a tuple with the list of values for that key in C{self} as
        well as C{other}.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
        [('a', ([1], [2])), ('b', ([4], []))]
        """
        return python_cogroup((self, other), numPartitions)

    def sampleByKey(self, withReplacement, fractions, seed=None):
        """
        Return a subset of this RDD sampled by key (via stratified sampling).
        Create a sample of this RDD using variable sampling rates for
        different keys as specified by fractions, a key to sampling rate map.

        >>> fractions = {"a": 0.2, "b": 0.1}
        >>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
        >>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
        >>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
        True
        >>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
        True
        >>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
        True
        """
        for fraction in fractions.values():
            assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        return self.mapPartitionsWithIndex(
            RDDStratifiedSampler(withReplacement, fractions, seed).func, True)

    def subtractByKey(self, other, numPartitions=None):
        """
        Return each (key, value) pair in C{self} that has no pair with matching
        key in C{other}.
        >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
        >>> y = sc.parallelize([("a", 3), ("c", None)])
        >>> sorted(x.subtractByKey(y).collect())
        [('b', 4), ('b', 5)]
        """
        def filter_func(pair):
            key, (val1, val2) = pair
            # keep keys present in self but absent from other; relies on the
            # truthiness of the cogrouped value iterables
            return val1 and not val2
        return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])

    def subtract(self, other, numPartitions=None):
        """
        Return each value in C{self} that is not contained in C{other}.

        >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
        >>> y = sc.parallelize([("a", 3), ("c", None)])
        >>> sorted(x.subtract(y).collect())
        [('a', 1), ('b', 4), ('b', 5)]
        """
        # note: here 'True' is just a placeholder
        rdd = other.map(lambda x: (x, True))
        return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()

    def keyBy(self, f):
        """
        Creates tuples of the elements in this RDD by applying C{f}.

        >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
        >>> y = sc.parallelize(zip(range(0,5), range(0,5)))
        >>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
        [(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
        """
        return self.map(lambda x: (f(x), x))

    def repartition(self, numPartitions):
        """
        Return a new RDD that has exactly numPartitions partitions.

        Can increase or decrease the level of parallelism in this RDD.
        Internally, this uses a shuffle to redistribute data.
        If you are decreasing the number of partitions in this RDD, consider
        using `coalesce`, which can avoid performing a shuffle.

        >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
        >>> sorted(rdd.glom().collect())
        [[1], [2, 3], [4, 5], [6, 7]]
        >>> len(rdd.repartition(2).glom().collect())
        2
        >>> len(rdd.repartition(10).glom().collect())
        10
        """
        return self.coalesce(numPartitions, shuffle=True)

    def coalesce(self, numPartitions, shuffle=False):
        """
        Return a new RDD that is reduced into `numPartitions` partitions.

        >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
        [[1], [2, 3], [4, 5]]
        >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
        [[1, 2, 3, 4, 5]]
        """
        if shuffle:
            # Decrease the batch size in order to distribute evenly the elements across output
            # partitions. Otherwise, repartition will possibly produce highly skewed partitions.
            batchSize = min(10, self.ctx._batchSize or 1024)
            ser = BatchedSerializer(PickleSerializer(), batchSize)
            selfCopy = self._reserialize(ser)
            jrdd_deserializer = selfCopy._jrdd_deserializer
            jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
        else:
            jrdd_deserializer = self._jrdd_deserializer
            jrdd = self._jrdd.coalesce(numPartitions, shuffle)
        return RDD(jrdd, self.ctx, jrdd_deserializer)

    def zip(self, other):
        """
        Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD second element in each RDD, etc. Assumes
        that the two RDDs have the same number of partitions and the same
        number of elements in each partition (e.g. one was made through
        a map on the other).
>>> x = sc.parallelize(range(0,5)) >>> y = sc.parallelize(range(1000, 1005)) >>> x.zip(y).collect() [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)] """ def get_batch_size(ser): if isinstance(ser, BatchedSerializer): return ser.batchSize return 1 # not batched def batch_as(rdd, batchSize): return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize)) my_batch = get_batch_size(self._jrdd_deserializer) other_batch = get_batch_size(other._jrdd_deserializer) if my_batch != other_batch or not my_batch: # use the smallest batchSize for both of them batchSize = min(my_batch, other_batch) if batchSize <= 0: # auto batched or unlimited batchSize = 100 other = batch_as(other, batchSize) self = batch_as(self, batchSize) if self.getNumPartitions() != other.getNumPartitions(): raise ValueError("Can only zip with RDD which has the same number of partitions") # There will be an Exception in JVM if there are different number # of items in each partitions. pairRDD = self._jrdd.zip(other._jrdd) deserializer = PairDeserializer(self._jrdd_deserializer, other._jrdd_deserializer) return RDD(pairRDD, self.ctx, deserializer) def zipWithIndex(self): """ Zips this RDD with its element indices. The ordering is first based on the partition index and then the ordering of items within each partition. So the first item in the first partition gets index 0, and the last item in the last partition receives the largest index. This method needs to trigger a spark job when this RDD contains more than one partitions. 
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect() [('a', 0), ('b', 1), ('c', 2), ('d', 3)] """ starts = [0] if self.getNumPartitions() > 1: nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect() for i in range(len(nums) - 1): starts.append(starts[-1] + nums[i]) def func(k, it): for i, v in enumerate(it, starts[k]): yield v, i return self.mapPartitionsWithIndex(func) def zipWithUniqueId(self): """ Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k, 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method won't trigger a spark job, which is different from L{zipWithIndex} >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect() [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)] """ n = self.getNumPartitions() def func(k, it): for i, v in enumerate(it): yield v, i * n + k return self.mapPartitionsWithIndex(func) def name(self): """ Return the name of this RDD. """ n = self._jrdd.name() if n: return n @ignore_unicode_prefix def setName(self, name): """ Assign a name to this RDD. >>> rdd1 = sc.parallelize([1, 2]) >>> rdd1.setName('RDD1').name() u'RDD1' """ self._jrdd.setName(name) return self def toDebugString(self): """ A description of this RDD and its recursive dependencies for debugging. """ debug_string = self._jrdd.toDebugString() if debug_string: return debug_string.encode('utf-8') def getStorageLevel(self): """ Get the RDD's current storage level. 
>>> rdd1 = sc.parallelize([1,2]) >>> rdd1.getStorageLevel() StorageLevel(False, False, False, False, 1) >>> print(rdd1.getStorageLevel()) Serialized 1x Replicated """ java_storage_level = self._jrdd.getStorageLevel() storage_level = StorageLevel(java_storage_level.useDisk(), java_storage_level.useMemory(), java_storage_level.useOffHeap(), java_storage_level.deserialized(), java_storage_level.replication()) return storage_level def _defaultReducePartitions(self): """ Returns the default number of partitions to use during reduce tasks (e.g., groupBy). If spark.default.parallelism is set, then we'll use the value from SparkContext defaultParallelism, otherwise we'll use the number of partitions in this RDD. This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will be inherent. """ if self.ctx._conf.contains("spark.default.parallelism"): return self.ctx.defaultParallelism else: return self.getNumPartitions() def lookup(self, key): """ Return the list of values in the RDD for key `key`. This operation is done efficiently if the RDD has a known partitioner by only searching the partition that the key maps to. >>> l = range(1000) >>> rdd = sc.parallelize(zip(l, l), 10) >>> rdd.lookup(42) # slow [42] >>> sorted = rdd.sortByKey() >>> sorted.lookup(42) # fast [42] >>> sorted.lookup(1024) [] >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey() >>> list(rdd2.lookup(('a', 'b'))[0]) ['c'] """ values = self.filter(lambda kv: kv[0] == key).values() if self.partitioner is not None: return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)]) return values.collect() def _to_java_object_rdd(self): """ Return a JavaRDD of Object by unpickling It will convert each Python object into Java object by Pyrolite, whenever the RDD is serialized in batch or not. 
""" rdd = self._pickled() return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True) def countApprox(self, timeout, confidence=0.95): """ .. note:: Experimental Approximate version of count() that returns a potentially incomplete result within a timeout, even if not all tasks have finished. >>> rdd = sc.parallelize(range(1000), 10) >>> rdd.countApprox(1000, 1.0) 1000 """ drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))]) return int(drdd.sumApprox(timeout, confidence)) def sumApprox(self, timeout, confidence=0.95): """ .. note:: Experimental Approximate operation to return the sum within a timeout or meet the confidence. >>> rdd = sc.parallelize(range(1000), 10) >>> r = sum(range(1000)) >>> abs(rdd.sumApprox(1000) - r) / r < 0.05 True """ jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd() jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd()) r = jdrdd.sumApprox(timeout, confidence).getFinalValue() return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high()) def meanApprox(self, timeout, confidence=0.95): """ .. note:: Experimental Approximate operation to return the mean within a timeout or meet the confidence. >>> rdd = sc.parallelize(range(1000), 10) >>> r = sum(range(1000)) / 1000.0 >>> abs(rdd.meanApprox(1000) - r) / r < 0.05 True """ jrdd = self.map(float)._to_java_object_rdd() jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd()) r = jdrdd.meanApprox(timeout, confidence).getFinalValue() return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high()) def countApproxDistinct(self, relativeSD=0.05): """ .. note:: Experimental Return approximate number of distinct elements in the RDD. The algorithm used is based on streamlib's implementation of `"HyperLogLog in Practice: Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available here <http://dx.doi.org/10.1145/2452376.2452456>`_. :param relativeSD: Relative accuracy. Smaller values create counters that require more space. 
It must be greater than 0.000017. >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct() >>> 900 < n < 1100 True >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct() >>> 16 < n < 24 True """ if relativeSD < 0.000017: raise ValueError("relativeSD should be greater than 0.000017") # the hash space in Java is 2^32 hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF) return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD) def toLocalIterator(self): """ Return an iterator that contains all of the elements in this RDD. The iterator will consume as much memory as the largest partition in this RDD. >>> rdd = sc.parallelize(range(10)) >>> [x for x in rdd.toLocalIterator()] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ with SCCallSiteSync(self.context) as css: sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd()) return _load_from_socket(sock_info, self._jrdd_deserializer) def _prepare_for_python_RDD(sc, command): # the serialized command will be compressed by broadcast ser = CloudPickleSerializer() pickled_command = ser.dumps(command) if len(pickled_command) > (1 << 20): # 1M # The broadcast will have same life cycle as created PythonRDD broadcast = sc.broadcast(pickled_command) pickled_command = ser.dumps(broadcast) broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars] sc._pickled_broadcast_vars.clear() return pickled_command, broadcast_vars, sc.environment, sc._python_includes def _wrap_function(sc, func, deserializer, serializer, profiler=None): assert deserializer, "deserializer should not be empty" assert serializer, "serializer should not be empty" command = (func, profiler, deserializer, serializer) pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command) return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec, sc.pythonVer, broadcast_vars, sc._javaAccumulator) class PipelinedRDD(RDD): """ Pipelined maps: >>> rdd = 
sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Fuse this transformation with the previous pipelinable one so
            # both run in a single pass over each partition.
            prev_func = prev.func

            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # _jrdd_val / _id are lazily filled in by the _jrdd property below.
        self._jrdd_val = None
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None

    def getNumPartitions(self):
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        # Lazily build (and memoize) the JVM-side PythonRDD for this pipeline.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()
        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None
        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning)
        self._jrdd_val = python_rdd.asJavaRDD()

        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def
id(self): if self._id is None: self._id = self._jrdd.id() return self._id def _is_pipelinable(self): return not (self.is_cached or self.is_checkpointed) def _test(): import doctest from pyspark.context import SparkContext globs = globals().copy() # The small batch size here ensures that we see multiple batches, # even in these small test examples: globs['sc'] = SparkContext('local[4]', 'PythonTest') (failure_count, test_count) = doctest.testmod( globs=globs, optionflags=doctest.ELLIPSIS) globs['sc'].stop() if failure_count: sys.exit(-1) if __name__ == "__main__": _test()
# parameter_dialog.py
# Software License Agreement (BSD License) # # Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Fraunhofer nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# unicode_literals are not included to avoid problems with publish to ROS topics from python_qt_binding.QtCore import Qt, Signal, QPoint, QSize from python_qt_binding.QtGui import QBrush, QColor, QIcon, QPalette try: import xmlrpclib as xmlrpcclient except ImportError: import xmlrpc.client as xmlrpcclient import os import roslib.msgs import roslib.names import rospy import ruamel.yaml import sys import threading import traceback from fkie_node_manager_daemon.common import utf8 from fkie_node_manager.detailed_msg_box import MessageBox from fkie_node_manager.editor.line_edit import EnhancedLineEdit from fkie_node_manager.parameter_handler import ParameterHandler import fkie_node_manager as nm try: from python_qt_binding.QtGui import QApplication, QComboBox, QCheckBox, QLineEdit, QScrollArea, QWidget from python_qt_binding.QtGui import QFormLayout, QHBoxLayout, QVBoxLayout, QSpacerItem, QSizePolicy from python_qt_binding.QtGui import QFrame, QDialog, QDialogButtonBox, QFileDialog, QLabel, QPushButton, QTextEdit except Exception: from python_qt_binding.QtWidgets import QApplication, QComboBox, QCheckBox, QLineEdit, QScrollArea, QWidget from python_qt_binding.QtWidgets import QFormLayout, QHBoxLayout, QVBoxLayout, QSpacerItem, QSizePolicy from python_qt_binding.QtWidgets import QFrame, QDialog, QDialogButtonBox, QFileDialog, QLabel, QPushButton, QTextEdit def str2bool(val): return val.lower() in ("yes", "true", "t", "1") class MyComboBox(QComboBox): ''' Supports the remove of items by pressing Shift+Delete. 
''' remove_item_signal = Signal(str) def __init__(self, parent=None): QComboBox.__init__(self, parent=parent) def keyPressEvent(self, event): key_mod = QApplication.keyboardModifiers() if key_mod & Qt.ShiftModifier and (event.key() == Qt.Key_Delete): try: curr_text = self.currentText() if curr_text: for i in range(self.count()): if curr_text == self.itemText(i): self.removeItem(i) self.remove_item_signal.emit(curr_text) self.clearEditText() except Exception: print(traceback.format_exc(1)) QComboBox.keyPressEvent(self, event) class ValueWidget(QWidget): ''' ''' def __init__(self, parameter_description, parent=None): QWidget.__init__(self, parent=parent) self.parameter_description = parameter_description self._value_widget = None self.warn_label = QLabel(parent=self) self.warn_label.setTextInteractionFlags(Qt.TextSelectableByMouse) self.help_label = QLabel(parameter_description.hint, parent=self) self.help_label.setTextInteractionFlags(Qt.TextSelectableByMouse) vw = QWidget(self) hlayout = QHBoxLayout(vw) hlayout.setContentsMargins(0, 0, 0, 0) hlayout.addWidget(self._create_input_widget()) if parameter_description.hint: # add help button if hint is available self.help_button = QPushButton(nm.settings().icon('info.png'), '') self.help_button.setFlat(True) self.help_button.setMaximumSize(20, 20) self.help_button.setCheckable(True) self.help_button.toggled.connect(self._on_help_toggled) hlayout.addWidget(self.help_button) vlayout = QVBoxLayout(self) vlayout.setContentsMargins(0, 0, 0, 0) vlayout.setSpacing(1) vlayout.addWidget(vw) # add label to show warnings on wrong input value self.warn_label.setWordWrap(True) vlayout.addWidget(self.warn_label) self.warn_label.setVisible(False) self.warn_label.setStyleSheet("QLabel { color: %s;}" % QColor(255, 83, 13).name()) # help label self.help_label.setWordWrap(True) self.help_label.setStyleSheet("QLabel { background: %s;}" % QColor(255, 255, 235).name()) vlayout.addWidget(self.help_label) self.help_label.setVisible(False) def 
current_text(self):
        # Return the current value as a string, regardless of which concrete
        # input widget backs this parameter.
        result = ''
        if isinstance(self._value_widget, QCheckBox):
            result = repr(self._value_widget.isChecked())
        elif isinstance(self._value_widget, MyComboBox):
            result = self._value_widget.currentText()
        elif isinstance(self._value_widget, QLineEdit):
            result = self._value_widget.text()
        elif isinstance(self._value_widget, QLabel):
            result = self._value_widget.text()
        return result

    def set_value(self, value):
        # Push `value` into the backing widget; lists are rendered as a
        # comma-separated string for text-based widgets.
        if isinstance(self._value_widget, QCheckBox):
            bval = value
            if not isinstance(value, bool):
                bval = str2bool(value[0] if isinstance(value, list) else value)
            self._value_widget.setChecked(bval)
        elif isinstance(self._value_widget, MyComboBox):
            self._value_widget.setEditText(', '.join([utf8(v) for v in value]) if isinstance(value, list) else utf8(value))
        elif isinstance(self._value_widget, QLabel):
            self._value_widget.setText(value)
        elif isinstance(self._value_widget, QLineEdit):
            # avoid ' or " that escapes the string values
            self._value_widget.setText(', '.join([utf8(v) for v in value]) if isinstance(value, list) else utf8(value))

    def add_cached_values(self):
        # Fill the combobox with previously used values from the history,
        # skipping values that are already present as items.
        if isinstance(self._value_widget, MyComboBox):
            fullname = self.parameter_description.fullName()
            values = nm.history().cachedParamValues(fullname)
            for i in range(self._value_widget.count()):
                try:
                    values.remove(self._value_widget.itemText(i))
                except ValueError:
                    # value not in the cached list — nothing to remove
                    pass
                except Exception:
                    print(traceback.format_exc())
            if self._value_widget.count() == 0:
                values.insert(0, '')
            self._value_widget.addItems(values)

    def _create_input_widget(self):
        # Choose the input widget from the parameter type: checkbox for bool,
        # label for read-only values, editable combobox otherwise.
        pd = self.parameter_description
        value = pd._value
        if 'bool' in pd.baseType():
            # add checkbox to edit boolean value
            cb = QCheckBox(parent=self)
            cb.setObjectName(pd.name())
            if not isinstance(value, bool):
                value = str2bool(value[0] if isinstance(value, list) else value)
            pd._value_org = value
            cb.setChecked(value)
            cb.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
            cb.setMinimumHeight(20)
            self._value_widget = cb
            return cb
        elif pd.read_only:
            # read only value
are added as label
            label = QLabel(value, parent=self)
            label.setMinimumHeight(20)
            label.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
            label.setTextInteractionFlags(Qt.TextSelectableByMouse)
            self._value_widget = label
            return label
        else:
            # all other are added as combobox
            cb = MyComboBox(parent=self)
            cb.setObjectName(pd.name())
            cb.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
            cb.setEditable(True)
            cb.remove_item_signal.connect(pd.removeCachedValue)
            cb.editTextChanged.connect(self._check_text)
            items = []
            if isinstance(value, list):
                if pd.isArrayType():
                    # arrays are edited as one comma-separated string
                    items.append(','.join([utf8(val) for val in value]))
                else:
                    items[len(items):] = value
            else:
                if value is not None and value:
                    items.append(utf8(value) if not isinstance(value, xmlrpcclient.Binary) else '{binary data!!! updates will be ignored!!!}')
                elif pd.isTimeType():
                    items.append('now')
            if ':alt' in pd._tags:
                # add alternative values
                try:
                    for alt_value in pd._tags[':alt']:
                        if alt_value not in items:
                            items.append(alt_value)
                except Exception as err:
                    rospy.logwarn('Can not add alternative values to %s: %s' % (pd.name(), utf8(err)))
            pd._value_org = items[0] if items else ''
            cb.addItems(items)
            self._value_widget = cb
            if pd.path_type:
                # add path editor if path type is defined
                fd = QWidget(self)
                hlayout = QHBoxLayout(fd)
                hlayout.setContentsMargins(0, 0, 0, 0)
                hlayout.addWidget(cb)
                self.path_button = QPushButton('...')
                self.path_button.setFlat(True)
                self.path_button.setMaximumSize(20, 20)
                self.path_button.clicked.connect(self._on_file_dialog_clicked)
                hlayout.addWidget(self.path_button)
                return fd
            else:
                return cb

    def _check_text(self, text=''):
        '''
        Checks the content of the combobox for valid type
        '''
        try:
            self.parameter_description.updateValue(text)
            # self.combobox.setStyleSheet('')
            self.warn_label.setVisible(False)
        except Exception as err:
            # show the validation error below the input field
            self.warn_label.setText(utf8(err))
            # bg_style = "MyComboBox { background: %s;}" % QColor(255, 83, 13).name()
            # self.combobox.setStyleSheet("%s" %
(bg_style))
            self.warn_label.setVisible(True)

    def _on_file_dialog_clicked(self):
        # Workaround for QFileDialog.getExistingDirectory because it do not
        # select the configuration folder in the dialog
        self.dialog = QFileDialog(self, caption=self.parameter_description.hint)
        self.dialog.setOption(QFileDialog.HideNameFilterDetails, True)
        if self.parameter_description.path_type == 'dir':
            self.dialog.setFileMode(QFileDialog.Directory)
        # NOTE(review): collapsed source makes the original indentation of the
        # next statement ambiguous; placed outside the 'dir' branch — confirm.
        self.dialog.setDirectory(self._value_widget.currentText())
        if self.dialog.exec_():
            fileNames = self.dialog.selectedFiles()
            self._value_widget.setEditText(fileNames[0])

    def _on_help_toggled(self, checked):
        self.help_label.setVisible(checked)


class ParameterDescription(object):
    '''
    Used for internal representation of the parameter in dialog.
    '''

    def __init__(self, name, msg_type, value=None, widget=None):
        self._name = str(name)
        self._type = msg_type
        self._value = None
        self._value_org = None
        self.read_only = False
        self.path_type = ''
        self.hint = ''
        self._min = None
        self._max = None
        self._tags = {}
        self._read_value(value)
        self._widget = widget
        try:
            # split e.g. 'int32[4]' into base type, array flag and length
            self._base_type, self._is_array_type, self._array_length = roslib.msgs.parse_type(self._type)
        except Exception:
            pass
        if msg_type == 'binary':
            self._base_type = msg_type

    def _read_value(self, value):
        # A dict value may carry meta tags (':ro', ':hint', ':min', ...) next
        # to the actual value stored under ':value'.
        if isinstance(value, dict):
            for key, val in value.items():
                if key.startswith(':'):
                    if key == ':value':
                        self._value = val
                        self._value_org = val
                    elif key == ':ro':
                        self.read_only = val
                    elif key == ':hint':
                        self.hint = val
                    elif key == ':path':
                        self.path_type = val
                    elif key == ':min':
                        self._min = val
                    elif key == ':max':
                        self._max = val
                    self._tags[key] = val
        else:
            self._value = value
            self._value_org = value

    def __repr__(self):
        return "%s [%s]: %s" % (self._name, self._type, utf8(self._value))

    @classmethod
    def is_primitive_type(cls, value_type):
        result = value_type in roslib.msgs.PRIMITIVE_TYPES
        result = result or value_type in ['string', 'int', 'float', 'time', 'duration', 'binary', 'unicode']
        return result

    def
add_tag(self, key, value): self._tags[key] = value def origin_value(self): return self._value_org def clear_origin_value(self): self._value_org = None def changed(self): return utf8(self.origin_value()) != utf8(self._value) def name(self): return self._name def setWidget(self, widget): self._widget = widget if widget is not None: self.addCachedValuesToWidget() def widget(self): return self._widget def fullName(self): result = self.name() widget = self._widget while widget is not None: if isinstance(widget, (MainBox, GroupBox, ArrayBox)): result = roslib.names.ns_join(widget.name, result) widget = widget.parent() return result def isArrayType(self): # handle representation of `rosparam` return self._is_array_type or (self._type in ['[]']) def arrayLength(self): return self._array_length def isPrimitiveType(self): result = self.is_primitive_type(self._base_type) # if value is a string, the list is represented as a string, see `rosparam` result = result or self._type in ['[]'] return result def isTimeType(self): return self._base_type in ['time', 'duration'] def isBinaryType(self): return self._base_type in ['binary'] def baseType(self): return self._base_type def msgType(self): return self._type def updateValueFromField(self): if self.read_only: # do no change any values return result = self.widget().current_text() self._value = self.updateValue(result, raise_on_min_max=False) if self.changed(): nm.history().addParamCache(self.fullName(), self._value) def updateValue(self, value, raise_on_min_max=True): rvalue = value try: if isinstance(value, (dict, list)): rvalue = value elif value: if self.isArrayType(): if 'int' in self.baseType() or 'byte' in self.baseType(): rvalue = map(int, value.lstrip('[').rstrip(']').split(',')) elif 'float' in self.baseType(): rvalue = map(float, value.lstrip('[').rstrip(']').split(',')) elif 'bool' in self.baseType(): rvalue = map(str2bool, value.lstrip('[').rstrip(']').split(',')) elif self.isBinaryType(): rvalue = value else: try: 
rvalue = value.lstrip('[').rstrip(']')
                            rvalue = ruamel.yaml.load("[%s]" % rvalue, Loader=ruamel.yaml.Loader)
                            # if there is no YAML, load() will return an
                            # empty string. We want an empty dictionary instead
                            # for our representation of empty.
                            if rvalue is None:
                                rvalue = []
                        except ruamel.yaml.MarkedYAMLError as e:
                            raise Exception("Field [%s] yaml error: %s" % (self.fullName(), utf8(e)))
                    # fixed-size arrays must match the declared length exactly
                    if self.arrayLength() is not None and self.arrayLength() != len(rvalue):
                        raise Exception(''.join(["Field [", self.fullName(), "] has incorrect number of elements: ", utf8(len(rvalue)), " != ", str(self.arrayLength())]))
                else:
                    if 'int' in self.baseType() or 'byte' in self.baseType():
                        rvalue = int(value)
                    elif 'float' in self.baseType():
                        rvalue = float(value)
                    elif 'bool' in self.baseType():
                        if isinstance(value, bool):
                            rvalue = value
                        else:
                            rvalue = str2bool(value)
                    elif self.isBinaryType():
                        rvalue = utf8(value)
                    elif self.isTimeType():
                        if value == 'now':
                            rvalue = 'now'
                        else:
                            try:
                                # NOTE(review): eval() on user-entered text is a
                                # code-execution risk; ast.literal_eval would be
                                # safer — confirm before changing behavior.
                                val = eval(value)
                                if isinstance(val, dict):
                                    rvalue = val
                                else:
                                    secs = int(val)
                                    nsecs = int((val - secs) * 1000000000)
                                    rvalue = {'secs': secs, 'nsecs': nsecs}
                            except Exception:
                                rvalue = {'secs': 0, 'nsecs': 0}
                    else:
                        if sys.version_info[0] <= 2:
                            rvalue = value.encode(sys.getfilesystemencoding())
                        else:
                            rvalue = value
            else:
                # empty input: fall back to a type-appropriate default value
                if self.isArrayType():
                    arr = []
                    rvalue = arr
                else:
                    if 'int' in self.baseType() or 'byte' in self.baseType():
                        rvalue = 0
                    elif 'float' in self.baseType():
                        rvalue = 0.0
                    elif 'bool' in self.baseType():
                        rvalue = False
                    elif self.isBinaryType():
                        rvalue = utf8(value)
                    elif self.isTimeType():
                        rvalue = {'secs': 0, 'nsecs': 0}
                    else:
                        rvalue = ''
        except Exception as e:
            raise Exception("Error while set value '%s', for '%s': %s" % (utf8(value), self.fullName(), utf8(e)))
        # clamp to the optional ':min'/':max' tags; raise instead of clamping
        # when requested by the caller
        if self._min is not None:
            if rvalue < self._min:
                if raise_on_min_max:
                    raise Exception("%s is smaller than minimum: %s" % (utf8(rvalue), utf8(self._min)))
                rvalue = self._min
        if self._max is not None:
            if rvalue > self._max:
                if raise_on_min_max:
                    raise
Exception("%s is greater than maximum: %s" % (utf8(rvalue), utf8(self._max)))
                rvalue = self._max
        return rvalue

    def value(self, with_tags=False):
        # Complex parameters delegate to their group widget; primitives are
        # refreshed from the input field first.
        if not self.isPrimitiveType() and not self.widget() is None:
            return self.widget().value(with_tags)
        elif self.isPrimitiveType():
            self.updateValueFromField()
            if with_tags:
                # return the value together with its meta tags (':ro', ...)
                result = {}
                result.update(self._tags)
                result[':value'] = self._value
                return result
        return self._value

    def removeCachedValue(self, value):
        nm.history().removeParamCache(self.fullName(), value)

    def createTypedWidget(self, parent):
        # Pick the widget class matching this parameter's structure.
        result = None
        if self.isPrimitiveType():
            result = ValueWidget(self, parent)
        else:
            if self.isArrayType():
                result = ArrayBox(self.name(), self._type, dynamic=self.arrayLength() is None, parent=parent)
            else:
                result = GroupBox(self.name(), self._type, parent=parent)
        return result

    def addCachedValuesToWidget(self):
        if isinstance(self.widget(), ValueWidget):
            self.widget().add_cached_values()


class MainBox(QFrame):
    '''
    Groups the parameter without visualization of the group. It is
    the main widget.
''' def __init__(self, name, param_type, collapsible=True, parent=None): QFrame.__init__(self, parent) self.setObjectName(name) self.name = name self.type_msg = param_type self.params = [] self.collapsed = False self.parameter_description = None vLayout = QVBoxLayout(self) vLayout.setContentsMargins(1, 1, 1, 1) vLayout.setSpacing(1) self.param_widget = QFrame(self) self.collapsible = collapsible if collapsible: self.options_layout = QHBoxLayout() self.options_layout.setContentsMargins(1, 1, 1, 1) self.options_layout.setSpacing(1) self.hide_button = QPushButton('-') self.hide_button.setFlat(True) self.hide_button.setMaximumSize(20, 20) self.hide_button.clicked.connect(self._on_hide_clicked) self.name_label = QLabel(name) self.name_label.setTextInteractionFlags(Qt.TextSelectableByMouse) font = self.name_label.font() font.setBold(True) self.name_label.setFont(font) self.options_layout.addWidget(self.hide_button) self.options_layout.addWidget(self.name_label) self.type_label = QLabel('(%s)' % param_type) self.options_layout.addWidget(self.type_label) self.options_layout.addStretch() vLayout.addLayout(self.options_layout) self.param_widget.setFrameShape(QFrame.StyledPanel) self.param_widget.setFrameShadow(QFrame.Sunken) boxLayout = QFormLayout(self.param_widget) boxLayout.setContentsMargins(3, 3, 3, 3) boxLayout.setVerticalSpacing(1) vLayout.addWidget(self.param_widget) if param_type in ['std_msgs/Header']: self.setCollapsed(True) def setCollapsed(self, value): self.collapsed = value self.param_widget.setVisible(not value) self.hide_button.setText('+' if self.collapsed else '-') def _on_hide_clicked(self): self.setCollapsed(not self.collapsed) # self.param_widget.setVisible(not self.param_widget.isVisible()) # vis = self.param_widget.isVisible() # self.hide_button.setText('-' if vis else '+') def createFieldFromValue(self, value, clear_origin_value=False): self.setUpdatesEnabled(False) try: if isinstance(value, (dict, list)): self._createFieldFromDict(value, 
clear_origin_value=clear_origin_value)
        except Exception:
            print(traceback.format_exc())
        finally:
            self.setUpdatesEnabled(True)

    def _createFieldFromDict(self, value, layout=None, clear_origin_value=False):
        if layout is None:
            layout = self.param_widget.layout()
        # sort the items: 1. header, 2. all primitives (sorted), 3. list, dict (sorted)
        all_params = []
        primitives = []
        komplex = []
        for name, val in value.items():
            _type = type(val).__name__
            if isinstance(val, dict):
                # tagged dicts describe their own type / value
                if ':type' in val:
                    _type = val[':type']
                elif ':value' in val:
                    _type = type(val[':value']).__name__
            if _type == 'str':
                _type = 'string'
            if _type in ['std_msgs/Header']:
                all_params.append((name, _type, val))
            elif ParameterDescription.is_primitive_type(_type):
                primitives.append((name, _type, val))
            else:
                komplex.append((name, _type, val))
        all_params.extend(sorted(primitives))
        all_params.extend(sorted(komplex))
        # create widgets
        for name, _type, val in all_params:
            if name.startswith(':'):
                # skip meta tags, they are not parameters
                continue
            # search for existing field
            field = self.getField(name)
            if field is None:
                # add parameter object first
                param_desc = ParameterDescription(name, _type, val)
                # create widget for parameter
                field = param_desc.createTypedWidget(self)
                if clear_origin_value:
                    param_desc.clear_origin_value()
                param_desc.setWidget(field)
                self.params.append(param_desc)
                if isinstance(field, (GroupBox, ArrayBox)):
                    field.createFieldFromValue(val[':value'] if ':value' in val else val, clear_origin_value)
                    layout.addRow(field)
                else:
                    # we have e simple parameter, create label for it
                    label_name = name if _type in ['string', 'str', 'unicode', 'bool'] else '%s (%s)' % (name, _type)
                    label = QLabel(label_name, self)
                    label.setObjectName('%s_label' % name)
                    label.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
                    label.setTextInteractionFlags(Qt.TextSelectableByMouse)
                    hint = field.toolTip()
                    if hint:
                        label.setToolTip(hint)
                        # whatsThis destroys the layout
                        # label.whatsThis(hint)
                    label.setBuddy(field)
                    layout.addRow(label, field)
            else:
                # field
exists already -> update groups or arrays if isinstance(field, (GroupBox, ArrayBox)): field.createFieldFromValue(val[':value'] if ':value' in val else val, clear_origin_value) else: raise Exception("Parameter with name '%s' already exists!" % name) def value(self, with_tags=False, only_changed=False): result = dict() for param in self.params: if not param.isBinaryType(): if param.isPrimitiveType(): val = param.value(with_tags=with_tags) if param.changed() or not only_changed: result[param.name()] = val else: val = param.value(with_tags=with_tags) if val or not only_changed: result[param.name()] = val return result def set_values(self, values): ''' Sets the values for existing fields. Used e.g. while load parameter from file :param dict values: the dictionary with values to set. :raise Exception: on errors ''' if isinstance(values, dict): for param, val in values.items(): value = val _type = 'unknown' if isinstance(val, tuple): # backward compatibility (_type, value) = val elif isinstance(val, dict): if ':value' in val: value = val[':value'] if ':type' in val: _type = val[':type'] field = self.getField(param) if field is not None: if isinstance(field, (GroupBox, ArrayBox)): field.set_values(value) else: field.set_value(value) elif isinstance(values, list): raise Exception("Setting 'list' values in MainBox or GroupBox not supported!!!") def getField(self, name, recursive=False): for child in self.children(): for c in child.children(): if recursive and isinstance(c, MainBox): result = c.getField(name, recursive=recursive) if result is not None: return result elif c.objectName() == name: return c return None def removeAllFields(self): ''' Remove the references between parameter and corresponding widgets (ComboBox, CheckBox, ..) and remove these widgets from layouts. 
''' for child in self.param_widget.children(): if isinstance(child, MyComboBox): child.parameter_description.setWidget(None) self.params.remove(child.parameter_description) elif isinstance(child, MainBox): child.removeAllFields() self.param_widget.layout().removeWidget(child) def filter(self, arg, force_show=False): ''' Hide the parameter input field, which label dosn't contains the C{arg}. :param str arg: the filter text :param bool force_show: override filter, if group is shown ''' result = False for child in self.param_widget.children(): if isinstance(child, (MainBox, GroupBox, ArrayBox)): show = force_show or not arg if not show: show = child.objectName().lower().find(arg) != -1 show = child.filter(arg, force_show=show) or show # hide group, if no parameter are visible child.setVisible(show) if show: child.setCollapsed(False) result = True elif isinstance(child, ValueWidget): label = child.parentWidget().layout().labelForField(child) if label is not None: show = force_show or not arg if not show: show = child.current_text().lower().find(arg) != -1 or label.text().lower().find(arg) != -1 # set the parent group visible if it is not visible if show and not child.parentWidget().isVisible(): child.parentWidget().setVisible(show) label.setVisible(show) child.setVisible(show) if show: result = True return result def setVisible(self, arg): if arg and not self.parentWidget() is None and not self.parentWidget().isVisible(): self.parentWidget().setVisible(arg) QWidget.setVisible(self, arg) class GroupBox(MainBox): ''' Groups the parameter of a dictionary, struct or class using the group box for visualization. ''' def __init__(self, name, param_type, parent=None): MainBox.__init__(self, name, param_type, True, parent) self.setObjectName(name) class ArrayEntry(MainBox): ''' A part of the ArrayBox to represent the elements of a list. 
''' def __init__(self, index, param_type, parent=None): MainBox.__init__(self, '#%s' % utf8(index), param_type, True, parent) self.index = index self.setObjectName(''.join(['[', utf8(index), ']'])) self.param_widget.setFrameShape(QFrame.Box) self.param_widget.setFrameShadow(QFrame.Plain) self.type_label.setVisible(False) # boxLayout = QFormLayout() # boxLayout.setVerticalSpacing(0) # label = QLabel(''.join(['[', str(index), ']'])) # self.param_widget.layout().addRow(label) # self.setLayout(boxLayout) def value(self, with_tags=False, only_changed=False): ''' Retruns a dictionary for an entry of an array, e.g. {name: value}. If with_tags is True it looks like: {name: {':value': value, ':type': type}} :rtype: dict ''' result = dict() for param in self.params: val = param.value(with_tags) if val or not only_changed: result[param.name()] = val return result class ArrayBox(MainBox): ''' Groups the parameter of a list. ''' def __init__(self, name, param_type, dynamic, parent=None): MainBox.__init__(self, name, param_type, True, parent) self._is_dynamic = dynamic self._dynamic_value = None self._dynamic_widget = None self._dynamic_items_count = 0 def addDynamicBox(self): self._dynamic_items_count = 0 addButton = QPushButton("+") addButton.setMaximumSize(25, 25) addButton.clicked.connect(self._on_add_dynamic_entry) self.options_layout.addWidget(addButton) self.count_label = QLabel('0') self.options_layout.addWidget(self.count_label) remButton = QPushButton("-") remButton.setMaximumSize(25, 25) remButton.clicked.connect(self._on_rem_dynamic_entry) self.options_layout.addWidget(remButton) def _on_add_dynamic_entry(self, checked=False, value=None): self.setUpdatesEnabled(False) try: val = value if val is None: val = self._dynamic_value if val is not None: self._create_dynamic_frame(val) finally: self.setUpdatesEnabled(True) def _create_dynamic_frame(self, value): entry_frame = ArrayEntry(self._dynamic_items_count, self.type_msg) self.param_widget.layout().addRow(entry_frame) 
entry_frame._createFieldFromDict(value) self._dynamic_items_count += 1 self.count_label.setText(utf8(self._dynamic_items_count)) def _on_rem_dynamic_entry(self): if self._dynamic_items_count > 0: self._dynamic_items_count -= 1 item = self.param_widget.layout().takeAt(self._dynamic_items_count) self.param_widget.layout().removeItem(item) try: # remove the referenced parameter, too for child in item.widget().children(): if isinstance(child, MyComboBox): child.parameter_description.setWidget(None) self.params.remove(child.parameter_description) elif isinstance(child, MainBox): child.removeAllFields() self.param_widget.layout().removeWidget(child) child.parameter_description.setWidget(None) self.params.remove(child.parameter_description) item.widget().setParent(None) del item except Exception: print(traceback.format_exc(3)) self.count_label.setText(utf8(self._dynamic_items_count)) def createFieldFromValue(self, value, clear_origin_value=False): self.setUpdatesEnabled(False) try: if self._is_dynamic: self.addDynamicBox() # Set value used to add dynamic array fields. # On republish there is an array filled array. So only last enry will be used on add new entry. if isinstance(value, list): if value: self._dynamic_value = value[-1] else: self._dynamic_value = value self.set_values(value) except Exception: print(traceback.format_exc()) finally: self.setUpdatesEnabled(True) def value(self, with_tags=False, only_changed=False): ''' Goes through the list and creates dictionary with values of each element. Returns a list with dictionaries, e.g. [{name: value}, {name: value}]. If with_tags is True the result is a dictionary, e.g. 
{':type': type[], ':value': [{name: value}, {name: value}]} :rtype: list or dict, if with_tags==True ''' result_list = list() for i in range(self.param_widget.layout().rowCount()): item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole) if item and isinstance(item.widget(), ArrayEntry): value = item.widget().value(with_tags=with_tags, only_changed=only_changed) result_list.append(value) result = result_list if with_tags: result = {} result[':type'] = self.type_msg result[':value'] = result_list return result def set_values(self, values): ''' Create a list of the elements and sets their values. :param list values: The list of dictionaries with parameter values ''' if isinstance(values, list): count_entries = 0 # determine the count of existing elements for i in range(self.param_widget.layout().rowCount()): item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole) if item and isinstance(item.widget(), ArrayEntry): count_entries += 1 # create the list of the elements of the length of values if count_entries < len(values): for i in range(len(values) - count_entries): # use array entry self._on_add_dynamic_entry(value=values[i]) elif count_entries > len(values): for i in range(count_entries - len(values)): self._on_rem_dynamic_entry() # set the values for i in range(self.param_widget.layout().rowCount()): item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole) if item and isinstance(item.widget(), ArrayEntry): item.widget().set_values(values[i]) class ScrollArea(QScrollArea): ''' ScrollArea provides the maximal width of the internal widget. ''' def viewportEvent(self, arg): if self.widget() and self.viewport().size().width() != self.widget().maximumWidth(): self.widget().setMaximumWidth(self.viewport().size().width()) return QScrollArea.viewportEvent(self, arg) class ParameterDialog(QDialog): ''' This dialog creates an input mask for the given parameter and their types. 
''' def __init__(self, params=dict(), buttons=QDialogButtonBox.Cancel | QDialogButtonBox.Ok, sidebar_var='', parent=None, store_geometry=''): ''' Creates an input dialog. :param dict params: a (recursive) dictionary with parameter names and their values. A value can be of primitive type (int, bool, string), a list or dictionary. If it is of list type, the list should contains dictionaries with parameter and values. If value is of dictionary type it is a recursive include or value with tags. If it is a recursive include a group will be created. The key is the name of the group. If it is a value with tags it should contains at least a ':value' tag. All attributes begin with ':'. Other key attributes: -':type': type, overwrites the autodetection -':ro': read only -':hint': description of the parameter -':default': default value -':min': minimum value -':max': maximum value -':alt': a list of alternative values -'path': 'dir' or 'file' :param str sidebar_var: the name of the key in first level of params. Creates a sidebar if it is not empty. Cached and alternative values are used to fill the sidebar. 
''' QDialog.__init__(self, parent=parent) self.setObjectName('ParameterDialog - %s' % utf8(params)) self.__current_path = nm.settings().current_dialog_path self.horizontalLayout = QHBoxLayout(self) self.horizontalLayout.setObjectName("horizontalLayout") self.horizontalLayout.setContentsMargins(0, 0, 0, 0) self.horizontalLayout.setSpacing(0) self.verticalLayout = QVBoxLayout() self.verticalLayout.setObjectName("verticalLayout") self.verticalLayout.setContentsMargins(3, 3, 3, 3) # add filter row self.filter_field = EnhancedLineEdit(self) self.filter_field.setPlaceholderText("filter") self.filter_field.textChanged.connect(self._on_filter_changed) self.filter_visible = True self.verticalLayout.addWidget(self.filter_field) # create area for the parameter self.scrollArea = scrollArea = ScrollArea(self) scrollArea.setObjectName("scrollArea") self.content = MainBox('/', 'string', False, self) scrollArea.setFrameStyle(QFrame.NoFrame) scrollArea.setWidget(self.content) scrollArea.setWidgetResizable(True) self.verticalLayout.addWidget(scrollArea) # add info text field self.info_field = QTextEdit(self) palette = QPalette() brush = QBrush(QColor(255, 254, 242)) brush.setStyle(Qt.SolidPattern) palette.setBrush(QPalette.Active, QPalette.Base, brush) brush = QBrush(QColor(255, 254, 242)) brush.setStyle(Qt.SolidPattern) palette.setBrush(QPalette.Inactive, QPalette.Base, brush) brush = QBrush(QColor(244, 244, 244)) brush.setStyle(Qt.SolidPattern) palette.setBrush(QPalette.Disabled, QPalette.Base, brush) self.info_field.setPalette(palette) self.info_field.setFrameShadow(QFrame.Plain) self.info_field.setReadOnly(True) self.info_field.setTextInteractionFlags(Qt.LinksAccessibleByKeyboard | Qt.LinksAccessibleByMouse | Qt.TextBrowserInteraction | Qt.TextSelectableByKeyboard | Qt.TextSelectableByMouse) self.info_field.setObjectName("dialog_info_field") self.verticalLayout.addWidget(self.info_field) self.info_field.setVisible(False) # create buttons self.buttonBox = QDialogButtonBox(self) 
self.buttonBox.setObjectName("buttonBox") self.buttonBox.setOrientation(Qt.Horizontal) self.buttonBox.setStandardButtons(buttons) self.buttonBox.accepted.connect(self.accept) self.buttonBox.rejected.connect(self.reject) self.verticalLayout.addWidget(self.buttonBox) self.horizontalLayout.addLayout(self.verticalLayout) # add side bar for checklist values = nm.history().cachedParamValues('/%s' % sidebar_var) self.sidebar_frame = QFrame(self) self.sidebar_frame.setObjectName(sidebar_var) sidebarframe_verticalLayout = QVBoxLayout(self.sidebar_frame) sidebarframe_verticalLayout.setObjectName("sidebarframe_verticalLayout") sidebarframe_verticalLayout.setContentsMargins(3, 3, 3, 3) self._sidebar_selected = 0 if len(values) > 0 and sidebar_var in params: self.horizontalLayout.addWidget(self.sidebar_frame) try: if ':value' in params[sidebar_var]: self.sidebar_default_val = params[sidebar_var][':value'] else: self.sidebar_default_val = params[sidebar_var][1] # add default value to sidebar if self.sidebar_default_val and self.sidebar_default_val not in values: values.append(self.sidebar_default_val) except Exception: self.sidebar_default_val = '' values.sort() for v in values: checkbox = QCheckBox(v) checkbox.setObjectName(v) checkbox.stateChanged.connect(self._on_sidebar_stateChanged) self.sidebar_frame.layout().addWidget(checkbox) self.sidebar_frame.layout().addItem(QSpacerItem(100, 20, QSizePolicy.Minimum, QSizePolicy.Expanding)) # set the input fields if params: try: self.content.createFieldFromValue(params) self.setInfoActive(False) except Exception: print(traceback.format_exc()) if self.filter_field.isVisible(): self.filter_field.setFocus() # restore from configuration file self._geometry_name = store_geometry if store_geometry and nm.settings().store_geometry: settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE) self._history_selected_robot = settings.value("selected_robot", '') settings.beginGroup(store_geometry) self.resize(settings.value("size", QSize(600, 
300))) pos = settings.value("pos", QPoint(0, 0)) if pos.x() != 0 and pos.y() != 0: self.move(pos) settings.endGroup() def __del__(self): self.content.removeAllFields() def _on_sidebar_stateChanged(self, state): if state == Qt.Checked: self._sidebar_selected += 1 elif state == Qt.Unchecked: self._sidebar_selected -= 1 if self._sidebar_selected in [0, 1]: try: field = self.content.getField(self.sidebar_frame.objectName()) if field is not None and field.currentText() == self.sidebar_default_val: field.setEnabled(True if self._sidebar_selected == 0 else False) except Exception: pass def showLoadSaveButtons(self): self.load_button = QPushButton() self.load_button.setIcon(nm.settings().icon('load.png')) self.load_button.clicked.connect(self._load_parameter) self.load_button.setToolTip('Load parameters from YAML file') self.load_button.setFlat(True) self.buttonBox.addButton(self.load_button, QDialogButtonBox.ActionRole) self.save_button = QPushButton() self.save_button.clicked.connect(self._save_parameter) self.save_button.setIcon(nm.settings().icon('save.png')) self.save_button.setToolTip('Save parameters to YAML file') self.save_button.setFlat(True) self.buttonBox.addButton(self.save_button, QDialogButtonBox.ActionRole) def _on_filter_changed(self): self.content.filter(self.filter_field.text().lower()) def setFilterVisible(self, val): ''' Shows or hides the filter row. ''' self.filter_visible = val self.filter_field.setVisible(val & self.scrollArea.isHidden()) def add_warning(self, message): label = QLabel(self) label.setWordWrap(True) label.setTextInteractionFlags(Qt.TextSelectableByMouse) label.setText(''.join(["<font color='red'>Warning!\n", message, "</font>"])) self.verticalLayout.insertWidget(1, label) def setText(self, text): ''' Adds a label to the dialog's layout and shows the given text. 
:param str text: the text to add to the dialog ''' self.info_field.setText(text) self.setInfoActive(True) def setInfoActive(self, val): ''' Activates or deactivates the info field of this dialog. If info field is activated, the filter frame and the input field are deactivated. :param bool val: state ''' if val and self.info_field.isHidden(): self.filter_field.setVisible(False & self.filter_visible) self.scrollArea.setVisible(False) self.info_field.setVisible(True) elif not val and self.scrollArea.isHidden(): self.filter_field.setVisible(True & self.filter_visible) self.scrollArea.setVisible(True) self.info_field.setVisible(False) if self.filter_field.isVisible(): self.filter_field.setFocus() def setFocusField(self, field_label): field = self.content.getField(field_label, recursive=True) if field is not None: field.setFocus() def getKeywords(self, only_changed=False, with_tags=False): ''' :param bool only_changed: returns changed parameter only (Defaul: False) :param bool with_tags: returns parameter attributes (e.g. :ro, :hint,...) (Defaul: False) :returns a directory with parameter and value for entered fields. 
:rtype: dict ''' # get the results of sidebar sidebar_list = [] sidebar_name = self.sidebar_frame.objectName() for j in range(self.sidebar_frame.layout().count() - 1): w = self.sidebar_frame.layout().itemAt(j).widget() if isinstance(w, QCheckBox): if w.checkState() == Qt.Checked: sidebar_list.append(w.objectName()) result_value = self.content.value(with_tags, only_changed) # add the sidebar results if sidebar_name in result_value: # skip the default value, if elements are selected in the side_bar sidebar_value = '' if with_tags: sidebar_value = result_value[sidebar_name][':value'] else: sidebar_value = result_value[sidebar_name] if len(sidebar_list) == 0 or self.sidebar_default_val != sidebar_value: sidebar_list.append(sidebar_value) if with_tags: result_value[sidebar_name][':value'] = [v for v in set(sidebar_list)] else: result_value[sidebar_name] = [v for v in set(sidebar_list)] return result_value def keywords2params(self, keywords): ''' Resolves the dictionary values to ROS parameter names. 
:param keywords: the result of the getKeywords :return: dictionary of (ROS parameter name : value) ''' result = dict() for param, value in keywords.items(): if isinstance(value, dict): r = self.keywords2params(value) for p, v in r.items(): result[roslib.names.ns_join(param, p)] = v else: result[param] = value return result @classmethod def remove_attributes(cls, keywords): # it it is a value dictionary, we need only :value attribute if ':value' in keywords: return keywords[':value'] # remove all attributes which starts with ':' result = {} for key, val in keywords.items(): clean_val = val if isinstance(val, dict): clean_val = cls.remove_attributes(val) if not key.startswith(':'): result[key] = clean_val return result def _save_parameter(self): try: (fileName, _) = QFileDialog.getSaveFileName(self, "Save parameter", self.__current_path, "YAML files (*.yaml);;All files (*)") if fileName: self.__current_path = os.path.dirname(fileName) nm.settings().current_dialog_path = os.path.dirname(fileName) content = self.content.value(with_tags=True) buf = ruamel.yaml.compat.StringIO() ruamel.yaml.dump(content, buf, Dumper=ruamel.yaml.RoundTripDumper) with open(fileName, 'w+') as f: f.write(buf.getvalue()) except Exception as e: print(traceback.format_exc(3)) MessageBox.warning(self, "Save parameter Error", 'Error while save parameter', utf8(e)) def _load_parameter(self): try: (fileName, _) = QFileDialog.getOpenFileName(self, "Load parameter", self.__current_path, "YAML files (*.yaml);;All files (*)") if fileName: self.__current_path = os.path.dirname(fileName) nm.settings().current_dialog_path = os.path.dirname(fileName) with open(fileName, 'r') as f: # print yaml.load(f.read()) self.content.set_values(ruamel.yaml.load(f.read(), Loader=ruamel.yaml.Loader)) except Exception as e: print(traceback.format_exc()) MessageBox.warning(self, "Load parameter Error", 'Error while load parameter', utf8(e)) # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # 
%%%%%%%%%%%%%%%%%% close handling %%%%%%%%%%%%%%%%%%%%% # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% def _store_geometry(self): if self._geometry_name: settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE) settings.beginGroup(self._geometry_name) settings.setValue("size", self.size()) settings.setValue("pos", self.pos()) settings.endGroup() def accept(self): self._store_geometry() self.setResult(QDialog.Accepted) self.accepted.emit() if self.isModal(): self.hide() def reject(self): self._store_geometry() self.setResult(QDialog.Rejected) self.rejected.emit() self.hide() def hideEvent(self, event): self.close() def closeEvent(self, event): ''' Test the open files for changes and save this if needed. ''' self.setAttribute(Qt.WA_DeleteOnClose, True) if self.result() == QDialog.Accepted: event.setAccepted(False) QDialog.closeEvent(self, event) class MasterParameterDialog(ParameterDialog): ''' This dialog is an extension to the L{ParameterDialog}. The parameter and their values are requested from the ROS master parameter server. The requests are threaded and allows the also threaded changed of ROS parameter assigned to given namespace. ''' def __init__(self, masteruri, ns='/', parent=None, store_geometry=''): ''' :param str masteruri: if the master uri is not None, the parameter are retrieved from ROS parameter server. :param str ns: namespace of the parameter retrieved from the ROS parameter server. 
''' ParameterDialog.__init__(self, dict(), parent=parent, store_geometry=store_geometry) self.masteruri = masteruri self.ns = ns self.is_delivered = False self.is_send = False self.mIcon = nm.settings().icon('default_cfg.png') self.setWindowIcon(self.mIcon) # self.resize(450, 300) self.add_new_button = QPushButton() self.add_new_button.setIcon(nm.settings().icon('crystal_clear_add.png')) self.add_new_button.clicked.connect(self._on_add_parameter) self.add_new_button.setToolTip('Adds a new parameter to the list') self.add_new_button.setFlat(True) self.buttonBox.addButton(self.add_new_button, QDialogButtonBox.ActionRole) self.showLoadSaveButtons() # self.apply_button = QPushButton(self.tr("&Ok")) # self.apply_button.clicked.connect(self._on_apply) # self.buttonBox.addButton(self.apply_button, QDialogButtonBox.ApplyRole) # self.buttonBox.accepted.connect(self._on_apply) self.setText(' '.join(['Obtaining parameters from the parameter server', masteruri, '...'])) self.parameterHandler = ParameterHandler() self.parameterHandler.parameter_list_signal.connect(self._on_param_list) self.parameterHandler.parameter_values_signal.connect(self._on_param_values) self.parameterHandler.delivery_result_signal.connect(self._on_delivered_values) self.parameterHandler.requestParameterList(masteruri, ns) # self.apply_button.setFocus(Qt.OtherFocusReason) def accept(self): if self.masteruri is not None and not self.is_send: try: params = self.getKeywords(True) params = self.keywords2params(params) ros_params = dict() for p, v in params.items(): rospy.logdebug("updated parameter: %s, %s, %s", p, utf8(v), type(v)) ros_params[roslib.names.ns_join(self.ns, p)] = v if ros_params: self.is_send = True self.setText('Sends parameters to the server...') self.parameterHandler.deliverParameter(self.masteruri, ros_params) else: self.close() except Exception as e: print(traceback.format_exc(3)) MessageBox.warning(self, self.tr("Warning"), utf8(e)) elif self.masteruri is None: MessageBox.warning(self, 
self.tr("Error"), 'Invalid ROS master URI') # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # %%%%%%%%%%%%%%%%%% ROS parameter handling %%%%%%%%%%%%%%%%%%%%% # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% def _on_add_parameter(self): params_arg = {'namespace': {':type': 'string', ':value': self.ns}, 'name': {':type': 'string', ':value': ''}, 'type': {':type': 'string', ':value': ['string', 'int', 'float', 'bool', 'list']}, 'value': {':type': 'string', ':value': ''} } dia = ParameterDialog(params_arg, store_geometry='add_parameter_in_master_dialog') dia.setWindowTitle('Add new parameter') dia.setFilterVisible(False) if dia.exec_(): try: params = dia.getKeywords() if params['name']: if params['type'] == 'int': value = int(params['value']) elif params['type'] == 'float': value = float(params['value']) elif params['type'] == 'bool': value = str2bool(params['value']) elif params['type'] == 'list': try: value = ruamel.yaml.load("[%s]" % params['value'], Loader=ruamel.yaml.Loader) # if there is no YAML, load() will return an # empty string. We want an empty dictionary instead # for our representation of empty. if value is None: value = [] except ruamel.yaml.MarkedYAMLError as e: MessageBox.warning(self, self.tr("Warning"), "yaml error: %s" % utf8(e)) else: value = params['value'] self._on_param_values(self.masteruri, 1, '', {roslib.names.ns_join(params['namespace'], params['name']): (1, '', value)}, new_param=True) else: MessageBox.warning(self, self.tr("Warning"), 'Empty name is not valid!') except ValueError as e: print(traceback.format_exc(3)) MessageBox.warning(self, self.tr("Warning"), utf8(e)) def _on_param_list(self, masteruri, code, msg, params): ''' :param str masteruri: The URI of the ROS parameter server :param int code: The return code of the request. If not 1, the message is set and the list can be ignored. :param str msg: The message of the result. 
:param [str] params: The list the parameter names. ''' if code == 1: params.sort() self.parameterHandler.requestParameterValues(masteruri, params) else: self.setText(msg) def _on_param_values(self, masteruri, code, msg, params, new_param=False): ''' :param str masteruri: The URI of the ROS parameter server :param int code: The return code of the request. If not 1, the message is set and the list can be ignored. :param str msg: The message of the result. :param params: The dictionary the parameter names and request result. :type params: dict(paramName : (code, statusMessage, parameterValue)) ''' if code == 1: dia_params = dict() for p, (code_n, _, val) in params.items(): # _:=msg_n if code_n != 1: val = '' type_str = 'string' value = utf8(val) if isinstance(val, bool): type_str = 'bool' elif isinstance(val, int): type_str = 'int' elif isinstance(val, float): type_str = 'float' elif isinstance(val, list) or isinstance(val, dict): # handle representation of `rosparam` type_str = 'list' value = '' for v in val: if len(value) > 0: value = value + ', ' value = value + utf8(v) elif isinstance(val, xmlrpcclient.Binary): type_str = 'binary' param = p.replace(self.ns, '') param = param.strip(roslib.names.SEP) names_sep = param.split(roslib.names.SEP) param_name = names_sep.pop() if names_sep: group = dia_params for n in names_sep: group_name = n if group_name in group: group = group[group_name] else: tmp_dict = dict() group[group_name] = tmp_dict group = tmp_dict group[param_name] = {':type': type_str, ':value': value} else: dia_params[param_name] = {':type': type_str, ':value': value} try: self.content.createFieldFromValue(dia_params, clear_origin_value=new_param) self.setInfoActive(False) except Exception as e: print(traceback.format_exc(3)) MessageBox.warning(self, self.tr("Warning"), utf8(e)) else: self.setText(msg) def _on_delivered_values(self, masteruri, code, msg, params): ''' :param str masteruri: The URI of the ROS parameter server :param int code: The return code 
of the request. If not 1, the message is set and the list can be ignored. :param str msg: The message of the result. :param params: The dictionary the parameter names and request result. :type params: dict(paramName : (code, statusMessage, parameterValue)) ''' self.is_delivered = True errmsg = '' if code == 1: for _, (code_n, msg, _) in params.items(): # _:=param, val if code_n != 1: errmsg = '\n'.join([errmsg, msg]) else: errmsg = msg if msg else 'Unknown error on set parameter' if errmsg: print(traceback.format_exc(2)) MessageBox.warning(self, self.tr("Warning"), utf8(errmsg)) self.is_delivered = False self.is_send = False self.setInfoActive(False) if self.is_delivered: self.close() class ServiceDialog(ParameterDialog): ''' Adds a support for calling a service to the L{ParameterDialog}. The needed input fields are created from the service request message type. The service call is executed in a thread to avoid blocking GUI. ''' service_resp_signal = Signal(str, str) def __init__(self, service, parent=None): ''' :param service: Service to call. 
:type service: U{fkie_master_discovery.ServiceInfo<http://docs.ros.org/kinetic/api/fkie_master_discovery/html/modules.html#fkie_master_discovery.master_info.ServiceInfo>} ''' self.service = service slots = service.get_service_class(True)._request_class.__slots__ types = service.get_service_class()._request_class._slot_types ParameterDialog.__init__(self, self._params_from_slots(slots, types), buttons=QDialogButtonBox.Close, parent=parent, store_geometry='service_call_dialog') self.setWindowTitle('Call %s' % service.name) self.service_resp_signal.connect(self._handle_resp) # self.resize(450, 300) if not slots: self.setText(''.join(['Wait for response ...'])) thread = threading.Thread(target=self._callService) thread.setDaemon(True) thread.start() else: self.call_service_button = QPushButton(self.tr("&Call")) self.call_service_button.clicked.connect(self._on_call_service) self.buttonBox.addButton(self.call_service_button, QDialogButtonBox.ActionRole) self.hide_button = QPushButton(self.tr("&Hide/Show output")) self.hide_button.clicked.connect(self._on_hide_output) self.buttonBox.addButton(self.hide_button, QDialogButtonBox.ActionRole) self.hide_button.setVisible(False) self.showLoadSaveButtons() def _on_hide_output(self): self.setInfoActive(not self.info_field.isVisible()) def _on_call_service(self): try: self.hide_button.setVisible(True) params = self.getKeywords() self.setText(''.join(['Wait for response ...'])) thread = threading.Thread(target=self._callService, args=((params,))) thread.setDaemon(True) thread.start() except Exception as e: rospy.logwarn("Error while reading parameter for %s service: %s", utf8(self.service.name), utf8(e)) self.setText(''.join(['Error while reading parameter:\n', utf8(e)])) def _callService(self, params={}): req = utf8(params) if params else '' try: req, resp = nm.starter().callService(self.service.uri, self.service.name, self.service.get_service_class(), [params]) self.service_resp_signal.emit(utf8(repr(req)), utf8(repr(resp))) 
except Exception as e: print(traceback.format_exc(2)) rospy.logwarn("Error while call service '%s': %s", utf8(self.service.name), utf8(e)) self.service_resp_signal.emit(utf8(repr(req)), utf8(e)) @classmethod def _params_from_slots(cls, slots, types, values={}): result = dict() for slot, msg_type in zip(slots, types): base_type, is_array, _array_length = roslib.msgs.parse_type(msg_type) if base_type in roslib.msgs.PRIMITIVE_TYPES or base_type in ['time', 'duration']: default_value = 'now' if base_type in ['time', 'duration'] else '' if slot in values and values[slot]: default_value = values[slot] result[slot] = {':type': msg_type, ':value': default_value} else: try: list_msg_class = roslib.message.get_message_class(base_type) if is_array and slot in values: subresult = [] for slot_value in values[slot]: subvalue = cls._params_from_slots(list_msg_class.__slots__, list_msg_class._slot_types, slot_value if slot in values and slot_value else {}) subresult.append(subvalue) result[slot] = {':value': subresult, ':type': msg_type} else: subresult = cls._params_from_slots(list_msg_class.__slots__, list_msg_class._slot_types, values[slot] if slot in values and values[slot] else {}) if is_array: result[slot] = {':value': subresult, ':type': msg_type} else: subresult[':type'] = msg_type result[slot] = subresult except ValueError as e: print(traceback.format_exc()) rospy.logwarn("Error while parse message type '%s': %s", utf8(msg_type), utf8(e)) return result def _handle_resp(self, req, resp): self.setWindowTitle(''.join(['Request / Response of ', self.service.name])) # replace some of Escape Characters resp_str = utf8(resp).replace('\\r\\n', '\n') resp_str = resp_str.replace('\\n', '\n') resp_str = resp_str.replace('\\t', '\t') resp_str = resp_str.replace('\\v', '\v') self.setText('\n'.join([utf8(req), '---', resp_str]))
looper.py
import threading
import time


class Looper:
    """Run a callable repeatedly on a background thread at a fixed interval.

    The worker loop sleeps ``interval`` seconds, then invokes ``func`` with
    the stored ``*args``/``**kwargs``, until :meth:`stop` is called or the
    callable raises.
    """

    def __init__(self, func, interval=1, daemon=True, *args, **kwargs):
        super().__init__()
        self.func = None        # callable invoked on every tick
        self.interval = None    # seconds slept before each invocation
        self.running = False    # loop-control flag checked by the worker
        self.thread = None      # worker thread; stays None until start()
        self.daemon = daemon
        self.set_interval(interval=interval)
        self.args = args
        self.kwargs = kwargs
        self.set_func(func=func)

    def make_thread(self, func, *args, **kwargs):
        """Create (but do not start) a fresh worker thread wrapping ``func``."""
        self.thread = threading.Thread(target=self.loop, args=(func, *args),
                                       kwargs=kwargs, daemon=self.daemon)
        return self.thread

    def set_interval(self, interval):
        """Set the sleep interval (seconds) used before each invocation."""
        self.interval = interval

    def set_func(self, func):
        """Set the callable invoked on every loop tick."""
        self.func = func

    def loop(self, func, *args, **kwargs):
        """Worker body: sleep, call, repeat while ``self.running`` is set.

        ``running`` is cleared in ``finally`` so that an exception raised by
        ``func`` still leaves the looper in a restartable state.  (The old
        ``except Exception as e: raise e`` re-raise was redundant and only
        truncated the traceback.)
        """
        try:
            while self.running:
                time.sleep(self.interval)
                func(*args, **kwargs)
        finally:
            self.running = False

    def is_alive(self):
        """Return True if the worker thread exists and is still executing."""
        return self.thread.is_alive() if self.thread else False

    def is_running(self):
        """Return the loop-control flag (True between start() and stop())."""
        return self.running

    def start(self):
        """Start the loop on a new thread.

        :raises Exception: if the loop is already running.
        """
        if self.is_running() or self.is_alive():
            raise Exception('Thread is already running')
        self.running = True
        self.make_thread(func=self.func, *self.args, **self.kwargs)
        return self.thread.start()

    def restart(self):
        """Stop the loop if it is active, then start it again."""
        if self.is_running() or self.is_alive():
            self.stop()
        return self.start()

    def stop(self):
        """Signal the loop to stop and wait for the worker to finish.

        Bug fix: this is now safe to call before :meth:`start` — previously
        ``self.thread`` was still ``None`` and ``join()`` raised
        ``AttributeError``.
        """
        self.running = False
        if self.thread is None:
            return None
        return self.thread.join()
basic_multiprocessing.py
""" "Multiprocessing" section example showing how to create new processes with `multiprocessing` module """ from multiprocessing import Process import os def work(identifier): print(f"Hey, I am the process " f"{identifier}, pid: {os.getpid()}") def main(): processes = [Process(target=work, args=(number,)) for number in range(5)] for process in processes: process.start() while processes: processes.pop().join() if __name__ == "__main__": main()
vfs.py
# Adapted from imutils https://github.com/jrosebr1/imutils
from queue import Queue
import sys
from threading import Thread
import time

from cv2 import VideoCapture, CAP_PROP_POS_FRAMES


class FileVideoStream:
    """Read a fixed window of a video file on a background daemon thread.

    Frames are decoded ahead of the consumer into a bounded queue; when the
    end of the window (``length`` frames from ``start``) is reached, the
    stream seeks back to ``start`` and loops.
    """

    def __init__(self, path, start, length, queue_size=128):
        # Open the file and seek to the first frame of the window.
        self.stream = VideoCapture(str(path))
        self.startFrame = start
        self.videoLength = length
        self.stream.set(CAP_PROP_POS_FRAMES, self.startFrame)
        self.stopped = False

        # Bounded buffer of decoded frames shared with the consumer.
        self.Q = Queue(maxsize=queue_size)
        self.currentNumber = 0

        # Producer thread (not started yet); daemon so it never blocks exit.
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True

    def start(self):
        """Launch the producer thread and return self for chaining."""
        self.thread.start()
        return self

    def update(self):
        """Producer loop: decode frames into the queue until stopped."""
        while not self.stopped:
            if self.Q.full():
                time.sleep(0.1)  # buffer is full; rest for 100ms
                continue
            (grabbed, frame) = self.stream.read()
            if not grabbed:
                # End of file reached; flag it (this last read is still queued).
                self.stopped = True
            self.Q.put(frame)
            self.currentNumber += 1
            if self.currentNumber >= self.videoLength:
                # Window exhausted: loop back to the start frame.
                self.stream.set(CAP_PROP_POS_FRAMES, self.startFrame)
                self.currentNumber = 0
        self.stream.release()

    def read(self):
        """Return the next frame from the queue (blocks until available)."""
        return self.Q.get()

    # Insufficient to have consumer use while(more()) which does
    # not take into account if the producer has reached end of
    # file stream.
    def running(self):
        """Return True while frames are buffered or the producer is active."""
        return self.more() or not self.stopped

    def more(self):
        """Return True if frames remain in the queue.

        If the producer is still active and the queue is momentarily empty,
        poll briefly (up to ~0.5s) for a frame to arrive before giving up.
        """
        attempts = 0
        while not self.Q.qsize() and not self.stopped and attempts < 5:
            time.sleep(0.1)
            attempts += 1
        return self.Q.qsize() > 0

    def clear(self):
        """Drop all buffered frames (holds the queue's internal lock)."""
        with self.Q.mutex:
            self.Q.queue.clear()

    def stop(self):
        """Ask the producer to stop, then wait until the stream is released.

        The join matters: the producer thread might still be grabbing a frame.
        """
        self.stopped = True
        self.thread.join()
test.py
import tempfile
from multiprocessing import Process

import cmmap


def test_file():
    """Map a temp file read-only and check the mapping mirrors its contents."""
    fd, filename = tempfile.mkstemp()
    f = open(filename, 'w')
    data = 'foobarbing'
    f.write(data)
    f.close()
    f = open(filename, 'r+')
    m = cmmap.mmap(prot=cmmap.PROT_READ, length=len(data),
                   flags=cmmap.MAP_SHARED, fd=f.fileno())
    assert m[:] == data


def test_anonymous_private():
    """Anonymous private mapping is writable and reads back what was written."""
    data = 'foobarding'
    m = cmmap.mmap(length=10, prot=cmmap.PROT_READ | cmmap.PROT_WRITE,
                   flags=cmmap.MAP_ANONYMOUS | cmmap.MAP_PRIVATE)
    m[:] = data
    assert m[:] == data


def test_anonymous_shared():
    """Anonymous shared mapping: a write from a forked child is visible here."""
    data = 'foobarding'
    m = cmmap.mmap(length=10, prot=cmmap.PROT_READ | cmmap.PROT_WRITE,
                   flags=cmmap.MAP_ANONYMOUS | cmmap.MAP_SHARED)
    m[:] = data

    def f():
        assert m[:] == data
        m[0] = 'd'

    proc = Process(target=f)
    proc.start()
    proc.join()
    assert m[:] == 'doobarding'


def test_anonymous_private_unbuffered():
    """Same as test_anonymous_private but created with buffer=False.

    Bug fix: this function was originally also named
    ``test_anonymous_private``, which silently shadowed the first definition
    so only one of the two tests ever ran.
    """
    data = 'foobarding'
    m = cmmap.mmap(length=10, prot=cmmap.PROT_READ | cmmap.PROT_WRITE,
                   flags=cmmap.MAP_ANONYMOUS | cmmap.MAP_PRIVATE, buffer=False)
    m[:] = data
    assert m[:] == data
sample_vnf.py
# Copyright (c) 2016-2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import decimal from multiprocessing import Queue, Value, Process import os import posixpath import re import uuid import subprocess import time from trex_stl_lib.trex_stl_client import LoggerApi from trex_stl_lib.trex_stl_client import STLClient from trex_stl_lib.trex_stl_exceptions import STLError from yardstick.benchmark.contexts.base import Context from yardstick.common import exceptions as y_exceptions from yardstick.common.process import check_if_process_failed from yardstick.common import utils from yardstick.common import yaml_loader from yardstick.network_services import constants from yardstick.network_services.helpers.dpdkbindnic_helper import DpdkBindHelper, DpdkNode from yardstick.network_services.helpers.samplevnf_helper import MultiPortConfig from yardstick.network_services.nfvi.resource import ResourceProfile from yardstick.network_services.utils import get_nsb_option from yardstick.network_services.vnf_generic.vnf.base import GenericTrafficGen from yardstick.network_services.vnf_generic.vnf.base import GenericVNF from yardstick.network_services.vnf_generic.vnf.base import QueueFileWrapper from yardstick.network_services.vnf_generic.vnf.vnf_ssh_helper import VnfSshHelper from yardstick.benchmark.contexts.node import NodeContext LOG = logging.getLogger(__name__) class SetupEnvHelper(object): CFG_CONFIG = os.path.join(constants.REMOTE_TMP, "sample_config") 
CFG_SCRIPT = os.path.join(constants.REMOTE_TMP, "sample_script") DEFAULT_CONFIG_TPL_CFG = "sample.cfg" PIPELINE_COMMAND = '' VNF_TYPE = "SAMPLE" def __init__(self, vnfd_helper, ssh_helper, scenario_helper): super(SetupEnvHelper, self).__init__() self.vnfd_helper = vnfd_helper self.ssh_helper = ssh_helper self.scenario_helper = scenario_helper self.collectd_options = {} def build_config(self): raise NotImplementedError def setup_vnf_environment(self): pass def kill_vnf(self): pass def tear_down(self): raise NotImplementedError class DpdkVnfSetupEnvHelper(SetupEnvHelper): APP_NAME = 'DpdkVnf' FIND_NET_CMD = "find /sys/class/net -lname '*{}*' -printf '%f'" NR_HUGEPAGES_PATH = '/proc/sys/vm/nr_hugepages' @staticmethod def _update_packet_type(ip_pipeline_cfg, traffic_options): match_str = 'pkt_type = ipv4' replace_str = 'pkt_type = {0}'.format(traffic_options['pkt_type']) pipeline_config_str = ip_pipeline_cfg.replace(match_str, replace_str) return pipeline_config_str @classmethod def _update_traffic_type(cls, ip_pipeline_cfg, traffic_options): traffic_type = traffic_options['traffic_type'] if traffic_options['vnf_type'] is not cls.APP_NAME: match_str = 'traffic_type = 4' replace_str = 'traffic_type = {0}'.format(traffic_type) elif traffic_type == 4: match_str = 'pkt_type = ipv4' replace_str = 'pkt_type = ipv4' else: match_str = 'pkt_type = ipv4' replace_str = 'pkt_type = ipv6' pipeline_config_str = ip_pipeline_cfg.replace(match_str, replace_str) return pipeline_config_str def __init__(self, vnfd_helper, ssh_helper, scenario_helper): super(DpdkVnfSetupEnvHelper, self).__init__(vnfd_helper, ssh_helper, scenario_helper) self.all_ports = None self.bound_pci = None self.socket = None self.used_drivers = None self.dpdk_bind_helper = DpdkBindHelper(ssh_helper) def build_config(self): vnf_cfg = self.scenario_helper.vnf_cfg task_path = self.scenario_helper.task_path config_file = vnf_cfg.get('file') lb_count = vnf_cfg.get('lb_count', 3) lb_config = vnf_cfg.get('lb_config', 'SW') 
worker_config = vnf_cfg.get('worker_config', '1C/1T') worker_threads = vnf_cfg.get('worker_threads', 3) traffic_type = self.scenario_helper.all_options.get('traffic_type', 4) traffic_options = { 'traffic_type': traffic_type, 'pkt_type': 'ipv%s' % traffic_type, 'vnf_type': self.VNF_TYPE, } # read actions/rules from file acl_options = None acl_file_name = self.scenario_helper.options.get('rules') if acl_file_name: with utils.open_relative_file(acl_file_name, task_path) as infile: acl_options = yaml_loader.yaml_load(infile) config_tpl_cfg = utils.find_relative_file(self.DEFAULT_CONFIG_TPL_CFG, task_path) config_basename = posixpath.basename(self.CFG_CONFIG) script_basename = posixpath.basename(self.CFG_SCRIPT) multiport = MultiPortConfig(self.scenario_helper.topology, config_tpl_cfg, config_basename, self.vnfd_helper, self.VNF_TYPE, lb_count, worker_threads, worker_config, lb_config, self.socket) multiport.generate_config() if config_file: with utils.open_relative_file(config_file, task_path) as infile: new_config = ['[EAL]'] vpci = [] for port in self.vnfd_helper.port_pairs.all_ports: interface = self.vnfd_helper.find_interface(name=port) vpci.append(interface['virtual-interface']["vpci"]) new_config.extend('w = {0}'.format(item) for item in vpci) new_config = '\n'.join(new_config) + '\n' + infile.read() else: with open(self.CFG_CONFIG) as handle: new_config = handle.read() new_config = self._update_traffic_type(new_config, traffic_options) new_config = self._update_packet_type(new_config, traffic_options) self.ssh_helper.upload_config_file(config_basename, new_config) self.ssh_helper.upload_config_file(script_basename, multiport.generate_script(self.vnfd_helper, self.get_flows_config(acl_options))) LOG.info("Provision and start the %s", self.APP_NAME) self._build_pipeline_kwargs() return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs) def get_flows_config(self, options=None): # pylint: disable=unused-argument """No actions/rules (flows) by default""" return 
None def _build_pipeline_kwargs(self, cfg_file=None, script=None): tool_path = self.ssh_helper.provision_tool(tool_file=self.APP_NAME) # count the number of actual ports in the list of pairs # remove duplicate ports # this is really a mapping from LINK ID to DPDK PMD ID # e.g. 0x110 maps LINK0 -> PMD_ID_1, LINK1 -> PMD_ID_2 # 0x1010 maps LINK0 -> PMD_ID_1, LINK1 -> PMD_ID_3 ports = self.vnfd_helper.port_pairs.all_ports port_nums = self.vnfd_helper.port_nums(ports) # create mask from all the dpdk port numbers ports_mask_hex = hex(sum(2 ** num for num in port_nums)) vnf_cfg = self.scenario_helper.vnf_cfg lb_config = vnf_cfg.get('lb_config', 'SW') worker_threads = vnf_cfg.get('worker_threads', 3) hwlb = '' if lb_config == 'HW': hwlb = ' --hwlb %s' % worker_threads self.pipeline_kwargs = { 'cfg_file': cfg_file if cfg_file else self.CFG_CONFIG, 'script': script if script else self.CFG_SCRIPT, 'port_mask_hex': ports_mask_hex, 'tool_path': tool_path, 'hwlb': hwlb, } def setup_vnf_environment(self): self._setup_dpdk() self.kill_vnf() # bind before _setup_resources so we can use dpdk_port_num self._detect_and_bind_drivers() resource = self._setup_resources() return resource def kill_vnf(self): # pkill is not matching, debug with pgrep self.ssh_helper.execute("sudo pgrep -lax %s" % self.APP_NAME) self.ssh_helper.execute("sudo ps aux | grep -i %s" % self.APP_NAME) # have to use exact match # try using killall to match self.ssh_helper.execute("sudo killall %s" % self.APP_NAME) def _setup_dpdk(self): """Setup DPDK environment needed for VNF to run""" hugepages_gb = self.scenario_helper.all_options.get('hugepages_gb', 16) utils.setup_hugepages(self.ssh_helper, hugepages_gb * 1024 * 1024) self.dpdk_bind_helper.load_dpdk_driver() exit_status = self.dpdk_bind_helper.check_dpdk_driver() if exit_status == 0: return def _setup_resources(self): # what is this magic? how do we know which socket is for which port? # what about quad-socket? 
if any(v[5] == "0" for v in self.bound_pci): self.socket = 0 else: self.socket = 1 # implicit ordering, presumably by DPDK port num, so pre-sort by port_num # this won't work because we don't have DPDK port numbers yet ports = sorted(self.vnfd_helper.interfaces, key=self.vnfd_helper.port_num) port_names = (intf["name"] for intf in ports) plugins = self.collectd_options.get("plugins", {}) interval = self.collectd_options.get("interval") # we must set timeout to be the same as the VNF otherwise KPIs will die before VNF return ResourceProfile(self.vnfd_helper.mgmt_interface, port_names=port_names, plugins=plugins, interval=interval, timeout=self.scenario_helper.timeout) def _check_interface_fields(self): num_nodes = len(self.scenario_helper.nodes) # OpenStack instance creation time is probably proportional to the number # of instances timeout = 120 * num_nodes dpdk_node = DpdkNode(self.scenario_helper.name, self.vnfd_helper.interfaces, self.ssh_helper, timeout) dpdk_node.check() def _detect_and_bind_drivers(self): interfaces = self.vnfd_helper.interfaces self._check_interface_fields() # check for bound after probe self.bound_pci = [v['virtual-interface']["vpci"] for v in interfaces] self.dpdk_bind_helper.read_status() self.dpdk_bind_helper.save_used_drivers() self.dpdk_bind_helper.bind(self.bound_pci, 'igb_uio') sorted_dpdk_pci_addresses = sorted(self.dpdk_bind_helper.dpdk_bound_pci_addresses) for dpdk_port_num, vpci in enumerate(sorted_dpdk_pci_addresses): try: intf = next(v for v in interfaces if vpci == v['virtual-interface']['vpci']) # force to int intf['virtual-interface']['dpdk_port_num'] = int(dpdk_port_num) except: # pylint: disable=bare-except pass time.sleep(2) def get_local_iface_name_by_vpci(self, vpci): find_net_cmd = self.FIND_NET_CMD.format(vpci) exit_status, stdout, _ = self.ssh_helper.execute(find_net_cmd) if exit_status == 0: return stdout return None def tear_down(self): self.dpdk_bind_helper.rebind_drivers() class ResourceHelper(object): 
COLLECT_KPI = '' MAKE_INSTALL = 'cd {0} && make && sudo make install' RESOURCE_WORD = 'sample' COLLECT_MAP = {} def __init__(self, setup_helper): super(ResourceHelper, self).__init__() self.resource = None self.setup_helper = setup_helper self.ssh_helper = setup_helper.ssh_helper self._enable = True def setup(self): self.resource = self.setup_helper.setup_vnf_environment() def generate_cfg(self): pass def update_from_context(self, context, attr_name): """Disable resource helper in case of baremetal context. And update appropriate node collectd options in context """ if isinstance(context, NodeContext): self._enable = False context.update_collectd_options_for_node(self.setup_helper.collectd_options, attr_name) def _collect_resource_kpi(self): result = {} status = self.resource.check_if_system_agent_running("collectd")[0] if status == 0 and self._enable: result = self.resource.amqp_collect_nfvi_kpi() result = {"core": result} return result def start_collect(self): if self._enable: self.resource.initiate_systemagent(self.ssh_helper.bin_path) self.resource.start() self.resource.amqp_process_for_nfvi_kpi() def stop_collect(self): if self.resource and self._enable: self.resource.stop() def collect_kpi(self): return self._collect_resource_kpi() class ClientResourceHelper(ResourceHelper): RUN_DURATION = 60 QUEUE_WAIT_TIME = 5 SYNC_PORT = 1 ASYNC_PORT = 2 def __init__(self, setup_helper): super(ClientResourceHelper, self).__init__(setup_helper) self.vnfd_helper = setup_helper.vnfd_helper self.scenario_helper = setup_helper.scenario_helper self.client = None self.client_started = Value('i', 0) self.all_ports = None self._queue = Queue() self._result = {} self._terminated = Value('i', 0) def _build_ports(self): self.networks = self.vnfd_helper.port_pairs.networks self.uplink_ports = self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.uplink_ports) self.downlink_ports = \ self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.downlink_ports) self.all_ports = 
self.vnfd_helper.port_nums(self.vnfd_helper.port_pairs.all_ports) def port_num(self, intf): # by default return port num return self.vnfd_helper.port_num(intf) def get_stats(self, *args, **kwargs): try: return self.client.get_stats(*args, **kwargs) except STLError: LOG.error('TRex client not connected') return {} def _get_samples(self, ports, port_pg_id=False): raise NotImplementedError() def _run_traffic_once(self, traffic_profile): traffic_profile.execute_traffic(self) self.client_started.value = 1 time.sleep(self.RUN_DURATION) samples = self._get_samples(traffic_profile.ports) time.sleep(self.QUEUE_WAIT_TIME) self._queue.put(samples) def run_traffic(self, traffic_profile, mq_producer): # if we don't do this we can hang waiting for the queue to drain # have to do this in the subprocess self._queue.cancel_join_thread() # fixme: fix passing correct trex config file, # instead of searching the default path mq_producer.tg_method_started() try: self._build_ports() self.client = self._connect() self.client.reset(ports=self.all_ports) self.client.remove_all_streams(self.all_ports) # remove all streams traffic_profile.register_generator(self) iteration_index = 0 while self._terminated.value == 0: iteration_index += 1 if self._run_traffic_once(traffic_profile): self._terminated.value = 1 mq_producer.tg_method_iteration(iteration_index) self.client.stop(self.all_ports) self.client.disconnect() self._terminated.value = 0 except STLError: if self._terminated.value: LOG.debug("traffic generator is stopped") return # return if trex/tg server is stopped. raise mq_producer.tg_method_finished() def terminate(self): self._terminated.value = 1 # stop client def clear_stats(self, ports=None): if ports is None: ports = self.all_ports self.client.clear_stats(ports=ports) def start(self, ports=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg # NOTE(ralonsoh): defining keyworded arguments before variable # positional arguments is a bug. 
This function definition doesn't work # in Python 2, although it works in Python 3. Reference: # https://www.python.org/dev/peps/pep-3102/ if ports is None: ports = self.all_ports self.client.start(ports=ports, *args, **kwargs) def collect_kpi(self): if not self._queue.empty(): kpi = self._queue.get() self._result.update(kpi) LOG.debug('Got KPIs from _queue for %s %s', self.scenario_helper.name, self.RESOURCE_WORD) return self._result def _connect(self, client=None): if client is None: client = STLClient(username=self.vnfd_helper.mgmt_interface["user"], server=self.vnfd_helper.mgmt_interface["ip"], verbose_level=LoggerApi.VERBOSE_QUIET) # try to connect with 5s intervals, 30s max for idx in range(6): try: client.connect() break except STLError: LOG.info("Unable to connect to Trex Server.. Attempt %s", idx) time.sleep(5) return client class Rfc2544ResourceHelper(object): DEFAULT_CORRELATED_TRAFFIC = False DEFAULT_LATENCY = False DEFAULT_TOLERANCE = '0.0001 - 0.0001' def __init__(self, scenario_helper): super(Rfc2544ResourceHelper, self).__init__() self.scenario_helper = scenario_helper self._correlated_traffic = None self.iteration = Value('i', 0) self._latency = None self._rfc2544 = None self._tolerance_low = None self._tolerance_high = None self._tolerance_precision = None @property def rfc2544(self): if self._rfc2544 is None: self._rfc2544 = self.scenario_helper.all_options['rfc2544'] return self._rfc2544 @property def tolerance_low(self): if self._tolerance_low is None: self.get_rfc_tolerance() return self._tolerance_low @property def tolerance_high(self): if self._tolerance_high is None: self.get_rfc_tolerance() return self._tolerance_high @property def tolerance_precision(self): if self._tolerance_precision is None: self.get_rfc_tolerance() return self._tolerance_precision @property def correlated_traffic(self): if self._correlated_traffic is None: self._correlated_traffic = \ self.get_rfc2544('correlated_traffic', self.DEFAULT_CORRELATED_TRAFFIC) return 
self._correlated_traffic @property def latency(self): if self._latency is None: self._latency = self.get_rfc2544('latency', self.DEFAULT_LATENCY) return self._latency def get_rfc2544(self, name, default=None): return self.rfc2544.get(name, default) def get_rfc_tolerance(self): tolerance_str = self.get_rfc2544('allowed_drop_rate', self.DEFAULT_TOLERANCE) tolerance_iter = iter(sorted( decimal.Decimal(t.strip()) for t in tolerance_str.split('-'))) tolerance_low = next(tolerance_iter) tolerance_high = next(tolerance_iter, tolerance_low) self._tolerance_precision = abs(tolerance_high.as_tuple().exponent) self._tolerance_high = float(tolerance_high) self._tolerance_low = float(tolerance_low) class SampleVNFDeployHelper(object): SAMPLE_VNF_REPO = 'https://gerrit.opnfv.org/gerrit/samplevnf' REPO_NAME = posixpath.basename(SAMPLE_VNF_REPO) SAMPLE_REPO_DIR = os.path.join('~/', REPO_NAME) def __init__(self, vnfd_helper, ssh_helper): super(SampleVNFDeployHelper, self).__init__() self.ssh_helper = ssh_helper self.vnfd_helper = vnfd_helper def deploy_vnfs(self, app_name): vnf_bin = self.ssh_helper.join_bin_path(app_name) exit_status = self.ssh_helper.execute("which %s" % vnf_bin)[0] if not exit_status: return subprocess.check_output(["rm", "-rf", self.REPO_NAME]) subprocess.check_output(["git", "clone", self.SAMPLE_VNF_REPO]) time.sleep(2) self.ssh_helper.execute("rm -rf %s" % self.SAMPLE_REPO_DIR) self.ssh_helper.put(self.REPO_NAME, self.SAMPLE_REPO_DIR, True) build_script = os.path.join(self.SAMPLE_REPO_DIR, 'tools/vnf_build.sh') time.sleep(2) http_proxy = os.environ.get('http_proxy', '') cmd = "sudo -E %s -s -p='%s'" % (build_script, http_proxy) LOG.debug(cmd) self.ssh_helper.execute(cmd) vnf_bin_loc = os.path.join(self.SAMPLE_REPO_DIR, "VNFs", app_name, "build", app_name) self.ssh_helper.execute("sudo mkdir -p %s" % self.ssh_helper.bin_path) self.ssh_helper.execute("sudo cp %s %s" % (vnf_bin_loc, vnf_bin)) class ScenarioHelper(object): DEFAULT_VNF_CFG = { 'lb_config': 'SW', 
'lb_count': 1, 'worker_config': '1C/1T', 'worker_threads': 1, } def __init__(self, name): self.name = name self.scenario_cfg = None @property def task_path(self): return self.scenario_cfg['task_path'] @property def nodes(self): return self.scenario_cfg.get('nodes') @property def all_options(self): return self.scenario_cfg.get('options', {}) @property def options(self): return self.all_options.get(self.name, {}) @property def vnf_cfg(self): return self.options.get('vnf_config', self.DEFAULT_VNF_CFG) @property def topology(self): return self.scenario_cfg['topology'] @property def timeout(self): test_duration = self.scenario_cfg.get('runner', {}).get('duration', self.options.get('timeout', constants.DEFAULT_VNF_TIMEOUT)) test_timeout = self.options.get('timeout', constants.DEFAULT_VNF_TIMEOUT) return test_duration if test_duration > test_timeout else test_timeout class SampleVNF(GenericVNF): """ Class providing file-like API for generic VNF implementation """ VNF_PROMPT = "pipeline>" WAIT_TIME = 1 WAIT_TIME_FOR_SCRIPT = 10 APP_NAME = "SampleVNF" # we run the VNF interactively, so the ssh command will timeout after this long def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, resource_helper_type=None): super(SampleVNF, self).__init__(name, vnfd, task_id) self.bin_path = get_nsb_option('bin_path', '') self.scenario_helper = ScenarioHelper(self.name) self.ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface, self.bin_path) if setup_env_helper_type is None: setup_env_helper_type = SetupEnvHelper self.setup_helper = setup_env_helper_type(self.vnfd_helper, self.ssh_helper, self.scenario_helper) self.deploy_helper = SampleVNFDeployHelper(vnfd, self.ssh_helper) if resource_helper_type is None: resource_helper_type = ResourceHelper self.resource_helper = resource_helper_type(self.setup_helper) self.context_cfg = None self.pipeline_kwargs = {} self.uplink_ports = None self.downlink_ports = None # NOTE(esm): make QueueFileWrapper invert-able so that we # 
never have to manage the queues self.q_in = Queue() self.q_out = Queue() self.queue_wrapper = None self.run_kwargs = {} self.used_drivers = {} self.vnf_port_pairs = None self._vnf_process = None def _start_vnf(self): self.queue_wrapper = QueueFileWrapper(self.q_in, self.q_out, self.VNF_PROMPT) name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid()) self._vnf_process = Process(name=name, target=self._run) self._vnf_process.start() def _vnf_up_post(self): pass def instantiate(self, scenario_cfg, context_cfg): self._update_collectd_options(scenario_cfg, context_cfg) self.scenario_helper.scenario_cfg = scenario_cfg self.context_cfg = context_cfg self.resource_helper.update_from_context( Context.get_context_from_server(self.scenario_helper.nodes[self.name]), self.scenario_helper.nodes[self.name] ) # vnf deploy is unsupported, use ansible playbooks if self.scenario_helper.options.get("vnf_deploy", False): self.deploy_helper.deploy_vnfs(self.APP_NAME) self.resource_helper.setup() self._start_vnf() def _update_collectd_options(self, scenario_cfg, context_cfg): """Update collectd configuration options This function retrieves all collectd options contained in the test case definition builds a single dictionary combining them. 
The following fragment represents a test case with the collectd options and priorities (1 highest, 3 lowest): --- schema: yardstick:task:0.1 scenarios: - type: NSPerf nodes: tg__0: trafficgen_1.yardstick vnf__0: vnf.yardstick options: collectd: <options> # COLLECTD priority 3 vnf__0: collectd: plugins: load <options> # COLLECTD priority 2 context: type: Node name: yardstick nfvi_type: baremetal file: /etc/yardstick/nodes/pod_ixia.yaml # COLLECTD priority 1 """ scenario_options = scenario_cfg.get('options', {}) generic_options = scenario_options.get('collectd', {}) scenario_node_options = scenario_options.get(self.name, {})\ .get('collectd', {}) context_node_options = context_cfg.get('nodes', {})\ .get(self.name, {}).get('collectd', {}) options = generic_options self._update_options(options, scenario_node_options) self._update_options(options, context_node_options) self.setup_helper.collectd_options = options def _update_options(self, options, additional_options): """Update collectd options and plugins dictionary""" for k, v in additional_options.items(): if isinstance(v, dict) and k in options: options[k].update(v) else: options[k] = v def wait_for_instantiate(self): buf = [] time.sleep(self.WAIT_TIME) # Give some time for config to load while True: if not self._vnf_process.is_alive(): raise RuntimeError("%s VNF process died." % self.APP_NAME) # NOTE(esm): move to QueueFileWrapper while self.q_out.qsize() > 0: buf.append(self.q_out.get()) message = ''.join(buf) if self.VNF_PROMPT in message: LOG.info("%s VNF is up and running.", self.APP_NAME) self._vnf_up_post() self.queue_wrapper.clear() return self._vnf_process.exitcode if "PANIC" in message: raise RuntimeError("Error starting %s VNF." % self.APP_NAME) LOG.info("Waiting for %s VNF to start.. 
", self.APP_NAME) time.sleep(self.WAIT_TIME_FOR_SCRIPT) # Send ENTER to display a new prompt in case the prompt text was corrupted # by other VNF output self.q_in.put('\r\n') def start_collect(self): self.resource_helper.start_collect() def stop_collect(self): self.resource_helper.stop_collect() def _build_run_kwargs(self): self.run_kwargs = { 'stdin': self.queue_wrapper, 'stdout': self.queue_wrapper, 'keep_stdin_open': True, 'pty': True, 'timeout': self.scenario_helper.timeout, } def _build_config(self): return self.setup_helper.build_config() def _run(self): # we can't share ssh paramiko objects to force new connection self.ssh_helper.drop_connection() cmd = self._build_config() # kill before starting self.setup_helper.kill_vnf() LOG.debug(cmd) self._build_run_kwargs() self.ssh_helper.run(cmd, **self.run_kwargs) def vnf_execute(self, cmd, wait_time=2): """ send cmd to vnf process """ LOG.info("%s command: %s", self.APP_NAME, cmd) self.q_in.put("{}\r\n".format(cmd)) time.sleep(wait_time) output = [] while self.q_out.qsize() > 0: output.append(self.q_out.get()) return "".join(output) def _tear_down(self): pass def terminate(self): self.vnf_execute("quit") self.setup_helper.kill_vnf() self._tear_down() self.resource_helper.stop_collect() if self._vnf_process is not None: # be proper and join first before we kill LOG.debug("joining before terminate %s", self._vnf_process.name) self._vnf_process.join(constants.PROCESS_JOIN_TIMEOUT) self._vnf_process.terminate() # no terminate children here because we share processes with tg def get_stats(self, *args, **kwargs): # pylint: disable=unused-argument """Method for checking the statistics This method could be overridden in children classes. 
:return: VNF statistics """ cmd = 'p {0} stats'.format(self.APP_WORD) out = self.vnf_execute(cmd) return out def collect_kpi(self): # we can't get KPIs if the VNF is down check_if_process_failed(self._vnf_process, 0.01) stats = self.get_stats() m = re.search(self.COLLECT_KPI, stats, re.MULTILINE) physical_node = Context.get_physical_node_from_server( self.scenario_helper.nodes[self.name]) result = {"physical_node": physical_node} if m: result.update({k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()}) result["collect_stats"] = self.resource_helper.collect_kpi() else: result.update({"packets_in": 0, "packets_fwd": 0, "packets_dropped": 0}) LOG.debug("%s collect KPIs %s", self.APP_NAME, result) return result def scale(self, flavor=""): """The SampleVNF base class doesn't provide the 'scale' feature""" raise y_exceptions.FunctionNotImplemented( function_name='scale', class_name='SampleVNFTrafficGen') class SampleVNFTrafficGen(GenericTrafficGen): """ Class providing file-like API for generic traffic generator """ APP_NAME = 'Sample' RUN_WAIT = 1 def __init__(self, name, vnfd, task_id, setup_env_helper_type=None, resource_helper_type=None): super(SampleVNFTrafficGen, self).__init__(name, vnfd, task_id) self.bin_path = get_nsb_option('bin_path', '') self.scenario_helper = ScenarioHelper(self.name) self.ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface, self.bin_path, wait=True) if setup_env_helper_type is None: setup_env_helper_type = SetupEnvHelper self.setup_helper = setup_env_helper_type(self.vnfd_helper, self.ssh_helper, self.scenario_helper) if resource_helper_type is None: resource_helper_type = ClientResourceHelper self.resource_helper = resource_helper_type(self.setup_helper) self.runs_traffic = True self.traffic_finished = False self._tg_process = None self._traffic_process = None def _start_server(self): # we can't share ssh paramiko objects to force new connection self.ssh_helper.drop_connection() def instantiate(self, scenario_cfg, context_cfg): 
self.scenario_helper.scenario_cfg = scenario_cfg self.resource_helper.update_from_context( Context.get_context_from_server(self.scenario_helper.nodes[self.name]), self.scenario_helper.nodes[self.name] ) self.resource_helper.context_cfg = context_cfg self.resource_helper.setup() # must generate_cfg after DPDK bind because we need port number self.resource_helper.generate_cfg() LOG.info("Starting %s server...", self.APP_NAME) name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid()) self._tg_process = Process(name=name, target=self._start_server) self._tg_process.start() def _check_status(self): raise NotImplementedError def _wait_for_process(self): while True: if not self._tg_process.is_alive(): raise RuntimeError("%s traffic generator process died." % self.APP_NAME) LOG.info("Waiting for %s TG Server to start.. ", self.APP_NAME) time.sleep(1) status = self._check_status() if status == 0: LOG.info("%s TG Server is up and running.", self.APP_NAME) return self._tg_process.exitcode def _traffic_runner(self, traffic_profile, mq_id): # always drop connections first thing in new processes # so we don't get paramiko errors self.ssh_helper.drop_connection() LOG.info("Starting %s client...", self.APP_NAME) self._mq_producer = self._setup_mq_producer(mq_id) self.resource_helper.run_traffic(traffic_profile, self._mq_producer) def run_traffic(self, traffic_profile): """ Generate traffic on the wire according to the given params. Method is non-blocking, returns immediately when traffic process is running. Mandatory. 
:param traffic_profile: :return: True/False """ name = '{}-{}-{}-{}'.format(self.name, self.APP_NAME, traffic_profile.__class__.__name__, os.getpid()) self._traffic_process = Process( name=name, target=self._traffic_runner, args=(traffic_profile, uuid.uuid1().int)) self._traffic_process.start() # Wait for traffic process to start while self.resource_helper.client_started.value == 0: time.sleep(self.RUN_WAIT) # what if traffic process takes a few seconds to start? if not self._traffic_process.is_alive(): break def collect_kpi(self): # check if the tg processes have exited physical_node = Context.get_physical_node_from_server( self.scenario_helper.nodes[self.name]) result = {"physical_node": physical_node} for proc in (self._tg_process, self._traffic_process): check_if_process_failed(proc) result["collect_stats"] = self.resource_helper.collect_kpi() LOG.debug("%s collect KPIs %s", self.APP_NAME, result) return result def terminate(self): """ After this method finishes, all traffic processes should stop. Mandatory. :return: True/False """ self.traffic_finished = True # we must kill client before we kill the server, or the client will raise exception if self._traffic_process is not None: # be proper and try to join before terminating LOG.debug("joining before terminate %s", self._traffic_process.name) self._traffic_process.join(constants.PROCESS_JOIN_TIMEOUT) self._traffic_process.terminate() if self._tg_process is not None: # be proper and try to join before terminating LOG.debug("joining before terminate %s", self._tg_process.name) self._tg_process.join(constants.PROCESS_JOIN_TIMEOUT) self._tg_process.terminate() # no terminate children here because we share processes with vnf def scale(self, flavor=""): """A traffic generator VFN doesn't provide the 'scale' feature""" raise y_exceptions.FunctionNotImplemented( function_name='scale', class_name='SampleVNFTrafficGen')
run_camera_frame_producer.py
import os
import threading
import signal
from functools import partial
from io import BytesIO
from typing import Dict
from multiprocessing import Process
import logging

from mqtt.mqtt_client import get_mqtt
from camera.camera_config import camera_config
from camera.camera_frame_producer import CameraFrameProducer
from camera.pivideostream import PiVideoStream
from service_manager.runnable import Runnable
import multiprocessing as mp

CAMERA_WIDTH = camera_config.camera_width
CAMERA_HEIGHT = camera_config.camera_height

# Fails fast at import time if DEVICE_ID is not configured.
DEVICE_ID = os.environ['DEVICE_ID']

LOGGER = logging.getLogger(__name__)


class ManageRecord:
    """
    Manage record through MQTT for dumb camera.
    It receives MQTT messages, process it to manage records.

    Topic layout consumed: camera/recording/<DEVICE_ID>/<action>[/<video_ref>]
    where <action> is 'start', 'split' or 'end'.
    """
    def __init__(self, video_stream: PiVideoStream) -> None:
        self._video_stream = video_stream
        self._mqtt_client = get_mqtt(f'{DEVICE_ID}-manage-record')
        self._mqtt_client.connect()
        # loop_start() runs the MQTT network loop in a background thread.
        self._mqtt_client.client.loop_start()
        self._setup_listeners()

    @staticmethod
    def _extract_data_from_topic(topic: str) -> Dict[str, str]:
        # topic[3] is the action, topic[4] (optional) the video reference.
        split = topic.split('/')
        data = {
            'action': split[3],
        }
        if len(split) > 4:
            data['video_ref'] = split[4]
        return data

    def _on_record(self, _client, _userdata, message) -> None:
        # MQTT message callback: dispatch on the action embedded in the topic.
        # NOTE(review): 'start'/'split' assume 'video_ref' was present in the
        # topic; a malformed topic would raise KeyError here -- confirm the
        # publisher always includes it.
        data = self._extract_data_from_topic(message.topic)
        if data['action'] == 'start':
            self._video_stream.start_recording(data['video_ref'])
        elif data['action'] == 'split':
            self._video_stream.split_recording(data['video_ref'])
        elif data['action'] == 'end':
            self._video_stream.stop_recording()

    def _setup_listeners(self) -> None:
        # QoS 2 so record commands are delivered exactly once.
        self._mqtt_client.client.subscribe(f'camera/recording/{DEVICE_ID}/#', qos=2)
        self._mqtt_client.client.message_callback_add(f'camera/recording/{DEVICE_ID}/#', self._on_record)


class FrameProducer:
    """Target of the camera child process: reads frames and fans them out."""

    # NOTE(review): mp.Event is a factory function, not a class; the
    # annotations below are kept as-is but describe "an Event instance".
    def __init__(self, stream_event: mp.Event, process_event: mp.Event):
        self._stream_event = stream_event
        self._process_event = process_event

    def run(self, device_id: str) -> None:
        """Blocking loop: wire the Pi video stream to the frame producer."""
        LOGGER.info('run camera frame producer')
        camera = CameraFrameProducer(device_id)
        stream = PiVideoStream(None, resolution=(
            CAMERA_WIDTH, CAMERA_HEIGHT), framerate=25)

        """
        For fps processing, we do not change directly FPS because we need to record at high fps.
        If the camera is recording and we try to change the camera fps we get this error:
            raise PiCameraRuntimeError("Recording is currently running")
        """
        # @rate_limited(max_per_second=0.5, thread_safe=False, block=True)
        def process_frame(frame: BytesIO):
            # stream/process flags are read per-frame so the parent process
            # can toggle behavior live through the shared Events.
            camera.process_frame(frame, stream=self._stream_event.is_set(), process=self._process_event.is_set())

        stream.process_frame = process_frame

        # ManageRecord keeps itself alive via its MQTT background thread.
        ManageRecord(stream)
        stream.run()


class RunCameraFrameProducer(Runnable):
    """Runnable that starts/stops the FrameProducer child process on demand."""

    def __init__(self):
        self._stream_event = mp.Event()
        self._process_event = mp.Event()
        self._frame_producer = FrameProducer(self._stream_event, self._process_event)
        self._process = None

    def _process_join(self, process) -> None:
        # Watchdog (runs in a thread): if the child died for any reason other
        # than our own SIGTERM, take the whole service down.
        process.join()

        if process.exitcode != -signal.SIGTERM:
            # something went wrong in the child process
            # -> kill the process. Reminder: we are inside a thread here,
            # so it appears that sys.exit(1) does not work... So, we will!
            os.kill(os.getpid(), signal.SIGINT)

    def run(self, device_id: str, status: bool, data=None) -> None:
        """
        Start/stop the camera child process and toggle stream/analyze flags.

        Be careful with falsy values! None does NOT mean "turn off" --
        e.g. data={'to_analyze': None, 'stream': False} must leave
        'to_analyze' untouched, hence the strict `is True` / `is False`
        comparisons below.
        """
        if data:
            if 'stream' in data:
                if data['stream'] is True:
                    self._stream_event.set()
                elif data['stream'] is False:
                    self._stream_event.clear()
            if 'to_analyze' in data:
                if data['to_analyze'] is True:
                    self._process_event.set()
                elif data['to_analyze'] is False:
                    self._process_event.clear()

        if status is True and self._process is None:
            run = partial(self._frame_producer.run, device_id)
            self._process = Process(target=run, daemon=True)
            verify = partial(self._process_join, self._process)
            t = threading.Thread(target=verify, daemon=True)
            self._process.start()
            t.start()
            return

        if status is False and self._process:
            self._process.terminate()
            self._process = None

    def __str__(self):
        return 'run-camera-frame-producer'
log.py
""" Logging module for printing status during an exploit, and internally within ``pwntools``. Exploit Developers ------------------ By using the standard ``from pwn import *``, an object named ``log`` will be inserted into the global namespace. You can use this to print out status messages during exploitation. For example,:: log.info('Hello, world!') prints:: [*] Hello, world! Additionally, there are some nifty mechanisms for performing status updates on a running job (e.g. when brute-forcing).:: p = log.progress('Working') p.status('Reticulating splines') time.sleep(1) p.success('Got a shell!') The verbosity of logging can be most easily controlled by setting ``log_level`` on the global ``context`` object.:: log.info("No you see me") context.log_level = 'error' log.info("Now you don't") The purpose of this attribute is to control what gets printed to the screen, not what gets emitted. This means that you can put all logging events into a log file, while only wanting to see a small subset of them on your screen. Pwnlib Developers ----------------- A module-specific logger can be imported into the module via:: from pwnlib.log import getLogger log = getLogger(__name__) This provides an easy way to filter logging programmatically or via a configuration file for debugging. When using ``progress``, you should use the ``with`` keyword to manage scoping, to ensure the spinner stops if an exception is thrown. Technical details ----------------- Familiarity with the :mod:`logging` module is assumed. A pwnlib root logger named 'pwnlib' is created and a custom handler and formatter is installed for it. The handler determines its logging level from :data:`context.log_level`. Ideally :data:`context.log_level` should only affect which records will be emitted by the handler such that e.g. logging to a file will not be changed by it. But for performance reasons it is not feasible log everything in the normal case. 
In particular there are tight loops inside :mod:`pwnlib.tubes.tube`, which we would like to be able to debug, but if we are not debugging them, they should not spit out messages (even to a log file). For this reason there are a few places inside pwnlib, that will not even emit a record without :data:`context.log_level` being set to `logging.DEBUG` or below. Log records created by ``Progress`` and ``Logger`` objects will set ``'pwnlib_msgtype'`` on the ``extra`` field to signal which kind of message was generated. This information is used by the formatter to prepend a symbol to the message, e.g. ``'[+] '`` in ``'[+] got a shell!'`` This field is ignored when using the ``logging`` module's standard formatters. All status updates (which are not dropped due to throttling) on progress loggers result in a log record being created. The ``extra`` field then carries a reference to the ``Progress`` logger as ``'pwnlib_progress'``. If the custom handler determines that :data:`term.term_mode` is enabled, log records that have a ``'pwnlib_progess'`` in their ``extra`` field will not result in a message being emitted but rather an animated progress line (with a spinner!) being created. Note that other handlers will still see a meaningful log record. The custom handler will only handle log records whith a level of at least :data:`context.log_level`. Thus if e.g. the level for the ``'pwnlib.tubes.ssh'`` is set to ``'DEBUG'`` no additional output will show up unless :data:`context.log_level` is also set to ``'DEBUG'``. Other handlers will however see the extra log records generated by the ``'pwnlib.tubes.ssh'`` logger. 
"""
from __future__ import absolute_import
from __future__ import division

import logging
import os
import random
import re
import six
import string
import sys
import threading
import time

from pwnlib import term
from pwnlib.config import register_config
from pwnlib.context import Thread
from pwnlib.context import context
from pwnlib.exception import PwnlibException
from pwnlib.term import spinners
from pwnlib.term import text

__all__ = [
    'getLogger', 'install_default_handler', 'rootlogger'
]

# list of prefixes to use for the different message types.  note that the `text`
# module won't add any escape codes if `pwnlib.context.log_console.isatty()` is `False`
# Each entry is [style_callable, symbol]; both are mutable so that the config
# file (see read_log_config below) can override them.
_msgtype_prefixes = {
    'status'       : [text.magenta, 'x'],
    'success'      : [text.bold_green, '+'],
    'failure'      : [text.bold_red, '-'],
    'debug'        : [text.bold_red, 'DEBUG'],
    'info'         : [text.bold_blue, '*'],
    'warning'      : [text.bold_yellow, '!'],
    'error'        : [text.on_red, 'ERROR'],
    'exception'    : [text.on_red, 'ERROR'],
    'critical'     : [text.on_red, 'CRITICAL'],
    'info_once'    : [text.bold_blue, '*'],
    'warning_once' : [text.bold_yellow, '!'],
    }

def read_log_config(settings):
    """Apply the [log] section of the pwntools config file.

    Recognized keys have the form '<msgtype>.color' (a `pwnlib.term.text`
    attribute name) or '<msgtype>.symbol' (the literal prefix symbol).
    """
    log = getLogger(__name__)
    for key, value in settings.items():
        if '.' not in key:
            log.warn("Invalid configuration option %r in section %r" % (key, 'log'))
            continue

        msgtype, key = key.split('.', 1)

        if key == 'color':
            # fall back to the current style if `value` is not a valid
            # attribute of `pwnlib.term.text`
            current = _msgtype_prefixes[msgtype][0]
            _msgtype_prefixes[msgtype][0] = getattr(text, value, current)
        elif key == 'symbol':
            _msgtype_prefixes[msgtype][1] = value
        else:
            log.warn("Unknown configuration option %r in section %r" % (key, 'log'))

register_config('log', read_log_config)

# the text decoration to use for spinners.  the spinners themselves can be found
# in the `pwnlib.term.spinners` module
_spinner_style = text.bold_blue


class Progress(object):
    """
    Progress logger used to generate log records associated with some running
    job.
    Instances can be used as context managers which will automatically declare
    the running job a success upon exit or a failure upon a thrown exception.

    After :meth:`success` or :meth:`failure` is called the status can no longer
    be updated.

    This class is intended for internal use.  Progress loggers should be created
    using :meth:`Logger.progress`.
    """
    def __init__(self, logger, msg, status, level, args, kwargs):
        self._logger = logger
        self._msg = msg
        self._status = status
        self._level = level
        self._stopped = False
        self.last_status = 0
        # minimum number of seconds between emitted status updates
        # (popped from kwargs so it is not forwarded to the logging call)
        self.rate = kwargs.pop('rate', 0)
        self._log(status, args, kwargs, 'status')
        # it is a common use case to create a logger and then immediately update
        # its status line, so we reset `last_status` to accommodate this pattern
        self.last_status = 0

    def _log(self, status, args, kwargs, msgtype):
        # Logs are strings, not bytes.  Handle Python3 bytes() objects.
        status = _need_text(status)

        # this progress logger is stopped, so don't generate any more records
        if self._stopped:
            return
        msg = self._msg
        if msg and status:
            msg += ': '
        msg += status
        self._logger._log(self._level, msg, args, kwargs, msgtype, self)

    def status(self, status, *args, **kwargs):
        """status(status, *args, **kwargs)

        Logs a status update for the running job.

        If the progress logger is animated the status line will be updated in
        place.

        Status updates are throttled to at most one update per ``rate``
        seconds (the ``rate`` keyword passed at creation; default 0, i.e.
        unthrottled).
        """
        now = time.time()
        if (now - self.last_status) > self.rate:
            self.last_status = now
            self._log(status, args, kwargs, 'status')

    def success(self, status = 'Done', *args, **kwargs):
        """success(status = 'Done', *args, **kwargs)

        Logs that the running job succeeded.  No further status updates are
        allowed.

        If the Logger is animated, the animation is stopped.
        """
        self._log(status, args, kwargs, 'success')
        self._stopped = True

    def failure(self, status = 'Failed', *args, **kwargs):
        """failure(message)

        Logs that the running job failed.  No further status updates are
        allowed.
If the Logger is animated, the animation is stopped. """ self._log(status, args, kwargs, 'failure') self._stopped = True def __enter__(self): return self def __exit__(self, exc_typ, exc_val, exc_tb): # if the progress logger is already stopped these are no-ops if exc_typ is None: self.success() else: self.failure() class Logger(object): """ A class akin to the :class:`logging.LoggerAdapter` class. All public methods defined on :class:`logging.Logger` instances are defined on this class. Also adds some ``pwnlib`` flavor: * :meth:`progress` (alias :meth:`waitfor`) * :meth:`success` * :meth:`failure` * :meth:`indented` * :meth:`info_once` * :meth:`warning_once` (alias :meth:`warn_once`) Adds ``pwnlib``-specific information for coloring, indentation and progress logging via log records ``extra`` field. Loggers instantiated with :func:`getLogger` will be of this class. """ _one_time_infos = set() _one_time_warnings = set() def __init__(self, logger=None): if logger is None: # This is a minor hack to permit user-defined classes which inherit # from a tube (which do not actually reside in the pwnlib library) # to receive logging abilities that behave as they would expect from # the rest of the library module = self.__module__ if not module.startswith('pwnlib'): module = 'pwnlib.' + module # - end hack - logger_name = '%s.%s.%s' % (module, self.__class__.__name__, id(self)) logger = logging.getLogger(logger_name) self._logger = logger def _getlevel(self, levelString): if isinstance(levelString, six.integer_types): return levelString return logging._levelNames[levelString.upper()] def _log(self, level, msg, args, kwargs, msgtype, progress = None): # Logs are strings, not bytes. Handle Python3 bytes() objects. 
        msg = _need_text(msg)

        # Attach the pwnlib-specific metadata that Formatter/Handler consume.
        extra = kwargs.get('extra', {})
        extra.setdefault('pwnlib_msgtype', msgtype)
        extra.setdefault('pwnlib_progress', progress)
        kwargs['extra'] = extra
        self._logger.log(level, msg, *args, **kwargs)

    def progress(self, message, status = '', *args, **kwargs):
        """progress(message, status = '', *args, level = logging.INFO, **kwargs) -> Progress

        Creates a new progress logger which creates log records with log level
        `level`.

        Progress status can be updated using :meth:`Progress.status` and stopped
        using :meth:`Progress.success` or :meth:`Progress.failure`.

        If `term.term_mode` is enabled the progress logger will be animated.

        The progress manager also functions as a context manager.  Using context
        managers ensures that animations stop even if an exception is raised.

        .. code-block:: python

           with log.progress('Trying something...') as p:
               for i in range(10):
                   p.status("At %i" % i)
                   time.sleep(0.5)
               x = 1/0
        """
        level = self._getlevel(kwargs.pop('level', logging.INFO))
        return Progress(self, message, status, level, args, kwargs)

    def waitfor(self, *args, **kwargs):
        """Alias for :meth:`progress`."""
        return self.progress(*args, **kwargs)

    def indented(self, message, *args, **kwargs):
        """indented(message, *args, level = logging.INFO, **kwargs)

        Log a message but don't put a line prefix on it.

        Arguments:
            level(int): Alternate log level at which to set the indented
                       message.  Defaults to :const:`logging.INFO`.
        """
        level = self._getlevel(kwargs.pop('level', logging.INFO))
        self._log(level, message, args, kwargs, 'indented')

    def success(self, message, *args, **kwargs):
        """success(message, *args, **kwargs)

        Logs a success message.
        """
        self._log(logging.INFO, message, args, kwargs, 'success')

    def failure(self, message, *args, **kwargs):
        """failure(message, *args, **kwargs)

        Logs a failure message.
        """
        self._log(logging.INFO, message, args, kwargs, 'failure')

    def info_once(self, message, *args, **kwargs):
        """info_once(message, *args, **kwargs)

        Logs an info message.  The same message is never printed again.
        """
        m = message % args
        if m not in self._one_time_infos:
            # only remember the message when it will actually be emitted,
            # so it can still show up later if the level is lowered
            if self.isEnabledFor(logging.INFO):
                self._one_time_infos.add(m)
            self._log(logging.INFO, message, args, kwargs, 'info_once')

    def warning_once(self, message, *args, **kwargs):
        """warning_once(message, *args, **kwargs)

        Logs a warning message.  The same message is never printed again.
        """
        m = message % args
        if m not in self._one_time_warnings:
            if self.isEnabledFor(logging.WARNING):
                self._one_time_warnings.add(m)
            self._log(logging.WARNING, message, args, kwargs, 'warning_once')

    def warn_once(self, *args, **kwargs):
        """Alias for :meth:`warning_once`."""
        return self.warning_once(*args, **kwargs)

    # logging functions also exposed by `logging.Logger`

    def debug(self, message, *args, **kwargs):
        """debug(message, *args, **kwargs)

        Logs a debug message.
        """
        self._log(logging.DEBUG, message, args, kwargs, 'debug')

    def info(self, message, *args, **kwargs):
        """info(message, *args, **kwargs)

        Logs an info message.
        """
        self._log(logging.INFO, message, args, kwargs, 'info')

    def hexdump(self, message, *args, **kwargs):
        """Logs a hexdump of `message` at info level."""
        # cyclic dependencies FTW!
        # TODO: Move pwnlib.util.fiddling.hexdump into a new module.
        import pwnlib.util.fiddling
        self.info(pwnlib.util.fiddling.hexdump(message, *args, **kwargs))

    def maybe_hexdump(self, message, *args, **kwargs):
        """maybe_hexdump(self, message, *args, **kwargs)

        Logs a message using indented.
        Repeated single byte is compressed, and unprintable message is hexdumped.
        """
        if len(set(message)) == 1 and len(message) > 1:
            # a run of one repeated byte: show it as b'x' * 0xNN
            self.indented('%r * %#x' % (message[:1], len(message)), *args, **kwargs)
        elif len(message) == 1 or all(c in string.printable.encode() for c in message):
            for line in message.splitlines(True):
                self.indented(repr(line), *args, **kwargs)
        else:
            import pwnlib.util.fiddling
            self.indented(pwnlib.util.fiddling.hexdump(message), *args, **kwargs)

    def warning(self, message, *args, **kwargs):
        """warning(message, *args, **kwargs)

        Logs a warning message.
        """
        self._log(logging.WARNING, message, args, kwargs, 'warning')

    def warn(self, *args, **kwargs):
        """Alias for :meth:`warning`."""
        return self.warning(*args, **kwargs)

    def error(self, message, *args, **kwargs):
        """error(message, *args, **kwargs)

        To be called outside an exception handler.

        Logs an error message, then raises a ``PwnlibException``.
        """
        self._log(logging.ERROR, message, args, kwargs, 'error')
        raise PwnlibException(message % args)

    def exception(self, message, *args, **kwargs):
        """exception(message, *args, **kwargs)

        To be called from an exception handler.

        Logs an error message, then re-raises the current exception.
        """
        kwargs["exc_info"] = 1
        self._log(logging.ERROR, message, args, kwargs, 'exception')
        raise

    def critical(self, message, *args, **kwargs):
        """critical(message, *args, **kwargs)

        Logs a critical message.
        """
        self._log(logging.CRITICAL, message, args, kwargs, 'critical')

    def log(self, level, message, *args, **kwargs):
        """log(level, message, *args, **kwargs)

        Logs a message with log level `level`.  The ``pwnlib`` formatter will
        use the default :mod:`logging` formater to format this message.
        """
        self._log(level, message, args, kwargs, None)

    def isEnabledFor(self, level):
        """isEnabledFor(level) -> bool

        See if the underlying logger is enabled for the specified level.
        """
        effectiveLevel = self._logger.getEffectiveLevel()

        # level 1 is the sentinel installed by install_default_handler,
        # meaning "defer to context.log_level"
        if effectiveLevel == 1:
            effectiveLevel = context.log_level
        return effectiveLevel <= level

    def setLevel(self, level):
        """setLevel(level)

        Set the logging level for the underlying logger.
        """
        with context.local(log_level=level):
            self._logger.setLevel(context.log_level)

    def addHandler(self, handler):
        """addHandler(handler)

        Add the specified handler to the underlying logger.
        """
        self._logger.addHandler(handler)

    def removeHandler(self, handler):
        """removeHandler(handler)

        Remove the specified handler from the underlying logger.
        """
        self._logger.removeHandler(handler)

    @property
    def level(self):
        # numeric level of the wrapped stdlib logger
        return self._logger.level

    @level.setter
    def level(self, value):
        # normalize the value through context (accepts names and ints)
        with context.local(log_level=value):
            self._logger.level = context.log_level


class Handler(logging.StreamHandler):
    """
    A custom handler class.  This class will report whatever
    :data:`context.log_level` is currently set to as its log level.

    If :data:`term.term_mode` is enabled log records originating from a progress
    logger will not be emitted but rather an animated progress line will be
    created.

    An instance of this handler is added to the ``'pwnlib'`` logger.
    """
    @property
    def stream(self):
        # always write to the current console, even if it changed after
        # this handler was created
        return context.log_console

    @stream.setter
    def stream(self, value):
        # ignore assignments from StreamHandler.__init__; the stream is
        # always derived from context.log_console
        pass

    def emit(self, record):
        """
        Emit a log record or create/update an animated progress logger
        depending on whether :data:`term.term_mode` is enabled.
        """
        # We have set the root 'pwnlib' logger to have a logLevel of 1,
        # when logging has been enabled via install_default_handler.
        #
        # If the level is 1, we should only process the record if
        # context.log_level is less than the record's log level.
        #
        # If the level is not 1, somebody else expressly set the log
        # level somewhere on the tree, and we should use that value.
        level = logging.getLogger(record.name).getEffectiveLevel()

        if level == 1:
            level = context.log_level

        if level > record.levelno:
            return

        progress = getattr(record, 'pwnlib_progress', None)

        # if the record originates from a `Progress` object and term handling
        # is enabled we can have animated spinners! so check that
        if progress is None or not term.term_mode:
            super(Handler, self).emit(record)
            return

        # yay, spinners!

        # since we want to be able to update the spinner we overwrite the
        # message type so that the formatter doesn't output a prefix symbol
        msgtype = record.pwnlib_msgtype
        record.pwnlib_msgtype = 'animated'
        msg = "%s\n" % self.format(record)

        # we enrich the `Progress` object to keep track of the spinner
        if not hasattr(progress, '_spinner_handle'):
            spinner_handle = term.output('')
            msg_handle = term.output(msg)
            stop = threading.Event()
            def spin():
                '''Wheeeee!'''
                state  = 0
                states = random.choice(spinners.spinners)
                while True:
                    prefix = '[%s] ' % _spinner_style(states[state])
                    spinner_handle.update(prefix)
                    state = (state + 1) % len(states)
                    # wait() doubles as the frame delay and the stop check
                    if stop.wait(0.1):
                        break
            t = Thread(target = spin)
            t.daemon = True
            t.start()
            progress._spinner_handle = spinner_handle
            progress._msg_handle = msg_handle
            progress._stop_event = stop
            progress._spinner_thread = t
        else:
            progress._msg_handle.update(msg)

        # if the message type was not a status message update, then we should
        # stop the spinner
        if msgtype != 'status':
            progress._stop_event.set()
            progress._spinner_thread.join()
            # replace the spinner with the final (+/-) prefix symbol
            style, symb = _msgtype_prefixes[msgtype]
            prefix = '[%s] ' % style(symb)
            progress._spinner_handle.update(prefix)

class Formatter(logging.Formatter):
    """
    Logging formatter which performs custom formatting for log records
    containing the ``'pwnlib_msgtype'`` attribute.  Other records are formatted
    using the `logging` modules default formatter.

    If ``'pwnlib_msgtype'`` is set, it performs the following actions:

    * A prefix looked up in `_msgtype_prefixes` is prepended to the message.
    * The message is prefixed such that it starts on column four.
    * If the message spans multiple lines they are split, and all subsequent
      lines are indented.

    This formatter is used by the handler installed on the ``'pwnlib'`` logger.
    """

    # Indentation from the left side of the terminal.
    # All log messages will be indented at least this far.
    indent    = '    '

    # Newline, followed by an indent.  Used to wrap multiple lines.
    nlindent = '\n' + indent

    def format(self, record):
        # use the default formatter to actually format the record
        msg = super(Formatter, self).format(record)

        # then put on a prefix symbol according to the message type

        msgtype = getattr(record, 'pwnlib_msgtype', None)

        # if 'pwnlib_msgtype' is not set (or set to `None`) we just return the
        # message as it is

        if msgtype is None:
            return msg

        if msgtype in _msgtype_prefixes:
            style, symb = _msgtype_prefixes[msgtype]
            prefix = '[%s] ' % style(symb)
        elif msgtype == 'indented':
            prefix = self.indent
        elif msgtype == 'animated':
            # the handler will take care of updating the spinner, so we will
            # not include it here
            prefix = ''
        else:
            # this should never happen
            prefix = '[?] '

        msg = prefix + msg
        msg = self.nlindent.join(msg.splitlines())
        return msg

def _need_text(s):
    # circular import wrapper: replace this stub with the real
    # pwnlib.util.packing._need_text on first call
    global _need_text
    from pwnlib.util.packing import _need_text
    return _need_text(s, 2)

# Return a pwnlib Logger wrapping the stdlib logger of the given name.
# (The underlying logging.getLogger call caches loggers by name; a fresh
# Logger wrapper object is created on every call here.)
def getLogger(name):
    return Logger(logging.getLogger(name))

class LogfileHandler(logging.FileHandler):
    """FileHandler whose stream is always context.log_file; records are
    dropped entirely while no log file has been configured."""
    def __init__(self):
        # delay=1 so no file is opened until a record is actually handled
        super(LogfileHandler, self).__init__('', delay=1)
    @property
    def stream(self):
        return context.log_file
    @stream.setter
    def stream(self, value):
        # the stream is always derived from context.log_file; ignore writes
        pass
    def handle(self, *a, **kw):
        # only emit when a real log file is configured on the context
        if self.stream.name is not None:
            super(LogfileHandler, self).handle(*a, **kw)

iso_8601 = '%Y-%m-%dT%H:%M:%S'
fmt      = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
log_file = LogfileHandler()
log_file.setFormatter(logging.Formatter(fmt, iso_8601))

#
# The root 'pwnlib' logger is declared here.  To change the target of all
# 'pwntools'-specific logging, only this logger needs to be changed.
#
# Logging cascades upward through the hierarchy,
# so the only point that should ever need to be
# modified is the root 'pwnlib' logger.
#
# For example:
#     map(rootlogger.removeHandler, rootlogger.handlers)
#     logger.addHandler(myCoolPitchingHandler)
#
rootlogger = getLogger('pwnlib')
console   = Handler()
formatter = Formatter()
console.setFormatter(formatter)

def install_default_handler():
    '''install_default_handler()

    Instantiates a :class:`Handler` and :class:`Formatter` and installs them for
    the ``pwnlib`` root logger.  This function is automatically called from when
    importing :mod:`pwn`.
    '''
    logger = logging.getLogger('pwnlib')

    if console not in logger.handlers:
        logger.addHandler(console)
        logger.addHandler(log_file)

    # level 1 is a sentinel: the Handler/Logger classes interpret it as
    # "defer to context.log_level"
    logger.setLevel(1)
test_smtplib.py
import asyncore import email.mime.text import email.utils import socket import smtpd import smtplib import io import re import sys import time import select import errno import unittest from test import support, mock_socket try: import threading except ImportError: threading = None HOST = support.HOST if sys.platform == 'darwin': # select.poll returns a select.POLLHUP at the end of the tests # on darwin, so just ignore it def handle_expt(self): pass smtpd.SMTPChannel.handle_expt = handle_expt def server(evt, buf, serv): serv.listen(5) evt.set() try: conn, addr = serv.accept() except socket.timeout: pass else: n = 500 while buf and n > 0: r, w, e = select.select([], [conn], []) if w: sent = conn.send(buf) buf = buf[sent:] n -= 1 conn.close() finally: serv.close() evt.set() class GeneralTests(unittest.TestCase): def setUp(self): smtplib.socket = mock_socket self.port = 25 def tearDown(self): smtplib.socket = socket # This method is no longer used but is retained for backward compatibility, # so test to make sure it still works. 
    def testQuoteData(self):
        teststr = "abc\n.jkl\rfoo\r\n..blue"
        expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
        self.assertEqual(expected, smtplib.quotedata(teststr))

    def testBasic1(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port)
        smtp.close()

    def testSourceAddress(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects
        smtp = smtplib.SMTP(HOST, self.port,
                            source_address=('127.0.0.1', 19876))
        self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
        smtp.close()

    def testBasic2(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # connects, include port in host name
        smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
        smtp.close()

    def testLocalHostName(self):
        mock_socket.reply_with(b"220 Hola mundo")
        # check that supplied local_hostname is used
        smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
        self.assertEqual(smtp.local_hostname, "testhost")
        smtp.close()

    def testTimeoutDefault(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertTrue(mock_socket.getdefaulttimeout() is None)
        mock_socket.setdefaulttimeout(30)
        self.assertEqual(mock_socket.getdefaulttimeout(), 30)
        try:
            smtp = smtplib.SMTP(HOST, self.port)
        finally:
            mock_socket.setdefaulttimeout(None)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()

    def testTimeoutNone(self):
        mock_socket.reply_with(b"220 Hola mundo")
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            smtp = smtplib.SMTP(HOST, self.port, timeout=None)
        finally:
            socket.setdefaulttimeout(None)
        self.assertTrue(smtp.sock.gettimeout() is None)
        smtp.close()

    def testTimeoutValue(self):
        mock_socket.reply_with(b"220 Hola mundo")
        smtp = smtplib.SMTP(HOST, self.port, timeout=30)
        self.assertEqual(smtp.sock.gettimeout(), 30)
        smtp.close()


# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
    serv_evt.set()
    try:
        if hasattr(select, 'poll'):
            poll_fun = asyncore.poll2
        else:
            poll_fun = asyncore.poll

        n = 1000
        while asyncore.socket_map and n > 0:
            poll_fun(0.01, asyncore.socket_map)

            # when the client conversation is finished, it will
            # set client_evt, and it's then ok to kill the server
            if client_evt.is_set():
                serv.close()
                break

            n -= 1

    except socket.timeout:
        pass
    finally:
        if not client_evt.is_set():
            # allow some time for the client to read the result
            time.sleep(0.5)
            serv.close()
        asyncore.close_all()
        serv_evt.set()

MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'


# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.

# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
    """End-to-end tests against a real smtpd.DebuggingServer in a thread."""

    maxDiff = None

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        # temporarily replace sys.stdout to capture DebuggingServer output
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output

        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Capture SMTPChannel debug output
        self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
        smtpd.DEBUGSTREAM = io.StringIO()
        # Pick a random unused port by passing 0 for the port number
        self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1))
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()
        # restore sys.stdout
        sys.stdout = self.old_stdout
        # restore DEBUGSTREAM
        smtpd.DEBUGSTREAM.close()
        smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM

    def testBasic(self):
        # connect
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.quit()

    def testSourceAddress(self):
        # connect
        port = support.find_unused_port()
        try:
            smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                                timeout=3, source_address=('127.0.0.1', port))
            self.assertEqual(smtp.source_address, ('127.0.0.1', port))
            self.assertEqual(smtp.local_hostname, 'localhost')
            smtp.quit()
        except OSError as e:
            if e.errno == errno.EADDRINUSE:
                self.skipTest("couldn't bind to port %d" % port)
            raise

    def testNOOP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.noop(), expected)
        smtp.quit()

    def testRSET(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (250, b'OK')
        self.assertEqual(smtp.rset(), expected)
        smtp.quit()

    def testELHO(self):
        # EHLO isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (250, b'\nSIZE 33554432\nHELP')
        self.assertEqual(smtp.ehlo(), expected)
        smtp.quit()

    def testEXPNNotImplemented(self):
        # EXPN isn't implemented in DebuggingServer
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (502, b'EXPN not implemented')
        smtp.putcmd('EXPN')
        self.assertEqual(smtp.getreply(), expected)
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        expected = (252, b'Cannot VRFY user, but will accept message ' + \
                         b'and attempt delivery')
        self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
        self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
        smtp.quit()

    def testSecondHELO(self):
        # check that a second HELO returns a message that it's a duplicate
        # (this behavior is specific to smtpd.SMTPChannel)
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.helo()
        expected = (503, b'Duplicate HELO/EHLO')
        self.assertEqual(smtp.helo(), expected)
        smtp.quit()

    def testHELP(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
                                      b'RCPT DATA RSET NOOP QUIT VRFY')
        smtp.quit()

    def testSend(self):
        # connect and send mail
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
        # in asyncore.  This sleep might help, but should really be fixed
        # properly by using an Event variable.
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendBinary(self):
        m = b'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNeedingDotQuote(self):
        # Issue 12283
        m = '.A test\n.mes.sage.'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('John', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendNullSender(self):
        m = 'A test message'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.sendmail('<>', 'Sally', m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: <>$", re.MULTILINE)
        self.assertRegex(debugout, sender)

    def testSendMessage(self):
        m = email.mime.text.MIMEText('A test message')
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m, from_addr='John', to_addrs='Sally')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)

    def testSendMessageWithAddresses(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()
        # make sure the Bcc header is still in the message.
        self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
                                   '<warped@silly.walks.com>')

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        # The Bcc header should not be transmitted.
        del m['Bcc']
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Sally', 'Fred', 'root@localhost',
                     'warped@silly.walks.com'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSomeAddresses(self):
        # Make sure nothing breaks if not all of the three 'to' headers exist
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageWithSpecifiedAddresses(self):
        # Make sure addresses specified in call override those in message.
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m, from_addr='joe@example.com',
                          to_addrs='foo@example.net')
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertNotRegex(debugout, to_addr)
        recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
        self.assertRegex(debugout, recip)

    def testSendMessageWithMultipleFrom(self):
        # Sender overrides To
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'Bernard, Bianca'
        m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
        m['To'] = 'John, Dinsdale'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$",
                            re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('John', 'Dinsdale'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageResent(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        smtp.send_message(m)
        # XXX (see comment in testSend)
        time.sleep(0.01)
        smtp.quit()

        self.client_evt.set()
        self.serv_evt.wait()
        self.output.flush()
        # The Resent-Bcc headers are deleted before serialization.
        del m['Bcc']
        del m['Resent-Bcc']
        # Add the X-Peer header that DebuggingServer adds
        m['X-Peer'] = socket.gethostbyname('localhost')
        mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
        self.assertEqual(self.output.getvalue(), mexpect)
        debugout = smtpd.DEBUGSTREAM.getvalue()
        sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
        self.assertRegex(debugout, sender)
        for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
            to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
                                 re.MULTILINE)
            self.assertRegex(debugout, to_addr)

    def testSendMessageMultipleResentRaises(self):
        m = email.mime.text.MIMEText('A test message')
        m['From'] = 'foo@bar.com'
        m['To'] = 'John'
        m['CC'] = 'Sally, Fred'
        m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
        m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
        m['Resent-From'] = 'holy@grail.net'
        m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        m['Resent-Bcc'] = 'doe@losthope.net'
        m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
        m['Resent-To'] = 'holy@grail.net'
        m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=3)
        # two sets of Resent-* headers: ambiguous, so send_message must raise
        with self.assertRaises(ValueError):
            smtp.send_message(m)
        smtp.close()


class NonConnectingTests(unittest.TestCase):
    """Tests that never open a connection at all."""

    def testNotConnected(self):
        # Test various operations on an unconnected SMTP object that
        # should raise exceptions (at present the attempt in SMTP.send
        # to reference the nonexistent 'sock' attribute of the SMTP object
        # causes an AttributeError)
        smtp = smtplib.SMTP()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
        self.assertRaises(smtplib.SMTPServerDisconnected,
                          smtp.send, 'test msg')

    def testNonnumericPort(self):
        # check that non-numeric port raises OSError
        self.assertRaises(OSError, smtplib.SMTP,
                          "localhost", "bogus")
        self.assertRaises(OSError, smtplib.SMTP,
                          "localhost:bogus")


# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):

    def setUp(self):
        smtplib.socket = mock_socket
        # 199 is outside the 2xx success range, so connect() must fail
        mock_socket.reply_with(b"199 no hello for you!")
        self.old_stdout = sys.stdout
        self.output = io.StringIO()
        sys.stdout = self.output
        self.port = 25

    def tearDown(self):
        smtplib.socket = socket
        sys.stdout = self.old_stdout

    def testFailingHELO(self):
        self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
                          HOST, self.port, 'localhost', 3)


# fixture data shared by the simulated-server tests below
sim_users = {'Mr.A@somewhere.com':'John A',
             'Ms.B@xn--fo-fka.com':'Sally B',
             'Mrs.C@somewhereesle.com':'Ruth C',
            }

sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
                          'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
    'login': 'TXIuQUBzb21ld2hlcmUuY29t',
    'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
    'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
                 'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
    }
sim_auth_login_password = 'C29TZXBHC3N3B3JK'

sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
             'list-2':['Ms.B@xn--fo-fka.com',],
            }

# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
    """SMTPChannel with scripted responses for testing client behaviour.

    The *_response class attributes, when set, override the normal reply
    for the corresponding command; rcpt_response is a list consumed one
    entry per RCPT.
    """

    quit_response = None
    mail_response = None
    rcpt_response = None
    data_response = None
    rcpt_count = 0
    rset_count = 0

    def __init__(self, extra_features, *args, **kw):
        self._extrafeatures = ''.join(
            [ "250-{0}\r\n".format(x) for x in extra_features ])
        super(SimSMTPChannel, self).__init__(*args, **kw)

    def smtp_EHLO(self, arg):
        resp = ('250-testhost\r\n'
                '250-EXPN\r\n'
                '250-SIZE 20000000\r\n'
                '250-STARTTLS\r\n'
                '250-DELIVERBY\r\n')
        resp = resp + self._extrafeatures + '250 HELP'
        self.push(resp)
        self.seen_greeting = arg
        self.extended_smtp = True

    def smtp_VRFY(self, arg):
        # For max compatibility smtplib should be sending the raw address.
        if arg in sim_users:
            self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
        else:
            self.push('550 No such user: %s' % arg)

    def smtp_EXPN(self, arg):
        list_name = arg.lower()
        if list_name in sim_lists:
            user_list = sim_lists[list_name]
            for n, user_email in enumerate(user_list):
                quoted_addr = smtplib.quoteaddr(user_email)
                if n < len(user_list) - 1:
                    self.push('250-%s %s' % (sim_users[user_email],
                                             quoted_addr))
                else:
                    self.push('250 %s %s' % (sim_users[user_email],
                                             quoted_addr))
        else:
            self.push('550 No access for you!')

    def smtp_AUTH(self, arg):
        if arg.strip().lower()=='cram-md5':
            self.push('334 {}'.format(sim_cram_md5_challenge))
            return
        mech, auth = arg.split()
        mech = mech.lower()
        if mech not in sim_auth_credentials:
            self.push('504 auth type unimplemented')
            return
        if mech == 'plain' and auth==sim_auth_credentials['plain']:
            self.push('235 plain auth ok')
        elif mech=='login' and auth==sim_auth_credentials['login']:
            self.push('334 Password:')
        else:
            self.push('550 No access for you!')

    def smtp_QUIT(self, arg):
        if self.quit_response is None:
            super(SimSMTPChannel, self).smtp_QUIT(arg)
        else:
            self.push(self.quit_response)
            self.close_when_done()

    def smtp_MAIL(self, arg):
        if self.mail_response is None:
            super().smtp_MAIL(arg)
        else:
            self.push(self.mail_response)

    def smtp_RCPT(self, arg):
        if self.rcpt_response is None:
            super().smtp_RCPT(arg)
            return
        self.rcpt_count += 1
        self.push(self.rcpt_response[self.rcpt_count-1])

    def smtp_RSET(self, arg):
        self.rset_count += 1
        super().smtp_RSET(arg)

    def smtp_DATA(self, arg):
        if self.data_response is None:
            super().smtp_DATA(arg)
        else:
            self.push(self.data_response)

    def handle_error(self):
        # let server-side errors surface in the test instead of being logged
        raise


class SimSMTPServer(smtpd.SMTPServer):
    """SMTPServer wired to SimSMTPChannel; messages are discarded."""

    channel_class = SimSMTPChannel

    def __init__(self, *args, **kw):
        self._extra_features = []
        smtpd.SMTPServer.__init__(self, *args, **kw)

    def handle_accepted(self, conn, addr):
        self._SMTPchannel = self.channel_class(
            self._extra_features, self, conn, addr)

    def process_message(self, peer, mailfrom, rcpttos, data):
        pass

    def add_feature(self, feature):
        # advertise an extra EHLO feature line on future connections
        self._extra_features.append(feature)

    def handle_error(self):
        raise


# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):

    def setUp(self):
        self.real_getfqdn = socket.getfqdn
        socket.getfqdn = mock_socket.getfqdn
        self.serv_evt = threading.Event()
        self.client_evt = threading.Event()
        # Pick a random unused port by passing 0 for the port number
        self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1))
        # Keep a note of what port was assigned
        self.port = self.serv.socket.getsockname()[1]
        serv_args = (self.serv, self.serv_evt, self.client_evt)
        self.thread = threading.Thread(target=debugging_server, args=serv_args)
        self.thread.start()

        # wait until server thread has assigned a port number
        self.serv_evt.wait()
        self.serv_evt.clear()

    def tearDown(self):
        socket.getfqdn = self.real_getfqdn
        # indicate that the client is finished
        self.client_evt.set()
        # wait for the server thread to terminate
        self.serv_evt.wait()
        self.thread.join()

    def testBasic(self):
        # smoke test
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.quit()

    def testEHLO(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)

        # no features should be present before the EHLO
        self.assertEqual(smtp.esmtp_features, {})

        # features expected from the test server
        expected_features = {'expn':'',
                             'size': '20000000',
                             'starttls': '',
                             'deliverby': '',
                             'help': '',
                             }

        smtp.ehlo()
        self.assertEqual(smtp.esmtp_features, expected_features)
        for k in expected_features:
            self.assertTrue(smtp.has_extn(k))
        self.assertFalse(smtp.has_extn('unsupported-feature'))
        smtp.quit()

    def testVRFY(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)

        for email, name in sim_users.items():
            expected_known = (250, bytes('%s %s' %
                                         (name, smtplib.quoteaddr(email)),
                                         "ascii"))
            self.assertEqual(smtp.vrfy(email), expected_known)

        u = 'nobody@nowhere.com'
        expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
        self.assertEqual(smtp.vrfy(u), expected_unknown)
        smtp.quit()

    def testEXPN(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)

        for listname, members in sim_lists.items():
            users = []
            for m in members:
                users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
            expected_known = (250, bytes('\n'.join(users), "ascii"))
            self.assertEqual(smtp.expn(listname), expected_known)

        u = 'PSU-Members-List'
        expected_unknown = (550, b'No access for you!')
        self.assertEqual(smtp.expn(u), expected_unknown)
        smtp.quit()

    def testAUTH_PLAIN(self):
        self.serv.add_feature("AUTH PLAIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        expected_auth_ok = (235, b'plain auth ok')
        self.assertEqual(smtp.login(sim_auth[0], sim_auth[1]),
                         expected_auth_ok)
        smtp.close()

    # SimSMTPChannel doesn't fully support LOGIN or CRAM-MD5 auth because they
    # require a synchronous read to obtain the credentials...so instead smtpd
    # sees the credential sent by smtplib's login method as an unknown command,
    # which results in smtplib raising an auth error.  Fortunately the error
    # message contains the encoded credential, so we can partially check that it
    # was generated correctly (partially, because the 'word' is uppercased in
    # the error message).

    def testAUTH_LOGIN(self):
        self.serv.add_feature("AUTH LOGIN")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        try:
            smtp.login(sim_auth[0], sim_auth[1])
        except smtplib.SMTPAuthenticationError as err:
            self.assertIn(sim_auth_login_password, str(err))
        smtp.close()

    def testAUTH_CRAM_MD5(self):
        self.serv.add_feature("AUTH CRAM-MD5")
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        try:
            smtp.login(sim_auth[0], sim_auth[1])
        except smtplib.SMTPAuthenticationError as err:
            self.assertIn(sim_auth_credentials['cram-md5'], str(err))
        smtp.close()

    def test_with_statement(self):
        with smtplib.SMTP(HOST, self.port) as smtp:
            code, message = smtp.noop()
            self.assertEqual(code, 250)
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
        with smtplib.SMTP(HOST, self.port) as smtp:
            smtp.close()
        self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')

    def test_with_statement_QUIT_failure(self):
        with self.assertRaises(smtplib.SMTPResponseException) as error:
            with smtplib.SMTP(HOST, self.port) as smtp:
                smtp.noop()
                self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
        self.assertEqual(error.exception.smtp_code, 421)
        self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')

    #TODO: add tests for correct AUTH method fallback now that the
    #test infrastructure can support it.

    # Issue 5713: make sure close, not rset, is called if we get a 421 error
    def test_421_from_mail_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.mail_response = '421 closing connection'
        with self.assertRaises(smtplib.SMTPSenderRefused):
            smtp.sendmail('John', 'Sally', 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)

    def test_421_from_rcpt_cmd(self):
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.noop()
        self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
        with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
            smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
        self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})

    def test_421_from_data_cmd(self):
        class MySimSMTPChannel(SimSMTPChannel):
            def found_terminator(self):
                if self.smtp_state == self.DATA:
                    self.push('421 closing')
                else:
                    super().found_terminator()
        self.serv.channel_class = MySimSMTPChannel
        smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
                            timeout=15)
        smtp.noop()
        with self.assertRaises(smtplib.SMTPDataError):
            smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
        self.assertIsNone(smtp.sock)
        self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)


@support.reap_threads
def test_main(verbose=None):
    support.run_unittest(GeneralTests, DebuggingServerTests,
                         NonConnectingTests,
                         BadHELOServerTests, SMTPSimTests)

if __name__ == '__main__':
    test_main()
# ----- falskICD.py -----
import enum
from datetime import datetime
from flask import Flask, Response, render_template, request, jsonify
from flask_socketio import SocketIO, emit, send
import airsim
import numpy as np
import math
import time
import sys
from threading import Thread
import json

# Task modules (takeoff, land, hotPoint, wayPoints, positionSet, gimbal_set)
# live in a sibling directory; register it once at import time instead of
# re-importing sys and re-inserting the path inside every request handler.
sys.path.insert(1, '../icd_multirotor')

app = Flask(__name__, instance_relative_config=True)
# Config from filepath in env
app.config.from_pyfile('config.cfg')
print("read config file")
# NOTE(review): the AirSim/Unity RPC port is carried in the
# SESSION_COOKIE_PATH key of config.cfg -- looks like a repurposed key;
# confirm and rename if possible.
unitypot = int(app.config["SESSION_COOKIE_PATH"])
print(unitypot)
print("complit read config file")

# BUG FIX: the original rebound `app = Flask(__name__)` at this point,
# discarding the configured instance above, so every route below was
# registered on an *unconfigured* application.  Keep the single configured
# instance.
socketio = SocketIO(app, ping_timeout=100, ping_interval=100)

# Mission state shared between request handlers and worker threads.
hot_point_ned_coordinate = []   # [n, e, d] target for hot-point orbit
way_point_ned_coordinate = []   # list of airsim.Vector3r waypoints
way_point_status = -1           # -1 idle, 0 running, 1 done, 2/3 action codes
is_armed = False
air_sim = None                  # airsim.MultirotorClient, set by WebSocketStart
initialize_height = 0           # z_val recorded at arming; takeoff reference

posts = [{
    'author': "yigal",
    'title': "1",
    'content': "First post content",
}, {
    'author': "pigal",
    'title': "2",
    'content': "Second post content",
}]


def _json_result(success, message=""):
    """Uniform JSON envelope {"success": ..., "message": ...} used by every
    endpoint (same keys as the original hand-built dicts)."""
    return jsonify({"success": success, "message": message})


@app.route('/')
@app.route('/home')
def home():
    """Renders the home page."""
    return render_template('index.html',
                           posts=posts,
                           title='Contact',
                           year=datetime.now().year,
                           message='Your contact page.')


@app.route('/SomeFunction')
def SomeFunction():
    print('In SomeFunction')
    return "Nothing"


@app.route('/button_press')
def button_press():
    print('In SomeFunction')
    return "Nothing"


@app.route('/form', methods=['GET', 'POST'])
def form():
    # Body is read but currently unused.
    data = request.get_json()
    return "Nothing"


@app.route('/addRegion', methods=['POST'])
def addRegion():
    # return (request.form['projectFilePath'])
    return "Nothing"


# missing
# ========================================================================== #
@app.route('/ICD/', methods=['GET', 'POST'])
def ICD():
    # BUG FIX: the original called render_template() without returning it,
    # so the endpoint produced no usable response.
    return render_template('index.html')


# Takeoff
# ========================================================================== #
@app.route('/takeoff', methods=['GET', 'POST'])
def takeoff():
    """Record the arming height, then take off to 'operationalAlt' (async)."""
    if request.method == "POST":
        data = request.get_json()
        print("request")
        operation = data['operationalAlt']
        msg = "missing Alt operand"
        if operation:
            initializeHeight()
            thread = Thread(target=takeoff_operation,
                            kwargs={'value': request.args.get('value', operation)})
            thread.start()
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


def takeoff_operation(value):
    """Worker thread: run the Takeoff task against the Unity/AirSim port."""
    from takeoff import Takeoff
    _task = Takeoff(value, unitypot)
    _task.start()


# Land
# ========================================================================== #
@app.route('/land', methods=['GET', 'POST'])
def land():
    """Start an asynchronous landing."""
    if request.method == "POST":
        thread = Thread(target=land_operation)
        thread.start()
        return _json_result(True)


def land_operation():
    """Worker thread: land and mark the vehicle disarmed."""
    from land import Land
    _task = Land(unitypot)
    time.sleep(1)  # let the HTTP response go out before commanding
    _task.start()
    global is_armed
    is_armed = False


# HotPoint upload
# Body: { latitude, longitude, altitude, radius, angular_speed,
#         is_clockwise, start_point, yaw_mode }
# ========================================================================== #
@app.route('/hotPoint/upload', methods=['GET', 'POST'])
def hotPoint():
    """Store the hot-point orbit centre, converted to local NED."""
    if request.method == "POST":
        data = request.get_json()
        print("request")
        msg = "data is missing !"
        if data:
            coordinates = [data['latitude'], data['longitude'], data['altitude']]
            global hot_point_ned_coordinate
            ned_coordinates = geo_to_ned(coordinates)
            global initialize_height
            # Altitude is referenced to the height captured at arming;
            # NED z is negative-up, hence the sign flip below.
            z1 = initialize_height - data['altitude']
            print(data['latitude'])
            print(data['longitude'])
            print(data['altitude'])
            hot_point_ned_coordinate = [ned_coordinates[0],
                                        ned_coordinates[1], -(z1)]
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


# hotPoint Action -- Body: { "action": 0/1/2/3 }
# ========================================================================== #
@app.route('/hotPoint/action', methods=['GET', 'POST'])
def hotPointAction():
    """Trigger the previously uploaded hot-point mission (action 0 only)."""
    if request.method == "POST":
        data = request.get_json()
        msg = "action data is missing !"
        if data:
            if data['action'] == 0:
                thread = Thread(target=hotpoint_action_operation)
                thread.start()
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


def hotpoint_action_operation():
    """Worker thread: orbit the stored hot-point centre."""
    from hotPoint import HotPoint
    _task = HotPoint(hot_point_ned_coordinate[0],
                     hot_point_ned_coordinate[1],
                     hot_point_ned_coordinate[2], unitypot)
    _task.start()


# WayPoints uploadComplex -- Body: { action_on_finish, points: [
#   { latitude, longitude, altitude, velocity, yaw }, ... ] }
# ========================================================================== #
@app.route('/wayPoint/uploadComplex', methods=['GET', 'POST'])
def wayPoints():
    """Convert a list of geodetic points into an AirSim NED path."""
    if request.method == "POST":
        data = request.get_json()
        print("request")
        msg = "data is missing !"
        if data:
            global initialize_height
            print(initialize_height)
            path = []
            for point in data['points']:
                # Altitude relative to the height captured at arming.
                z = initialize_height - point['altitude']
                print(z)
                geo_point = [point['latitude'], point['longitude'], z]
                print(geo_point)
                ned_coordinate = geo_to_ned(geo_point)
                path.append(airsim.Vector3r(ned_coordinate[0],
                                            ned_coordinate[1], z))
            global way_point_ned_coordinate
            way_point_ned_coordinate = path
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


# WayPoints upload -- Body:
# { latitude0, longitude0, latitude1, longitude1, altitude }
# ========================================================================== #
@app.route('/wayPoint/upload', methods=['GET', 'POST'])
def wayPointUpload():
    """Store a simple two-point path at a single relative altitude."""
    if request.method == "POST":
        data = request.get_json()
        print("request")
        msg = "data is missing !"
        if data:
            global initialize_height
            z1 = initialize_height - data['altitude']
            path = []
            for lat_key, lon_key in (('latitude0', 'longitude0'),
                                     ('latitude1', 'longitude1')):
                ned = geo_to_ned([data[lat_key], data[lon_key], z1])
                path.append(airsim.Vector3r(ned[0], ned[1], z1))
            global way_point_ned_coordinate
            way_point_ned_coordinate = path
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


# wayPoint Action -- Body: { "action": 0/1/2/3 }
# ========================================================================== #
@app.route('/wayPoint/action', methods=['GET', 'POST'])
def wayPointAction():
    """Start (0) or flag (1/2/3) the uploaded waypoint mission."""
    if request.method == "POST":
        data = request.get_json()
        msg = "action data is missing !"
        if data:
            action = data['action']
            global way_point_status
            if action == 0:
                way_point_status = 0
                print(way_point_ned_coordinate)
                thread = Thread(target=waypoint_action_operation)
                thread.start()
            elif action in (1, 2, 3):
                way_point_status = action
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


def waypoint_action_operation():
    """Worker thread: fly the stored path, then mark the mission done."""
    from wayPoints import WayPoints
    global way_point_status
    print(way_point_ned_coordinate)
    _task = WayPoints(way_point_ned_coordinate, 12, unitypot)  # 12 = velocity
    _task.start()
    way_point_status = 1


# position_set -- Body: { x, y, z, tolerance }
# ========================================================================== #
@app.route('/position_set', methods=['GET', 'POST'])
def positionSet():
    """Fly to an absolute NED position (async)."""
    if request.method == "POST":
        data = request.get_json()
        print("request")
        ned_coordinates = [data['x'], data['y'], data['z']]
        msg = "NED is missing"
        if ned_coordinates:
            thread = Thread(target=position_set_operation,
                            kwargs={'value': request.args.get('value',
                                                              ned_coordinates)})
            thread.start()
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


def position_set_operation(value):
    """Worker thread: run the PositionSet task."""
    from positionSet import PositionSet
    _task = PositionSet(value[0], value[1], value[2], unitypot)
    _task.start()


# gimbal_set -- Body: { yaw, pitch, roll }
# ========================================================================== #
@app.route('/gimbal/set', methods=['GET', 'POST'])
def gimbalSet():
    """Point the gimbal to the requested attitude (async)."""
    if request.method == "POST":
        data = request.get_json()
        print("request")
        rotation = [data['yaw'], data['pitch'], data['roll']]
        msg = "rotation is missing"
        if rotation:
            thread = Thread(target=gimbal_set_operation,
                            kwargs={'value': request.args.get('value', rotation)})
            thread.start()
            return _json_result(True)
        print(msg)
        return _json_result(False, msg)


def gimbal_set_operation(value):
    """Worker thread: run the Gimbal task."""
    from gimbal_set import Gimbal
    _task = Gimbal(value[0], value[1], value[2], unitypot)
    _task.start()


# WebSocket -> start !
# ========================================================================== #
@app.route('/api/WebSocket/start', methods=['GET'])
def WebSocketStart():
    """Connect to AirSim and push telemetry over Socket.IO once a second.

    NOTE(review): the loop below never exits, so the response statement is
    unreachable and this request handler blocks forever -- behaviour kept
    as in the original; consider moving the loop to a background task.
    """
    if request.method == "GET":
        print("GET /WebSocket/start")
        time.sleep(1)
        global air_sim
        print("pre load airsim")
        air_sim = init_airsim()
        print("post load airsim")
        while True:
            data = load_airsim(air_sim)
            print(data)
            socketio.emit('my', data, broadcast=True)
            time.sleep(1)
        return _json_result(True, "WebSocket start")


# initialize the client.
# ========================================================================== #
def init_airsim():
    """Create, connect, and arm the AirSim multirotor RPC client."""
    print(unitypot)
    airsim_client = airsim.MultirotorClient('', unitypot, 3600)
    airsim_client.confirmConnection()
    airsim_client.enableApiControl(True)
    airsim_client.armDisarm(True)
    return airsim_client


# capture the takeoff reference height.
# ========================================================================== #
def initializeHeight():
    """Record the current z_val as the takeoff reference and mark armed."""
    global air_sim
    global initialize_height
    global is_armed
    rpcinfo = air_sim.getMultirotorState()
    initialize_height = rpcinfo.kinematics_estimated.position.z_val
    is_armed = True
# ========================================================================== #
def get_kinematics_estimated():
    """Return the simulator's current kinematics estimate."""
    global air_sim
    rpcinfo = air_sim.getMultirotorState()
    return rpcinfo.kinematics_estimated


# get current gps_location
# ========================================================================== #
def get_gps_location():
    """Return the simulator's current GPS fix."""
    global air_sim
    rpcinfo = air_sim.getMultirotorState()
    return rpcinfo.gps_location


# load telemetry
# ========================================================================== #
def load_airsim(airsim_client):
    """Build the telemetry dict that is pushed to clients over Socket.IO.

    Battery percentage, distance_from_home and gps_health are hard-coded
    placeholders (no source for them in the simulator state used here).
    """
    rpcinfo = airsim_client.getMultirotorState()
    gps_location = rpcinfo.gps_location
    kinematics_estimated = rpcinfo.kinematics_estimated
    pitch, roll, yaw = airsim.to_eularian_angles(
        rpcinfo.kinematics_estimated.orientation)
    homepoint = airsim_client.getHomeGeoPoint()
    global way_point_status
    global initialize_height
    # BUG FIX: was `initialize_height is not 0` -- an identity test against
    # an int literal (SyntaxWarning on modern Pythons, and wrong for any
    # float zero). Compare by value instead.
    if initialize_height != 0:
        height_above_takeoff = (-(kinematics_estimated.position.z_val)
                                + initialize_height)
    else:
        height_above_takeoff = initialize_height
    telemetry = {
        "battery_state": {
            "percentage": 70.04
        },
        "distance_from_home": 1561.4,
        "gimbal": {
            "roll": roll,
            "pitch": pitch,
            "yaw": yaw
        },
        "height_above_takeoff": height_above_takeoff,
        "gps_health": 5,
        "heading": math.degrees(yaw),
        "velocity": {
            "x": kinematics_estimated.linear_velocity.x_val,
            "y": kinematics_estimated.linear_velocity.y_val,
            "z": kinematics_estimated.linear_velocity.z_val
        },
        "gps_position": {
            "latitude": gps_location.latitude,
            "altitude": gps_location.altitude,
            "longitude": gps_location.longitude
        },
        "last_change_time": rpcinfo.timestamp,
        "lastHome": {
            "latitude": homepoint.latitude,
            "operationalAlt": homepoint.altitude,
            "longitude": homepoint.longitude
        },
        "owner": "droneService",
        "state": {
            "armed": is_armed
        },
        "wayPoints": {
            "status": way_point_status
        },
        "keepAlive": rpcinfo.timestamp
    }
    return telemetry


# WebSocket -> end !
# ========================================================================== #
@app.route('/api/WebSocket/end', methods=['GET'])
def WebSocketEnd():
    """Ask the external websocket client loop to stop."""
    if request.method == "GET":
        print("GET /WebSocket/end")
        sys.path.insert(1, '../webSocket')
        from wsClientLoop import WebSocketClient
        wbs = WebSocketClient()
        result = wbs.end()
        return jsonify({"success": True, "message": "WebSocket end"})


# ========================================================================== #
# ############################# Socket.io ################################## #
# ========================================================================== #
# CONSISTENCY FIX: the original registered this entire set of handlers
# twice (identical copies), causing each event to be dispatched to two
# handlers. One registration per event is kept.
@socketio.on('connect')
def WSocketConnect():
    print('connect')


@socketio.on('disconnect')
def WSocketDisconnect():
    print('disconnect')


@socketio.on('keepAlive')
def WSocketHandleKeepAlive(json):
    # print('received keepAlive: ' + str(json))
    pass


@socketio.on('my')
def handle_my_custom_event(json):
    print('received my: ' + str(json))


@socketio.on('force_send')
def handle_force_send(json):
    print('received force_send: ' + str(json))


@socketio.on('force_stop')
def handle_force_stop(json):
    print('received force_stop: ' + str(json))


# geo_to_ned
# ========================================================================== #
def geo_to_ned(gps_location):
    """Convert [lat, lon, alt] to local north/east/altitude offsets from the
    home point, in metres (equirectangular approximation)."""
    global air_sim
    home_point = air_sim.getHomeGeoPoint()
    d_lat = gps_location[0] - home_point.latitude
    d_lon = gps_location[1] - home_point.longitude
    # Altitude difference is returned as a magnitude, as in the original.
    if gps_location[2] > home_point.altitude:
        d_alt = gps_location[2] - home_point.altitude
    else:
        d_alt = home_point.altitude - gps_location[2]
    earth_radius = 6378137.0
    x = np.deg2rad(d_lat) * earth_radius
    # BUG FIX: the east offset scales by cos(latitude); the original passed
    # the *longitude* (gps_location[1]) into the cosine.
    y = np.deg2rad(d_lon) * earth_radius * math.cos(np.deg2rad(gps_location[0]))
    ned_coordinates = [x, y, d_alt]
    print(ned_coordinates[0])
    print(ned_coordinates[1])
    print(ned_coordinates[2])
    return ned_coordinates


# NOTE: launch with `socketio.run(app, host='0.0.0.0', port=...)`; the old
# commented-out argv-parsing __main__ block was removed as dead code.
base.py
# Copyright (c) 2008-2010 Aldo Cortesi # Copyright (c) 2011 Florian Mounier # Copyright (c) 2011 Kenji_Takahashi # Copyright (c) 2011 Paul Colomiets # Copyright (c) 2012 roger # Copyright (c) 2012 Craig Barnes # Copyright (c) 2012-2015 Tycho Andersen # Copyright (c) 2013 dequis # Copyright (c) 2013 David R. Andersen # Copyright (c) 2013 Tao Sauvage # Copyright (c) 2014-2015 Sean Vig # Copyright (c) 2014 Justin Bronder # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from libqtile.log_utils import logger from .. import command, bar, configurable, drawer, confreader import six import subprocess import threading import warnings # Each widget class must define which bar orientation(s) it supports by setting # these bits in an 'orientations' class attribute. Simply having the attribute # inherited by superclasses is discouraged, because if a superclass that was # only supporting one orientation, adds support for the other, its subclasses # will have to be adapted too, in general. 
# ORIENTATION_NONE is only added for completeness' sake.
# +------------------------+--------------------+--------------------+
# | Widget bits            | Horizontal bar     | Vertical bar       |
# +========================+====================+====================+
# | ORIENTATION_NONE       | ConfigError raised | ConfigError raised |
# +------------------------+--------------------+--------------------+
# | ORIENTATION_HORIZONTAL | Widget displayed   | ConfigError raised |
# |                        | horizontally       |                    |
# +------------------------+--------------------+--------------------+
# | ORIENTATION_VERTICAL   | ConfigError raised | Widget displayed   |
# |                        |                    | vertically         |
# +------------------------+--------------------+--------------------+
# | ORIENTATION_BOTH       | Widget displayed   | Widget displayed   |
# |                        | horizontally       | vertically         |
# +------------------------+--------------------+--------------------+
class _Orientations(int):
    """An int bitmask that also carries a human-readable description."""

    def __new__(cls, value, doc):
        return super(_Orientations, cls).__new__(cls, value)

    def __init__(self, value, doc):
        self.doc = doc

    def __str__(self):
        return self.doc

    def __repr__(self):
        return self.doc


ORIENTATION_NONE = _Orientations(0, 'none')
ORIENTATION_HORIZONTAL = _Orientations(1, 'horizontal only')
ORIENTATION_VERTICAL = _Orientations(2, 'vertical only')
ORIENTATION_BOTH = _Orientations(3, 'horizontal and vertical')


class _Widget(command.CommandObject, configurable.Configurable):
    """Base Widget class

    If length is set to the special value `bar.STRETCH`, the bar itself will
    set the length to the maximum remaining space, after all other widgets
    have been configured. Only ONE widget per bar can have the `bar.STRETCH`
    length set.

    In horizontal bars, 'length' corresponds to the width of the widget; in
    vertical bars, it corresponds to the widget's height.

    The offsetx and offsety attributes are set by the Bar after all widgets
    have been configured.
    """
    orientations = ORIENTATION_BOTH
    offsetx = None
    offsety = None
    defaults = [("background", None, "Widget background color")]

    def __init__(self, length, **config):
        """
        length: bar.STRETCH, bar.CALCULATED, or a specified length.
        """
        command.CommandObject.__init__(self)
        self.name = self.__class__.__name__.lower()
        if "name" in config:
            self.name = config["name"]
        configurable.Configurable.__init__(self, **config)
        self.add_defaults(_Widget.defaults)
        if length in (bar.CALCULATED, bar.STRETCH):
            self.length_type = length
            self.length = 0
        else:
            assert isinstance(length, six.integer_types)
            self.length_type = bar.STATIC
            self.length = length
        self.configured = False

    @property
    def length(self):
        # CALCULATED widgets recompute their length on every access.
        if self.length_type == bar.CALCULATED:
            return int(self.calculate_length())
        return self._length

    @length.setter
    def length(self, value):
        self._length = value

    @property
    def width(self):
        if self.bar.horizontal:
            return self.length
        return self.bar.size

    @property
    def height(self):
        if self.bar.horizontal:
            return self.bar.size
        return self.length

    @property
    def offset(self):
        if self.bar.horizontal:
            return self.offsetx
        return self.offsety

    @property
    def win(self):
        return self.bar.window.window

    # Do not start the name with "test", or nosetests will try to test it
    # directly (prepend an underscore instead)
    def _test_orientation_compatibility(self, horizontal):
        if horizontal:
            if not self.orientations & ORIENTATION_HORIZONTAL:
                raise confreader.ConfigError(
                    "The widget is not compatible with the orientation of the "
                    "bar."
                )
        elif not self.orientations & ORIENTATION_VERTICAL:
            raise confreader.ConfigError(
                "The widget is not compatible with the orientation of the bar."
            )

    def timer_setup(self):
        """This is called exactly once, after the widget has been configured
        and timers are available to be set up."""
        pass

    def _configure(self, qtile, bar):
        self.qtile = qtile
        self.bar = bar
        self.drawer = drawer.Drawer(
            qtile,
            self.win.wid,
            self.bar.width,
            self.bar.height
        )
        if not self.configured:
            self.configured = True
            self.qtile.call_soon(self.timer_setup)

    def finalize(self):
        if hasattr(self, 'layout') and self.layout:
            self.layout.finalize()
        self.drawer.finalize()

    def clear(self):
        self.drawer.set_source_rgb(self.bar.background)
        self.drawer.fillrect(self.offsetx, self.offsety, self.width,
                             self.height)

    def info(self):
        return dict(
            name=self.name,
            offset=self.offset,
            length=self.length,
            width=self.width,
            height=self.height,
        )

    def button_press(self, x, y, button):
        pass

    def button_release(self, x, y, button):
        pass

    def get(self, q, name):
        """Utility function for quick retrieval of a widget by name."""
        w = q.widgetMap.get(name)
        if not w:
            raise command.CommandError("No such widget: %s" % name)
        return w

    def _items(self, name):
        if name == "bar":
            return (True, None)

    def _select(self, name, sel):
        if name == "bar":
            return self.bar

    def cmd_info(self):
        """Info for this object."""
        return self.info()

    def draw(self):
        """Method that draws the widget.

        You may call this explicitly to redraw the widget, but only if the
        length of the widget hasn't changed. If it has, you must call
        bar.draw instead.
        """
        raise NotImplementedError

    def calculate_length(self):
        """Must be implemented if the widget can take CALCULATED for length.

        It must return the width of the widget if it's installed in a
        horizontal bar; it must return the height of the widget if it's
        installed in a vertical bar. Usually you will test the orientation
        of the bar with 'self.bar.horizontal'.
        """
        raise NotImplementedError

    def timeout_add(self, seconds, method, method_args=()):
        """Schedule ``method(*method_args)`` after ``seconds`` via the event
        loop's ``call_later``; exceptions are logged by ``_wrapper``."""
        return self.qtile.call_later(seconds, self._wrapper, method,
                                     *method_args)

    def call_process(self, command, **kwargs):
        """This method uses `subprocess.check_output` to run the given
        command and return the string from stdout, which is decoded when
        using Python 3."""
        output = subprocess.check_output(command, **kwargs)
        if six.PY3:
            output = output.decode()
        return output

    def _wrapper(self, method, *method_args):
        try:
            method(*method_args)
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            logger.exception('got exception from widget timer')


UNSPECIFIED = bar.Obj("UNSPECIFIED")


class _TextBox(_Widget):
    """Base class for widgets that are just boxes containing text."""
    orientations = ORIENTATION_HORIZONTAL
    defaults = [
        ("font", "Arial", "Default font"),
        ("fontsize", None, "Font size. Calculated if None."),
        ("padding", None, "Padding. Calculated if None."),
        ("foreground", "ffffff", "Foreground colour"),
        (
            "fontshadow",
            None,
            "font shadow color, default is None(no shadow)"
        ),
        ("markup", False, "Whether or not to use pango markup"),
    ]

    def __init__(self, text=" ", width=bar.CALCULATED, **config):
        self.layout = None
        _Widget.__init__(self, width, **config)
        self.text = text
        self.add_defaults(_TextBox.defaults)

    @property
    def text(self):
        return self._text

    @text.setter
    def text(self, value):
        assert value is None or isinstance(value, six.string_types)
        self._text = value
        if self.layout:
            self.layout.text = value

    @property
    def font(self):
        return self._font

    @font.setter
    def font(self, value):
        self._font = value
        if self.layout:
            self.layout.font = value

    @property
    def foreground(self):
        return self._foreground

    @foreground.setter
    def foreground(self, fg):
        self._foreground = fg
        if self.layout:
            self.layout.colour = fg

    @property
    def fontshadow(self):
        return self._fontshadow

    @fontshadow.setter
    def fontshadow(self, value):
        self._fontshadow = value
        if self.layout:
            self.layout.font_shadow = value

    @property
    def actual_padding(self):
        if self.padding is None:
            return self.fontsize / 2
        else:
            return self.padding

    def _configure(self, qtile, bar):
        _Widget._configure(self, qtile, bar)
        if self.fontsize is None:
            self.fontsize = self.bar.height - self.bar.height / 5
        self.layout = self.drawer.textlayout(
            self.text,
            self.foreground,
            self.font,
            self.fontsize,
            self.fontshadow,
            markup=self.markup,
        )

    def calculate_length(self):
        if self.text:
            return min(
                self.layout.width,
                self.bar.width
            ) + self.actual_padding * 2
        else:
            return 0

    def draw(self):
        # if the bar hasn't placed us yet
        if self.offsetx is None:
            return
        self.drawer.clear(self.background or self.bar.background)
        self.layout.draw(
            self.actual_padding or 0,
            int(self.bar.height / 2.0 - self.layout.height / 2.0) + 1
        )
        self.drawer.draw(offsetx=self.offsetx, width=self.width)

    def cmd_set_font(self, font=UNSPECIFIED, fontsize=UNSPECIFIED,
                     fontshadow=UNSPECIFIED):
        """Change the font used by this widget.

        If font is None, the current font is used.
        """
        if font is not UNSPECIFIED:
            self.font = font
        if fontsize is not UNSPECIFIED:
            self.fontsize = fontsize
        if fontshadow is not UNSPECIFIED:
            self.fontshadow = fontshadow
        self.bar.draw()

    def info(self):
        d = _Widget.info(self)
        d['foreground'] = self.foreground
        d['text'] = self.text
        return d


class InLoopPollText(_TextBox):
    """A common interface for polling some 'fast' information, munging it,
    and rendering the result in a text box.

    You probably want to use ThreadedPollText instead.

    ('fast' here means that this runs /in/ the event loop, so don't block! If
    you want to run something nontrivial, use ThreadedPollWidget.)
    """
    defaults = [
        ("update_interval", 600, "Update interval in seconds, if none, the "
            "widget updates whenever the event loop is idle."),
    ]

    def __init__(self, **config):
        _TextBox.__init__(self, 'N/A', width=bar.CALCULATED, **config)
        self.add_defaults(InLoopPollText.defaults)

    def timer_setup(self):
        update_interval = self.tick()
        # If self.update_interval is defined and .tick() returns None, re-call
        # after self.update_interval
        if update_interval is None and self.update_interval is not None:
            self.timeout_add(self.update_interval, self.timer_setup)
        # We can change the update interval by returning something from .tick()
        elif update_interval:
            self.timeout_add(update_interval, self.timer_setup)
        # If update_interval is False, we won't re-call

    def _configure(self, qtile, bar):
        should_tick = self.configured
        _TextBox._configure(self, qtile, bar)
        # Update when we are being re-configured.
        if should_tick:
            self.tick()

    def button_press(self, x, y, button):
        self.tick()

    def poll(self):
        return 'N/A'

    def tick(self):
        text = self.poll()
        self.update(text)

    def update(self, text):
        old_width = self.layout.width
        if self.text != text:
            self.text = text
            # If our width hasn't changed, we just draw ourselves. Otherwise,
            # we draw the whole bar.
            if self.layout.width == old_width:
                self.draw()
            else:
                self.bar.draw()


class ThreadedPollText(InLoopPollText):
    """A common interface for polling some REST URL, munging the data, and
    rendering the result in a text box."""

    def __init__(self, **config):
        InLoopPollText.__init__(self, **config)

    def tick(self):
        def worker():
            try:
                text = self.poll()
                self.qtile.call_soon_threadsafe(self.update, text)
            # BUG FIX: was a bare `except:` -- narrowed so SystemExit and
            # KeyboardInterrupt still propagate.
            except Exception:
                logger.exception("problem polling to update widget %s",
                                 self.name)
        # TODO: There are nice asyncio constructs for this sort of thing, I
        # think...
        threading.Thread(target=worker).start()


class ThreadPoolText(_TextBox):
    """A common interface for wrapping blocking events which when triggered
    will update a textbox.

    This is an alternative to the ThreadedPollText class which differs by
    being push based rather than pull.

    The poll method is intended to wrap a blocking function which may take
    quite a while to return anything. It will be executed as a future and
    should return updated text when completed. It may also return None to
    disable any further updates.

    param: text - Initial text to display.
    """
    defaults = [
        ("update_interval", None, "Update interval in seconds, if none, the "
            "widget updates whenever it's done'."),
    ]

    def __init__(self, text, **config):
        super(ThreadPoolText, self).__init__(text, width=bar.CALCULATED,
                                             **config)
        self.add_defaults(ThreadPoolText.defaults)

    def timer_setup(self):
        def on_done(future):
            try:
                result = future.result()
            except Exception:
                result = None
                logger.exception('poll() raised exceptions, not rescheduling')
            if result is not None:
                try:
                    self.update(result)
                    if self.update_interval is not None:
                        self.timeout_add(self.update_interval,
                                         self.timer_setup)
                    else:
                        self.timer_setup()
                except Exception:
                    logger.exception('Failed to reschedule.')
            else:
                logger.warning('poll() returned None, not rescheduling')
        future = self.qtile.run_in_executor(self.poll)
        future.add_done_callback(on_done)

    def update(self, text):
        old_width = self.layout.width
        if self.text == text:
            return
        self.text = text
        if self.layout.width == old_width:
            self.draw()
        else:
            self.bar.draw()

    def poll(self):
        pass

# these two classes below look SUSPICIOUSLY similar


class PaddingMixin(object):
    """Mixin that provides padding(_x|_y|)

    To use it, subclass and add this to __init__:

        self.add_defaults(base.PaddingMixin.defaults)
    """
    defaults = [
        ("padding", 3, "Padding inside the box"),
        ("padding_x", None, "X Padding. Overrides 'padding' if set"),
        ("padding_y", None, "Y Padding. Overrides 'padding' if set"),
    ]

    padding_x = configurable.ExtraFallback('padding_x', 'padding')
    padding_y = configurable.ExtraFallback('padding_y', 'padding')


class MarginMixin(object):
    """Mixin that provides margin(_x|_y|)

    To use it, subclass and add this to __init__:

        self.add_defaults(base.MarginMixin.defaults)
    """
    defaults = [
        ("margin", 3, "Margin inside the box"),
        ("margin_x", None, "X Margin. Overrides 'margin' if set"),
        ("margin_y", None, "Y Margin. Overrides 'margin' if set"),
    ]

    margin_x = configurable.ExtraFallback('margin_x', 'margin')
    margin_y = configurable.ExtraFallback('margin_y', 'margin')


def deprecated(msg):
    # Emit a DeprecationWarning with the given message.
    warnings.warn(msg, DeprecationWarning)
bulldog_vision_2.py
#!/usr/bin/env python
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
import actionlib
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from geometry_msgs.msg import PoseWithCovarianceStamped, PoseStamped, Twist
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading

# Shared state between the threads below:
#   delta_x          - horizontal offset of the tracked object (320 - center x)
#   label_list       - YOLO labels of the last frame (int 1 when nothing found)
#   RV2_motor1_joint - current motor1 joint position (set by joint-state callback)
#   posex, posey     - current AMCL base pose
yolo = YOLO()


class image_converter:
    """Subscribe to the compressed camera stream, run YOLO on every frame and
    publish the horizontal offset of the first detected banana through the
    module-level global ``delta_x`` (image center assumed at x = 320)."""

    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber(
            "/mid_camera/color/image_raw/compressed",
            CompressedImage, self.callback)

    def callback(self, data):
        global delta_x, label_list
        # Convert the ROS image into an OpenCV image.
        try:
            cv_image = self.bridge.compressed_imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            # BUG FIX: previously printed the literal string 'e'; also bail
            # out so we do not use an undefined cv_image below.
            print(e)
            return
        # BGR -> RGB, then to a PIL image for YOLO.
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        cv_image = pilimage.fromarray(np.uint8(cv_image))
        # Run detection: annotated image, bounding boxes and object labels.
        cv_image, bbox_list, label_list = yolo.detect_image(cv_image)
        # Back to an OpenCV BGR image for display.
        cv_image = np.array(cv_image)
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
        cv2.imshow("Image window", cv_image)
        cv2.waitKey(3)

        # When nothing is detected, bbox_list and label_list are the int 1.
        if type(label_list) != int:
            num_of_obj = len(label_list)
            # Offset of the tracked object relative to the image center.
            for i in range(num_of_obj):
                if 'banana' in label_list[i]:
                    object_center = (bbox_list[i][1] + bbox_list[i][3]) * 0.5
                    delta_x = 320 - object_center
                    # NOTE: the return value of a subscriber callback is
                    # ignored by rospy; delta_x travels via the global.
                    # BUG FIX: an unreachable duplicate
                    # `elif 'banana' in label_list[i]` branch was removed.
                    return delta_x
        else:
            print('yolo未识别到任何物体')


def judge_bed():
    """Thread entry point: create the camera subscriber (kept alive by rospy)."""
    global delta_x
    image_converter()


def _publish_motor1_velocity(velocity, sleep_after=0.0, verbose=False,
                             sleep_before=0.0):
    """Publish one velocity command for joint "motor1".

    velocity     - commanded joint velocity (rad/s, sign = direction)
    sleep_after  - seconds to block after publishing (rate limiting)
    verbose      - also print the message, as some branches originally did
    sleep_before - seconds to block before publishing
    """
    global command_vel_pub_m
    if sleep_before:
        time.sleep(sleep_before)
    motor_vel = JointState()
    motor_vel.header = Header()
    motor_vel.header.stamp = rospy.Time.now()
    motor_vel.header.frame_id = "bulldog"
    motor_vel.name = ["motor1"]
    motor_vel.velocity = [velocity]
    if verbose:
        print(motor_vel)
    command_vel_pub_m.publish(motor_vel)
    if sleep_after:
        time.sleep(sleep_after)


def motor1_move():
    """Thread entry point: steer motor1 to center the banana horizontally.

    The joint range is split into a middle band and two soft limits at
    +/-1.5 rad; inside the limits the motor may only turn back toward center.
    delta_x bands: |delta_x| < 80 stop, 80..200 proportional, > 200 full speed.
    """
    time.sleep(1)
    global command_vel_pub_m, delta_x, RV2_motor1_joint
    while not rospy.is_shutdown():
        print(delta_x)
        # Middle band: both directions allowed.
        if -1.5 < RV2_motor1_joint < 1.5:
            if delta_x > 200:
                _publish_motor1_velocity(0.48, sleep_after=2, verbose=True)
            elif 80 < delta_x < 200:
                _publish_motor1_velocity((delta_x - 40) * 0.003,
                                         sleep_after=2, verbose=True)
            elif delta_x < -200:
                print("b")
                _publish_motor1_velocity(-0.48, sleep_after=2)
            elif -200 < delta_x < -80:
                print("b")
                _publish_motor1_velocity((delta_x + 40) * 0.003, sleep_after=2)
            elif -80 < delta_x < 80:
                # Original code slept BEFORE publishing the stop command here.
                _publish_motor1_velocity(0, sleep_before=2)
        # Left soft limit: refuse to turn further left (velocity 0 instead).
        if 1.5 < RV2_motor1_joint:
            if delta_x > 80:
                print("a")
                _publish_motor1_velocity(0, sleep_after=2, verbose=True)
            elif delta_x < -200:
                print("b")
                _publish_motor1_velocity(-0.48, sleep_after=2)
            elif -200 < delta_x < -80:
                print("b")
                _publish_motor1_velocity((delta_x + 40) * 0.003, sleep_after=2)
            elif -80 < delta_x < 80:
                _publish_motor1_velocity(0, sleep_after=0.5)
        # Right soft limit: refuse to turn further right.
        if RV2_motor1_joint < -1.5:
            if delta_x > 200:
                _publish_motor1_velocity(0.48, sleep_after=2, verbose=True)
            elif 80 < delta_x < 200:
                _publish_motor1_velocity((delta_x - 40) * 0.003,
                                         sleep_after=2, verbose=True)
            elif delta_x < -80:
                print("b")
                _publish_motor1_velocity(0, sleep_after=2)
            elif -80 < delta_x < 80:
                _publish_motor1_velocity(0, sleep_after=0.5)
            else:
                _publish_motor1_velocity(0, sleep_after=0.5)
        time.sleep(1)


def RV2_motorjointstate_callback(data):
    # Keep the current motor1 joint position in the module-level global.
    global RV2_motor1_joint
    RV2_motor1_joint = data.position[0]
    print(RV2_motor1_joint)


def active_cb(extra):
    rospy.loginfo("Goal pose being processed")


def feedback_cb(feedback):
    rospy.loginfo("Current location: " + str(feedback))


def done_cb(status, result):
    # actionlib terminal states: 3=SUCCEEDED, 2/8=cancelled, 4=ABORTED.
    if status == 3:
        rospy.loginfo("Goal reached")
    if status == 2 or status == 8:
        rospy.loginfo("Goal cancelled")
    if status == 4:
        rospy.loginfo("Goal aborted")


def base_move():
    """Thread entry point: track the AMCL pose and drive the base via cmd()."""
    rospy.Subscriber('/amcl_pose', PoseWithCovarianceStamped, current_pose)
    time.sleep(2)
    cmd()
    rospy.spin()


def current_pose(msg):
    # Cache the latest AMCL base position in module-level globals.
    global posex, posey
    posex = msg.pose.pose.position.x
    posey = msg.pose.pose.position.y
    print(posex, posey)


def cmd():
    """Drive toward the first detected banana once, then return home when it
    disappears (Goal() exits the process)."""
    global label_list
    flag_k = 0  # counts completed approach moves; only the first one drives
    while not rospy.is_shutdown():
        if type(label_list) != int:  # int means the last frame had no objects
            num_of_obj = len(label_list)
            for i in range(num_of_obj):
                if 'banana' in label_list[i] and flag_k == 0:
                    print('a')
                    navclient = actionlib.SimpleActionClient('move_base',
                                                             MoveBaseAction)
                    navclient.wait_for_server()
                    goal = MoveBaseGoal()
                    goal.target_pose.header.frame_id = "map"
                    goal.target_pose.header.stamp = rospy.Time.now()
                    # Step toward the object relative to the current pose.
                    goal.target_pose.pose.position.x = posex + 0.3
                    goal.target_pose.pose.position.y = posey + 0.1
                    goal.target_pose.pose.position.z = 0.0
                    goal.target_pose.pose.orientation.x = 0.0
                    goal.target_pose.pose.orientation.y = 0.0
                    goal.target_pose.pose.orientation.z = 0.0
                    goal.target_pose.pose.orientation.w = 1.0
                    flag_k = flag_k + 1
                    navclient.send_goal(goal, done_cb, active_cb, feedback_cb)
                    finished = navclient.wait_for_result()
                    if not finished:
                        rospy.logerr("Action server not available!")
                    else:
                        rospy.loginfo(navclient.get_result())
                    time.sleep(10)
                if 'banana' not in label_list[i] and flag_k >= 1:
                    # Banana gone after an approach: return home and exit.
                    Goal()
                    time.sleep(1)


def Goal():
    """Send the base back to the home pose (-0.2, 0.0) and exit the process."""
    navclient = actionlib.SimpleActionClient('move_base', MoveBaseAction)
    navclient.wait_for_server()
    goal = MoveBaseGoal()
    goal.target_pose.header.frame_id = "map"
    goal.target_pose.header.stamp = rospy.Time.now()
    goal.target_pose.pose.position.x = -0.2
    goal.target_pose.pose.position.y = 0.0
    goal.target_pose.pose.position.z = 0.0
    goal.target_pose.pose.orientation.x = 0.0
    goal.target_pose.pose.orientation.y = 0.0
    goal.target_pose.pose.orientation.z = 0.0
    goal.target_pose.pose.orientation.w = 1.0
    navclient.send_goal(goal, done_cb, active_cb, feedback_cb)
    finished = navclient.wait_for_result()
    if not finished:
        rospy.logerr("Action server not available!")
    else:
        rospy.loginfo(navclient.get_result())
    # NOTE(review): exit() kills the whole process from a worker thread -
    # presumably intentional as the "mission complete" shutdown; confirm.
    exit()


if __name__ == '__main__':
    try:
        rospy.init_node("vision")
        rospy.loginfo("Starting cv_bridge_test node")
        # Publisher for motor1 velocity commands (shared by motor1_move).
        command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity',
                                            JointState, queue_size=100,
                                            latch=True)
        # Track the motor joint position.
        rospy.Subscriber('/joint_states_motor', JointState,
                         RV2_motorjointstate_callback)
        # YOLO detection thread.
        t_judge_bed = threading.Thread(target=judge_bed)
        t_judge_bed.start()
        time.sleep(2)
        # Motor (torso) tracking thread.
        t_motor1 = threading.Thread(target=motor1_move)
        t_motor1.start()
        time.sleep(2)
        # Base navigation thread.
        t_base = threading.Thread(target=base_move)
        t_base.start()
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down cv_bridge_test node.")
        cv2.destroyAllWindows()
test_itemdb.py
""" The main usage tests. """ import os import time import random import sqlite3 import tempfile import threading from contextlib import closing from pytest import raises from testutils import run_tests from itemdb import ItemDB def get_fresh_filename(): filename = os.path.join(tempfile.gettempdir(), "test.db") if os.path.isfile(filename): os.remove(filename) return filename def test_init_read(): # Empty database, zero tables db = ItemDB(":memory:") assert db.get_table_names() == [] # no tables with raises(KeyError): db.select("foo", "key is NULL") with raises(KeyError): db.select_all("foo") with raises(KeyError): db.count_all("foo") # Two tables db = ItemDB(":memory:").ensure_table("foo", "key").ensure_table("bar") assert db.count_all("foo") == 0 assert db.count_all("bar") == 0 def test_index_fails(): # Invalid index/table names - not str db = ItemDB(":memory:") for name in [(), 4, b"", [], {}]: with raises(TypeError): db.ensure_table("items", name) # Invalid index/table names - not an identifier db = ItemDB(":memory:") for name in ["foo bar", "foo-bar", "33", "foo!"]: with raises(ValueError): db.ensure_table("items", name) # Reserved for name in ["!_ob", "_ob"]: with raises(IndexError): db.ensure_table("items", name) # Cannot add a unique key filename = get_fresh_filename() db = ItemDB(filename).ensure_table("foo", "meh") with closing(db): assert "foo" in db.get_table_names() with raises(IndexError): db = ItemDB(filename).ensure_table("foo", "!key") # Cannot use a normal key as a unique key filename = get_fresh_filename() db = ItemDB(filename).ensure_table("foo", "key") with closing(db): assert "foo" in db.get_table_names() with raises(IndexError): db = ItemDB(filename).ensure_table("foo", "!key") # Cannot use a unique key as a normal key filename = get_fresh_filename() db = ItemDB(filename).ensure_table("foo", "!key") with closing(db): assert "foo" in db.get_table_names() with raises(IndexError): db = ItemDB(filename).ensure_table("foo", "key") def 
test_init_write(): db = ItemDB(":memory:").ensure_table("items", "!id", "mt") with raises(IOError): # Put needs to be used under a context db.put("items", dict(id=1, mt=100)) with raises(KeyError): # Invalid table with db: db.put("foo", dict(id=1, mt=100)) with raises(TypeError): # Note a dict with db: db.put("items", "not a dict") with raises(IndexError): # id is required but missing with db: db.put("items", dict(mt=100)) with raises(IOError): # Cant enter twice with db: with db: pass with db: db.put("items", dict(id=1, mt=100)) db.put("items", dict(id=2, mt=100, value=42)) db.put("items", dict(id=3, value=42)) assert len(db.select_all("items")) == 3 assert db.count_all("items") == 3 assert len(db.get_table_names()) == 1 assert len(db.select("items", "mt == 100")) == 2 assert len(db.select("items", "mt is NULL")) == 1 assert db.count("items", "mt == 100") == 2 assert db.count("items", "mt is NULL") == 1 with raises(IndexError): # No index for value db.select("items", "value == 42") with raises(IndexError): # No index for value db.count("items", "value == 42") with raises(sqlite3.OperationalError): # Malformed SQL db.select("items", "id >>> 42") with raises(sqlite3.OperationalError): # Malformed SQL db.count("items", "id >>> 42") def test_multiple_unique_keys(): db = ItemDB(":memory:").ensure_table("items", "!id1", "!id2") with db: db.put_one("items", id1=1, id2=1, value=1) db.put_one("items", id1=1, id2=2, value=2) db.put_one("items", id1=2, id2=2, value=3) db.put_one("items", id1=2, id2=1, value=4) assert db.count_all("items") == 1 assert db.select_one("items", "id1 == 1") is None assert db.select_one("items", "id1 == 2")["value"] == 4 def test_missing_values1(): filename = get_fresh_filename() db = ItemDB(filename).ensure_table("items", "!id", "mt") # Keys that are not listed are NOT ignored with db: db.put("items", dict(id=1, mt=100)) db.put("items", dict(id=2, mt=100, value=6)) # assert db.select_all("items") == [dict(id=1, mt=100), dict(id=2, mt=100, 
value=6)] with raises(IndexError): # No index for value db.select("items", "value == 6") # When a column is added it gets NULL values in the db, and items stay as they are db = ItemDB(filename).ensure_table("items", "!id", "mt", "value") with db: db.put("items", dict(id=3, mt=100, value=41)) # db = ItemDB(filename).ensure_table("items", "!id", "mt", "value") assert db.select_all("items") == [ dict(id=1, mt=100), dict(id=2, mt=100, value=6), dict(id=3, mt=100, value=41), ] assert len(db.select("items", "value == 6")) == 1 assert len(db.select("items", "value > 0")) == 2 assert len(db.select("items", "value is NULL")) == 1 # When we don't specify a column, it still gets a value (not NULL) db = ItemDB(filename).ensure_table("items", "!id") with db: db.put("items", dict(id=5, mt=100, value=999)) assert len(db.select("items", "value == 999")) == 1 def test_missing_values2(): filename = get_fresh_filename() db = ItemDB(filename) db.ensure_table("items", "!id", "mt") # Keys that are not listed are NOT ignored with db: db.put("items", dict(id=1, mt=100)) db.put("items", dict(id=2, mt=100, value=6)) # assert db.select_all("items") == [dict(id=1, mt=100), dict(id=2, mt=100, value=6)] with raises(IndexError): # No index for value db.select("items", "value == 6") # When a column is added it gets NULL values in the db, and items stay as they are db.ensure_table("items", "value") with db: db.put("items", dict(id=3, mt=100, value=41)) # assert db.select_all("items") == [ dict(id=1, mt=100), dict(id=2, mt=100, value=6), dict(id=3, mt=100, value=41), ] assert len(db.select("items", "value == 6")) == 1 assert len(db.select("items", "value > 0")) == 2 assert len(db.select("items", "value is NULL")) == 1 # When we don't specify a column, it still gets a value (not NULL) db = ItemDB(filename) with db: db.put("items", dict(id=5, mt=100, value=999)) assert len(db.select("items", "value == 999")) == 1 def test_usage_items(): db = ItemDB(":memory:").ensure_table("items", "!id", "mt", 
"value") # Need id with raises(IndexError): with db: db.put("items", dict(mt=100, value=1)) # Add three items with db: db.put("items", dict(id=1, mt=100, value=1)) db.put("items", dict(id=2, mt=100, value=1)) db.put("items", dict(id=3, mt=100, value=1)) assert len(db.select_all("items")) == 3 assert len(db.select("items", "value == 1")) == 3 assert len(db.select("items", "value == 2")) == 0 # Update them, one using an older mt for item in [ dict(id=1, mt=99, value=2), # wont override dict(id=2, mt=100, value=2), # will override - mt's are equal dict(id=3, mt=101, value=2), # will override dict(id=4, mt=101, value=2), # new ]: with db: cur = db.select("items", "id == ?", item["id"]) if not cur or cur[0]["mt"] <= item["mt"]: db.put("items", item) assert len(db.select_all("items")) == 4 assert len(db.select("items", "value == 1")) == 1 assert len(db.select("items", "value == 2")) == 3 x = db.select_one("items", "id == ?", 3) assert x["mt"] == 101 db = ItemDB(":memory:").ensure_table("items", "!id", "mt", "value") x = db.select_one("items", "id == ?", 3) assert x is None def test_usage_settings(): db = ItemDB(":memory:").ensure_table("settings", "!id", "mt", "value") # Need id with raises(IndexError): with db: db.put("settings", dict(value="old", mt=100)) # Add three items with db: db.put("settings", dict(id="foo", value="old", mt=100)) db.put("settings", dict(id="bar", value="old", mt=100)) db.put("settings", dict(id="egg", value="old", mt=100)) assert len(db.select_all("settings")) == 3 assert len(db.select("settings", "mt > 100")) == 0 assert len(db.select("settings", "value == 'updated'")) == 0 # Update them, one using an older for item in [ dict(id="foo", value="updated", mt=99), dict(id="bar", value="updated", mt=100), # also updates dict(id="egg", value="updated", mt=101), dict(id="spam", value="updated", mt=101), # new ]: with db: cur = db.select("settings", "id == ?", item["id"]) if not cur or cur[0]["mt"] <= item["mt"]: db.put("settings", item) assert 
len(db.select_all("settings")) == 4 assert len(db.select("settings", "mt > 100")) == 2 assert len(db.select("settings", "value == 'updated'")) == 3 assert db.select_one("settings", "id=='egg'")["value"] == "updated" def test_multiple_items(): filename = get_fresh_filename() db = ItemDB(filename) db.ensure_table("items", "!id") assert len(db.select_all("items")) == 0 # Adding multiple with db: db.put("items", dict(id=1, mt=100), dict(id=2, mt=100)) assert len(db.select_all("items")) == 2 # Separate additions, one gets added # These few tests here are a remnant of when itemdb was different, but lets # not throw away precious testing code ... with db: db.put("items", dict(id=3, mt=100)) with raises(RuntimeError): with db: raise RuntimeError() assert set(x["id"] for x in db.select_all("items")) == {1, 2, 3} # Combined addition, none gets added with raises(RuntimeError): with db: db.put("items", dict(id=4, mt=100), dict(id=5)) raise RuntimeError() assert set(x["id"] for x in db.select_all("items")) == {1, 2, 3} # Combined addition, none gets changed with raises(RuntimeError): with db: db.put("items", dict(id=3, mt=102), dict(id=5)) raise RuntimeError() assert set(x["id"] for x in db.select_all("items")) == {1, 2, 3} x = db.select_all("items")[-1] assert x["id"] == 3 and x["mt"] == 100 # Upgrades work too db = ItemDB(filename) with db: db.put( "items", dict(id=1, mt=102), dict(id=1, mt=102), dict(id=2, mt=102), dict(id=3, mt=102), dict(id=4, mt=102), ) assert set(x["id"] for x in db.select_all("items")) == {1, 2, 3, 4} for x in db.select_all("items"): x["mt"] == 102 # Lets take it further with db: db.put("items", *(dict(id=i, mt=104) for i in range(99))) assert len(db.select_all("items")) == 99 def test_delete_items(): db = ItemDB(":memory:") db.ensure_table("persons", "!name") with db: db.put_one("persons", name="Jan", age=30) db.put_one("persons", name="Henk", age=42) db.put_one("persons", name="Bart", age=19) db.put_one("persons", name="Ivo", age=28) assert 
db.select_one("persons", "name == ?", "Bart") == {"name": "Bart", "age": 19} # Delete fails with raises(IOError): # Must be in a transaction! db.delete("persons", "name == ?", "Bart") with raises(IndexError): # No index for age with db: db.delete("persons", "age == 42") with raises(sqlite3.OperationalError): # Malformed SQL with db: db.delete("persons", "age >>> 42") with db: db.delete("persons", "name == ?", "Bart") assert db.count_all("persons") == 3 assert db.select_one("persons", "name == ?", "Bart") is None # And that transaction can be cancelled try: with db: db.delete("persons", "name > ''") raise RuntimeError() except RuntimeError: pass assert db.count_all("persons") == 3 # Just to show that without that raise, it would indeed clear the table with db: db.delete("persons", "name > ''") assert db.count_all("persons") == 0 def test_transactions1(): filename = get_fresh_filename() db = ItemDB(filename) db.ensure_table("items", "!id", "mt") # Add items the easy way with db: db.put_one("items", id=1, mt=100) db.put_one("items", id=2, mt=100) assert db.count_all("items") == 2 # Add more items and raise after with raises(RuntimeError): with db: db.put_one("items", id=3, mt=100) db.put_one("items", id=4, mt=100) raise RuntimeError("Transaction has been comitted") assert db.count_all("items") == 4 # Again, but now raise within transaction with raises(RuntimeError): with db: db.put_one("items", id=5, mt=100) db.put_one("items", id=6, mt=100) raise RuntimeError("Abort transaction!") assert db.count_all("items") == 4 def test_transactions2(): filename = get_fresh_filename() with ItemDB(filename).ensure_table("items", "!id") as db: db.put_one("items", id=3, value=10) # run transactions in threads while reading from other threads def run_slow_transaction1(): db = ItemDB(filename) with db: db.put_one("items", id=3, value=20) time.sleep(1.0) def run_fast_transaction2(): db = ItemDB(filename) time.sleep(0.1) # make sure that we're the waiting thread with db: 
db.put_one("items", id=3, value=30) time.sleep(0.2) def run_read(): db = ItemDB(filename) for i in range(30): time.sleep(0.05) item = db.select_one("items", "id == 3") read.append(item["value"]) read = [] threads = [ threading.Thread(target=run_slow_transaction1), threading.Thread(target=run_fast_transaction2), threading.Thread(target=run_read), ] for t in threads: t.start() for t in threads: t.join() assert len(read) == 30 assert 5 <= read.count(10) <= 22 assert 2 <= read.count(20) <= 6 assert read.count(30) >= 5 def test_transactions3(): # Test that a transaction really blocks filename = get_fresh_filename() def run_slow_transaction(): db = ItemDB(filename) with db: time.sleep(0.2) threads = [threading.Thread(target=run_slow_transaction) for i in range(3)] t0 = time.perf_counter() for t in threads: t.start() for t in threads: t.join() etime = time.perf_counter() - t0 print(etime) assert etime > 0.6 def test_database_race_conditions(): # This actually tests that a specific update scheme works with the # itemdb. It should. In a previous version, itemdb was specifically # designed for this syncing task. Now it's more general, but this # is still a good use-case. 
n_threads = 25 n_writes = 25 tracking = {} for i in range(1, 11): tracking[i] = [] # Create db and ensure it has tables filename = get_fresh_filename() ItemDB(filename).ensure_table("items", "!id") def push_a_bunch(): for i in range(n_writes): id = random.randint(1, 10) mt = random.randint(1000, 2000) tracking[id].append(mt) with ItemDB(filename) as db: x = db.select_one("items", "id == ?", id) if not x or x["mt"] <= mt: db.put_one("items", id=id, mt=mt) # Prepare, start, and join threads t0 = time.perf_counter() threads = [threading.Thread(target=push_a_bunch) for i in range(n_threads)] for t in threads: t.start() for t in threads: t.join() t1 = time.perf_counter() # Evaluate the result items = ItemDB(filename).select_all("items") print( f"{t1 - t0:0.2f} s for {n_threads * n_writes} writes saving {len(items)} items" ) assert len(items) == 10 # that's the number of id's # for item in items: id = item["id"] assert item["mt"] == max(tracking[id]) return items def test_threaded_access(): """This was an attempt to reproduce an error that turned out to be related to the discrepancy between os.path.getmtime and server_time. This test helped establish that it was not in itemdb. """ filename = get_fresh_filename() xx = [] def write_something(): db = ItemDB(filename).ensure_table("settings", "!id") with db: db.put("settings", dict(id="server_reset", value="42", mt=42)) db.close() return "wrote something" def read_something(): db = ItemDB(filename).ensure_table("settings", "!id") xx.extend(db.select_all("settings")) return "read something" t = threading.Thread(target=write_something) t.start() t.join() t = threading.Thread(target=read_something) t.start() t.join() assert len(xx) == 1 if __name__ == "__main__": run_tests(globals())
zip-cracker-gui.py
# Python Version 2.7.3
# File: zip-cracker-gui.py
import zipfile
import sys, os
import tkMessageBox
from Tkinter import *
from tkFileDialog import askopenfilename
from threading import Thread


class Cracker:
    """Tkinter GUI that runs a dictionary attack against a ZIP password.

    The user picks a ZIP archive and a wordlist; each candidate password is
    tried in its own thread via ZipFile.extractall(pwd=...).
    """

    def __init__(self, master):
        # set up frame
        frame = Frame(master)
        frame.pack()

        # show title
        self.title = Label(frame, text = "ZIP Password Cracker")
        self.title.grid(row = 0, column = 0, columnspan = 4)

        # set up variables
        self.archFileMade = False   # True once a valid ZIP has been opened
        self.dictFilename = ""      # path of the selected wordlist

        # set up all widgets and their layout
        self.openZipLabel = Label(frame, text = "Select ZIP file:")
        self.openZipLabel.grid(row = 1, column = 0, columnspan = 3)
        self.openZipField = Text(frame, state = DISABLED, height = 1)
        self.openZipField.grid(row = 2, column = 0, columnspan = 3)
        self.openZipButton = Button(frame, text = "...",
                                    command = lambda: self.getFileName("zip"))
        self.openZipButton.grid(row = 2, column = 3)
        self.openDictLabel = Label(frame, text = "Select dictionary file:")
        self.openDictLabel.grid(row = 4, column = 0, columnspan = 3)
        self.openDictField = Text(frame, state = DISABLED, height = 1)
        self.openDictField.grid(row = 5, column = 0, columnspan = 3)
        self.openDictButton = Button(frame, text = "...",
                                     command = lambda: self.getFileName("dict"))
        self.openDictButton.grid(row = 5, column = 3)
        self.crackButton = Button(frame, text = "Crack",
                                  command = lambda: self.crackPass())
        self.crackButton.grid(row = 7, column = 0, columnspan = 4)
        self.outputLabel = Label(frame, text = "Status:")
        self.outputLabel.grid(row = 9, column = 0, columnspan = 4)
        self.outputText = Text(frame, state = NORMAL, height = 1)
        self.outputText.insert(1.0, "Waiting for ZIP and Dictionary files.")
        self.outputText.config(state = DISABLED)
        self.outputText.grid(row = 10, column = 0, columnspan = 4)
        self.exitButton = Button(frame, text = "Exit",
                                 command = lambda: self.closeProgram())
        self.exitButton.grid(row = 11, column = 0, columnspan = 4)

    def getFileName(self, filetype):
        """Ask the user for a 'zip' or 'dict' file, validate it, store it."""
        error = False
        if filetype == "zip":
            archFilename = askopenfilename()
            # check if user actually picked a file
            if archFilename != "":
                # check if file exists and access is allowed
                if not os.path.isfile(archFilename):
                    tkMessageBox.showinfo("File Error", archFilename + " does not exist.")
                    error = True
                if not os.access(archFilename, os.R_OK):
                    tkMessageBox.showinfo("File Error", archFilename + " access denied.")
                    error = True
                # BUG FIX: previously `error = self.checkIfZip(...)` could
                # overwrite an earlier failure; only probe the archive when
                # the basic checks passed.
                if not error:
                    error = self.checkIfZip(archFilename)
                # update data if no errors found
                if error == False:
                    self.archFileMade = True
                    self.openZipField.config(state = NORMAL)
                    self.openZipField.delete(1.0, END)
                    self.openZipField.insert(END, archFilename)
                    self.openZipField.config(state = DISABLED)
        if filetype == "dict":
            dictFilename = askopenfilename()
            # check if user actually picked a file
            if dictFilename != "":
                # check if file exists and access is allowed
                if not os.path.isfile(dictFilename):
                    tkMessageBox.showinfo("File Error", dictFilename + " does not exist.")
                    error = True
                if not os.access(dictFilename, os.R_OK):
                    tkMessageBox.showinfo("File Error", dictFilename + " access denied.")
                    error = True
                # update data if no errors found
                if error == False:
                    self.dictFilename = dictFilename
                    self.openDictField.config(state = NORMAL)
                    self.openDictField.delete(1.0, END)
                    self.openDictField.insert(END, dictFilename)
                    self.openDictField.config(state = DISABLED)

    def checkIfZip(self, filename):
        """Open *filename* as a ZIP; return False on success, True on error."""
        try:
            self.archFile = zipfile.ZipFile(filename)
            return False
        except zipfile.BadZipfile:
            tkMessageBox.showinfo("File Error", filename + " is corrupt or not a ZIP file.")
            return True
        except zipfile.LargeZipFile:
            # BUG FIX: was `tkMessageBox.showing(...)` - a typo that raised
            # AttributeError instead of showing the dialog.
            tkMessageBox.showinfo("File Error", filename + " requires ZIP64, which is not enabled")
            return True
        except:
            tkMessageBox.showinfo("File Error", "An error occurred, please try again.")
            return True

    def crackPass(self):
        """Try every word of the dictionary as the archive password."""
        if self.archFileMade == False:
            tkMessageBox.showinfo("Error", "Please select a ZIP file.")
        elif self.dictFilename == "":
            tkMessageBox.showinfo("Error", "Please select a dictionary file.")
        else:
            # output status message
            self.outputText.config(state = NORMAL)
            self.outputText.delete(1.0, END)
            self.outputText.insert(END, "Cracking...")
            self.outputText.config(state = DISABLED)
            self.finished = False
            # open dict file
            dictFile = open(self.dictFilename, 'r')
            # start threads, one per candidate password
            # NOTE(review): the "not in dictionary" check below runs as soon
            # as the threads are spawned, not when they finish - the status
            # may be wrong until a worker calls passwordFound(); confirm
            # whether joining the threads is wanted here.
            for line in dictFile.readlines():
                # if finished, don't start new threads
                if self.finished == False:
                    password = line.strip("\n")
                    password = password.strip()
                    t = Thread(target = self.extractFile,
                               args = (self.archFile, password))
                    t.start()
            # if failed, change status message
            if self.finished == False:
                self.outputText.config(state = NORMAL)
                self.outputText.delete(1.0, END)
                self.outputText.insert(END, "Password not in dictionary.")
                self.outputText.config(state = DISABLED)

    def extractFile(self, archFile, password):
        """Worker: attempt extraction with *password*; report on success."""
        if self.finished == False:
            try:
                archFile.extractall(pwd = password)
                self.passwordFound(password)
            except:
                # Wrong password (RuntimeError/zlib errors in py2) - ignore.
                pass

    def passwordFound(self, password):
        # Stop the other workers and show the recovered password.
        self.finished = True
        self.outputText.config(state = NORMAL)
        self.outputText.delete(1.0, END)
        self.outputText.insert(END, "Password found: " + password)
        self.outputText.config(state = DISABLED)

    def closeProgram(self):
        global root
        root.destroy()

#create Tk widget
root = Tk()
#set title
root.title("ZIP Password Cracker")
#create instance
cracker = Cracker(root)
#make sure root is killed on close
root.protocol('WM_DELETE_WINDOW', cracker.closeProgram)
#run event loop
root.mainloop()
zjsfb_grabno_2020.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright: error.d
# Date : 2018-02-24
# Update : 2020-05-02
# Create by: error.d<error.d@gmail.com>
#
# Polls a hospital WeChat booking page, parses the schedule table for a
# configured doctor, and submits appointment requests as soon as a slot
# for the configured date/half-day opens up.  `config` and `debug` are
# supplied by the star-import from `ying`.

import re
import time
import json
import requests
import threading
import ipdb
from lxml import etree
from ying import *
#from ding import *

"""
todolist:
* 退出机制完善
"""

'''
estr = etree._ElementStringResult
eunicode = etree._ElementUnicodeResult
estrtypes = [str, unicode, estr, eunicode]
'''

# Module-wide flags shared between the poll loop and the booking workers.
done = False               # current round finished (slot unavailable)
full_exit = False          # terminate the whole program
cur_oppointment_time = ""  # date string of the slot currently examined


def is_login(html):
    """Return True if *html* looks like a logged-in page (no login markers)."""
    for key in config['is_login_keys']:
        if html.find(key) >= 0:
            print("login failed: %s" % key)
            return False
    print("login success")
    return True


def get_cookies():
    """Build the session cookie header value from the configured session."""
    return "%s=%s" % (config['session_key'], config['session_value'])


def get_headers(update_headers):
    """Return request headers mimicking the WeChat in-app browser, merged
    with *update_headers*."""
    headers = {"Host": "zjsfbwxs.zwjk.com",
               "Connection": "keep-alive",
               "Upgrade-Insecure-Requests": "1",
               "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
               "Accept-Language": "zh-cn",
               "Accept-Encoding": "gzip, deflate",
               "Cookie": get_cookies(),
               "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_5 like Mac OS X) AppleWebKit/604.5.6 (KHTML, like Gecko) Mobile/15D60 MicroMessenger/6.6.3 NetType/WIFI Language/zh_CN"
               }
    headers.update(update_headers)
    return headers


def work_time_div_idx(work_time):
    # Placeholder kept from the original source; never implemented.
    pass


def create_worktime_table(html):
    """Parse the date header row into {1..7: [date fields]}."""
    selector = etree.HTML(html.encode('utf-8'))
    elements = selector.xpath("//*[@class=\"baseli dateli\"]")
    if debug:
        print("create_worktime_table: ", elements, len(elements))
    worktime_table = {}
    for idx, element in enumerate(elements):
        # BUG FIX: was `idx is 0` — identity comparison on an int, which is
        # a CPython implementation detail (and a SyntaxWarning on 3.8+).
        if idx == 0:
            # First cell is the row label, not a date.
            continue
        worktime_table[idx] = [s.replace("\r\n", "").replace(" ", "")
                               for s in element.itertext()]
    return worktime_table


def day_work_time(worktime_table):
    # idx is relative to today
    return worktime_table[1]


def last_work_time(worktime_table):
    """Return the last (7th) day of the visible schedule window."""
    return worktime_table[7]


def find_doctor_block(html, doctor):
    """Return the HTML fragment for *doctor*'s row, or None if absent."""
    blocks = map(lambda x: config["doctor_block_tag"] + x,
                 html.split(config["doctor_block_tag"]))
    doctor_blocks = list(filter(lambda x: doctor in x, blocks))
    if len(doctor_blocks) == 0:
        return None
    return doctor_blocks[0]


def onclick_parse(onclick):
    """Extract the argument list of the inline `test(...)` onclick handler."""
    onclick = onclick.replace("'", "")
    m = re.search(r"test\((.+?)\)", onclick)
    # BUG FIX: guard against no match before touching m.groups(),
    # otherwise this raised AttributeError instead of a clear message.
    assert m is not None and len(m.groups()) == 1, "onclick failed"
    action = list(map(lambda x: x.encode('utf8'), m.group(1).split(",")))
    return action


def create_doctor_table(html, doctor):
    """Build {'am': {1..7: slot}, 'pm': {1..7: slot}} for *doctor*, where a
    slot is {'count': text, 'action': parsed-onclick-or-None}."""
    doctor_table = {'am': {}, 'pm': {}}
    block_html = find_doctor_block(html, doctor)
    if block_html is None:
        print("没有'%s'医生的信息" % doctor)
        return None
    elif debug:
        print("find_doctor_block: %s" % block_html)
    selector = etree.HTML(block_html)
    elements = [t for t in selector.xpath("//div[@class=\"baseli orderli\"]")]
    element_texts = [''.join([t.text for t in element]) for element in elements]
    if debug:
        print("element texts: %s" % element_texts)
    state = 0
    idx = 0
    for eidx, text in enumerate(element_texts):
        if eidx == 0:
            # 上午 (morning section header)
            state = 'am'
            idx = 1
            continue
        if eidx == 8:
            # 下午 (afternoon section header)
            state = 'pm'
            idx = 1
            continue
        if state == 0 or text == ' ':
            continue
        text = text.strip()
        action = None
        if text.isdigit():
            # A numeric cell is a bookable slot; its onclick carries the
            # parameters needed by the booking endpoint.
            onclick = elements[eidx].attrib['onclick']
            action_list = onclick_parse(onclick)
            action = {"id": action_list[0],        # a
                      "ampm": action_list[1],      # b
                      "date": action_list[2],      # c
                      "week": action_list[3],      # d
                      "regfee": action_list[4],    # e
                      "deptType": action_list[5],  # f
                      "orderType": action_list[6], # g
                      # dept_name = dept_name
                      }
        doctor_table[state][idx] = {"count": text, "action": action}
        idx += 1
    return doctor_table


def doctor_gatecard(html, doctor):
    """Merge the date header and the doctor's slots into
    {1..7: {'time': [...], 'work': {'am': slot, 'pm': slot}}}."""
    if debug:
        print("html: %s doctor: %s" % (html, doctor))
    html = re.sub(r"\s+", " ", html)
    worktime_table = create_worktime_table(html)
    doctor_table = create_doctor_table(html, doctor)
    if doctor_table is None:
        return None
    if debug:
        print("worktime_table: %s" % worktime_table)
        print("doctor_table: %s" % doctor_table)
    gatecard = {}
    assert len(worktime_table) == 7 and \
        len(doctor_table['am']) == 7 and \
        len(doctor_table['pm']) == 7, "格式错误"
    # Renamed the loop variable (was `time`) so it no longer shadows the
    # `time` module.
    for slot in worktime_table:
        gatecard[slot] = {
            "time": worktime_table[slot],
            "work": {"am": doctor_table['am'][slot],
                     "pm": doctor_table['pm'][slot]}
        }
    return gatecard


def submit_oppointment(doctor_id, dept_type, numid_list, order_type):
    """Fire one booking request per queue number, optionally on threads."""
    global done, full_exit

    def _start_oppointment(numid):
        global done, full_exit
        if done or full_exit:
            return
        # BUG FIX: this used the enclosing loop variable `nid` instead of
        # the `numid` parameter; with use_thread enabled the late-bound
        # closure meant several threads could submit the same (latest) id.
        numid = str(doctor_id) + '-' + str(numid)
        print("oppointment numid: %s dept_type: %s id: %s" % \
              (numid, dept_type, doctor_id))
        _r = http_request("action_oppointment", numid=numid,
                          deptType=dept_type, id=doctor_id,
                          orderType=order_type)
        print("numid:%s oppointment result: %s" % (numid, _r))
        try:
            result = json.loads(_r)
        except Exception as e:
            print("submit_oppointment loads failed : %s" % e)
            return
        if result["R"] == 200:
            if 'url' in result and len(result['url']) > 1:
                print("发现需要社保支持..")
                print("[http request] GET url:%s" % result['url'])
                headers = get_headers({})
                request = requests.get(result['url'], headers=headers)
                if debug:
                    html = request.content.decode('utf-8')
                    print(html)
            print("oppointment SUCCESS: %s" % result["res"])
            full_exit = True
            return
        if config["oppointment_fail_action"] == 2:
            full_exit = True
            return

    print("submit_oppointment params: doctor_id=%s dept_type=%s numid_list=%s" % \
          (doctor_id, dept_type, numid_list))
    # Slightly stagger the first few requests, then fire at full speed.
    clock = 0.05
    for nid in numid_list:
        if config["use_thread"] is True:
            threading.Thread(target=_start_oppointment, args=(nid,),
                             name='thread-' + str(nid)).start()
        else:
            _start_oppointment(nid)
        if clock > 0:
            time.sleep(clock)
            clock -= 0.01
    full_exit = True


def http_request(action, **kwargs):
    """Perform the configured request for *action*; return the decoded body,
    or None when the request failed or the session is not logged in."""
    update_headers = {}
    action_info = config["action_url_map"][action]
    if "referer" in action_info:
        update_headers["Referer"] = action_info["referer"]
    headers = get_headers(update_headers)
    method = action_info["method"].upper()
    url = action_info["url"]
    params = {}
    if "params" in action_info:
        for key, value in action_info["params"].items():
            params[key] = value
    params.update(kwargs)
    assert None not in params.values(), "request need params has None"
    if method == "GET":
        if len(params) > 0:
            # NOTE(review): the query string is appended with no '?'
            # separator — presumably the configured action URLs already end
            # in '?' or '&'; confirm against the `ying` config.
            url += '&'.join(["{0}={1}".format(k, v) for k, v in params.items()])
        print("[http request] GET url:%s" % url)
        request = requests.get(url, headers=headers)
    elif method == "POST":
        if debug:
            print("post url:%s params:%s" % (url, params))
        print("[http request] POST url:%s" % url)
        request = requests.post(url, headers=headers, data=params)
    cookies = request.cookies
    html = request.content.decode('utf-8')
    if debug:
        print("request headers: %s" % request.request.headers)
        if method == "POST":
            print("request body: %s" % request.request.body)
        print("response headers: %s" % request.headers)
    if not request.ok or not is_login(html):
        print("request action:%s failed.." % action)
        if debug:
            print("response: %s" % html)
        return None
    return html


def is_availability(gatecard):
    """Return True when the configured half-day slot is still bookable;
    otherwise set `done` and return False."""
    global done
    count = gatecard['work'][config['visit_time']]['count']
    if count == '-':
        print('oppointment: no availability(-)')
        done = True
        return False
    elif count == '满':
        print('oppointment: doctor full(满)')
        done = True
        return False
    elif count == "停诊":
        print('oppointment: doctor stop(停诊)')
        done = True
        return False
    return True


def get_numid_list(doctor_id):
    """Fetch the real queue-number ids for *doctor_id* from the schedule page."""
    ampm = 1 if config['visit_time'] == 'am' else 2
    html = http_request("action_schedule", id=doctor_id, ampm=ampm)
    selector = etree.HTML(html.encode('utf-8'))
    elements = selector.xpath("//*[@class=\"order_menu_list\"]")
    onclick_list = filter(lambda x: x is not None,
                          [element.attrib.get('onclick', None)
                           for element in elements])
    numid_list = []
    for onclick in onclick_list:
        action_list = onclick_parse(onclick)
        numid_list.append(int(action_list[1]))
    return numid_list


def oppointment(gatecard):
    """Extract booking parameters from *gatecard* and submit requests."""
    if not is_availability(gatecard):
        return
    try:
        count = gatecard['work'][config['visit_time']]['count']
        dept_type = gatecard['work'][config['visit_time']]['action']['deptType']
        doctor_id = int(gatecard['work'][config['visit_time']]['action']['id'])
        order_type = gatecard['work'][config['visit_time']]['action']['orderType']
    except Exception as e:
        print("oppointment params failed: %s" % e)
        return
    if config["numid_list_real"] is True:
        numid_list = get_numid_list(doctor_id)
    else:
        numid_list = [i for i in range(1, int(count))]
    submit_oppointment(doctor_id, dept_type, numid_list, order_type)


def run():
    """Main poll loop: refresh the schedule until the configured date's slot
    is booked (or definitively unavailable) or full_exit is requested."""
    global cur_oppointment_time, done
    print("预约 -- 医生: %s 时间: %s-%s " % (config['doctor_name'],
                                         config['oppointment_time'],
                                         config['visit_time']))
    while not (done is True and cur_oppointment_time == config["oppointment_time"]) and \
            full_exit is False:
        time.sleep(0.01)
        done = False
        action = "action_chankezhuanjia"
        html = http_request(action)
        if html is None:
            continue
        gatecard = doctor_gatecard(html, config['doctor_name'])
        if gatecard is None:
            print('no doctor...')
            if config["check_doctor_name"] == False:
                return
            else:
                continue
        if debug:
            print('gatecard: %s' % gatecard)
        cur_gatecard = gatecard[config["gatecard_idx"]]
        cur_oppointment_time = cur_gatecard["time"][1]
        if cur_oppointment_time != config["oppointment_time"]:
            print("time failed cur_oppointment_time: %s config oppointment_time: %s" %
                  (cur_oppointment_time, config["oppointment_time"]))
            print("auto search time: %s" % config["oppointment_time"])
            # Scan all seven days for the configured date.  Relies on dict
            # insertion order (gatecard keys are 1..7), so idx+1 maps the
            # enumerate index back to the gatecard key.
            has_time = False
            for (idx, item) in enumerate(gatecard.items()):
                item = item[1]
                item_time = item['time']
                if item_time[1] == config["oppointment_time"]:
                    has_time = True
                    cur_oppointment_time = item_time[1]
                    cur_gatecard = gatecard[idx+1]
                    print("search success %s %s" % (item_time[0], item_time[1]))
                    break
            if not has_time:
                print("search %s failed" % config["oppointment_time"])
                continue
        oppointment(cur_gatecard)


if __name__ == '__main__':
    run()
groups.py
import tkinter as tk
from config.colours import LIGHT_RED, DARK_RED, GLOBAL_SIDEBAR_BG, HEADING_COLOR, BG_COLOR, COUNTRY_SEARCH_BG, COUNTRYBOX_BG_COLOR, COUNTRIES_TEXT_COLOR, COUNTRIES_DETAILS_BG
from config.fonts import TITLE_FONT, HEADING_FONT, SEARCHBAR_FONT, COUNTRY_NOT_FOUND_FONT
from config.paths import titlebar_icon_route, global_heading_image_route, icon_route, searchbar_icon_route, ascending_cases_icon_route, descending_cases_icon_route, a_to_z_alphabetical_icon_route, z_to_a_alphabetical_icon_route, x_icon_route
from ui.standard import ImageLabel, StatLabel, CountrySearch, ScrollableFrame, ImageButton
from utils.data_manager import DataManager
import threading

# Interval between data refreshes: 5 minutes, in milliseconds (tk.after units).
REFRESH_TIME = 5*60*1000


class AppTitleBar(tk.Frame):
    """Static title bar with the app name and icon."""

    def __init__(self, app, *args, **kwargs):
        tk.Frame.__init__(self, app, *args, **kwargs)
        self['bg'] = LIGHT_RED
        self.pack(fill="both")
        tk.Label(self, text="CoviStats", font=TITLE_FONT, bg=LIGHT_RED,
                 fg=DARK_RED).pack(side="left", padx=(10, 3))
        ImageLabel(self, image=tk.PhotoImage(file=titlebar_icon_route),
                   bg=LIGHT_RED).pack(side="left")


class AppGlobalSidebar(tk.Frame):
    """Left sidebar showing worldwide totals, refreshed every REFRESH_TIME ms."""

    def __init__(self, app, *args, **kwargs):
        tk.Frame.__init__(self, app.content_frame, *args, **kwargs)
        self['bg'] = GLOBAL_SIDEBAR_BG
        self.grid(row=0, column=0, rowspan=2, sticky="nws", ipadx=10)
        app.content_frame.grid_rowconfigure(1, weight=1)

        # Heading group.
        global_heading_frame = tk.Frame(self, bg=GLOBAL_SIDEBAR_BG)
        global_heading_frame.pack()
        tk.Label(global_heading_frame, text="Global", font=HEADING_FONT,
                 fg=HEADING_COLOR, bg=GLOBAL_SIDEBAR_BG).pack(side="left", padx=(0, 5))
        ImageLabel(global_heading_frame,
                   image=tk.PhotoImage(file=global_heading_image_route),
                   bg=GLOBAL_SIDEBAR_BG).pack(side="left")

        # Stats group.
        global_data = DataManager.get_global_data()
        self.stat_groups = [
            StatLabel(self, global_data.cases, "cases"),
            StatLabel(self, global_data.active, "active"),
            StatLabel(self, global_data.recoveries, "recoveries"),
            StatLabel(self, global_data.deaths, "deaths")
        ]
        self.after(REFRESH_TIME, self.update)

    # NOTE(review): this overrides tkinter's own Misc.update(); kept for
    # backward compatibility, but a rename (e.g. refresh) would be safer.
    def update(self):
        """Re-fetch global figures and push them into the stat labels."""
        global_data = DataManager.get_global_data()
        self.stat_groups[0].update(global_data.cases)
        self.stat_groups[1].update(global_data.active)
        self.stat_groups[2].update(global_data.recoveries)
        self.stat_groups[3].update(global_data.deaths)
        self.after(REFRESH_TIME, self.update)


class AppCountrySearchGroup(tk.Frame):
    """Searchable, sortable list of per-country entries."""

    def __init__(self, app, *args, **kwargs):
        tk.Frame.__init__(self, app.content_frame, *args, **kwargs)
        self.app = app
        self['bg'] = COUNTRY_SEARCH_BG
        self.grid(row=0, column=1, sticky="nesw", ipadx=10)
        app.content_frame.grid_columnconfigure(1, weight=2)
        app.content_frame.grid_rowconfigure(0, weight=2)

        # Current sort: (key function, reverse flag); default A→Z by name.
        self.selected_sort = (lambda k: k.name, False)

        # Heading group.
        heading_frame = tk.Frame(self, bg=BG_COLOR)
        heading_frame.pack()
        tk.Label(heading_frame, text="Countries", justify="left", anchor="w",
                 font=HEADING_FONT, fg=HEADING_COLOR,
                 bg=BG_COLOR).pack(padx=(7, 0), side="left", anchor="w")

        # Searchbar + countrybox.
        self.searchbar_frame = tk.Frame(self, bg=BG_COLOR)
        self.searchbar_frame.pack(fill="x")
        ImageLabel(self.searchbar_frame,
                   image=tk.PhotoImage(file=searchbar_icon_route),
                   bg=BG_COLOR).pack(side="left", padx=(12, 5))
        self.search_term = tk.StringVar()
        self.search_term.trace_add('write', self.search_callback)
        self.searchbar = CountrySearch(self.searchbar_frame, font=SEARCHBAR_FONT,
                                       textvariable=self.search_term)
        self.searchbar.pack(expand=True, fill="x", padx=(5, 12))

        countries_box_frame = tk.Frame(self)
        countries_box_frame.pack(fill="both", expand=1, padx=12, pady=12)
        self.countries_box = ScrollableFrame(countries_box_frame)

        # Load countries.
        self.update_data()

        # Sort buttons.
        sort_buttons_frame = tk.Frame(heading_frame, bg=BG_COLOR)
        sort_buttons_frame.pack(side="right", padx=(15, 7), anchor="e")
        ascending_cases_sort = ImageButton(
            sort_buttons_frame, bg=BG_COLOR,
            image=tk.PhotoImage(file=ascending_cases_icon_route),
            command=lambda: self.sort_data("cases", False))
        ascending_cases_sort.pack(side="left", padx=3)
        descending_cases_sort = ImageButton(
            sort_buttons_frame, bg=BG_COLOR,
            image=tk.PhotoImage(file=descending_cases_icon_route),
            command=lambda: self.sort_data("cases", True))
        descending_cases_sort.pack(side="left", padx=3)
        a_to_z_name_sort = ImageButton(
            sort_buttons_frame, bg=BG_COLOR,
            image=tk.PhotoImage(file=a_to_z_alphabetical_icon_route),
            command=lambda: self.sort_data("name", False))
        a_to_z_name_sort.pack(side="left", padx=3)
        z_to_a_name_sort = ImageButton(
            sort_buttons_frame, bg=BG_COLOR,
            image=tk.PhotoImage(file=z_to_a_alphabetical_icon_route),
            command=lambda: self.sort_data("name", True))
        z_to_a_name_sort.pack(side="left", padx=3)

    def update_data(self):
        """Re-fetch country data, re-apply the current sort and filter, and
        reschedule itself."""
        self.countries_data = DataManager.get_countries_data(
            self.countries_box.scrollable_frame, self.app)
        key, reverse = self.selected_sort
        self.countries_data.sort(key=key, reverse=reverse)
        text = self.search_term.get()
        text = "" if text == " " else text
        self.update_country_box(text)
        self.after(REFRESH_TIME, self.update_data)

    def search_callback(self, _, __, ___):
        """StringVar write-trace: refilter the box as the user types."""
        text = self.search_term.get()
        if self.search_term.get() == " ":
            self.search_term.set("")
        else:
            self.update_country_box(text)

    def update_country_box(self, text, thread=False):
        """Show only the countries whose name contains *text* (case-insensitive)."""
        try:
            self.countries_box._reset()
            if text == "Search":
                text = ""
            countries = list(filter(lambda c: text.lower() in c.name.lower(),
                                    self.countries_data))
            for country in countries:
                if thread:
                    # NOTE(review): tkinter is not thread-safe; packing
                    # widgets from worker threads is fragile — confirm this
                    # path is actually needed.
                    t = threading.Thread(
                        target=lambda: country.frame.pack(anchor="w", fill="x",
                                                          expand=True))
                    # BUG FIX: Thread.setDaemon() is deprecated (removed in
                    # newer Pythons); set the attribute instead.
                    t.daemon = True
                    t.start()
                else:
                    country.frame.pack(anchor="w", fill="x", expand=True)
            if len(countries) == 0:
                tk.Label(self.countries_box.scrollable_frame,
                         bg=COUNTRYBOX_BG_COLOR, fg=COUNTRIES_TEXT_COLOR,
                         font=COUNTRY_NOT_FOUND_FONT,
                         text=f"no countries with \"{text}\" found").pack(
                             anchor="w", fill="x", expand=True)
        except AttributeError:
            # Data may not be loaded yet on the very first callback.
            pass

    def sort_data(self, key, reverse):
        """Sort the country list by "cases" or "name" and refresh the box."""
        text = self.search_term.get()
        sorting_key = {
            "cases": lambda k: k.cases,
            "name": lambda k: k.name
        }.get(key)
        self.selected_sort = (sorting_key, reverse)
        self.countries_data.sort(key=sorting_key, reverse=reverse)
        self.update_country_box(text)


class CountryDetailsGroup(tk.Frame):
    """Detail pane for one country, refreshed every REFRESH_TIME ms."""

    def __init__(self, app, country_name, *args, **kwargs):
        tk.Frame.__init__(self, app, *args, **kwargs)
        self.country_name = country_name
        self['bg'] = COUNTRIES_DETAILS_BG
        self.pack(side="left", expand=True, fill="both")

        # Heading group.
        parent_frame = tk.Frame(self, bg=COUNTRIES_DETAILS_BG)
        heading_frame = tk.Frame(parent_frame, bg=COUNTRIES_DETAILS_BG)
        heading_frame.pack()
        close_button = ImageButton(heading_frame, bg=COUNTRIES_DETAILS_BG,
                                   image=tk.PhotoImage(file=x_icon_route),
                                   command=lambda: app.hide_country_details())
        close_button.pack(side="left", padx=8)
        tk.Label(heading_frame, text=self.country_name, font=HEADING_FONT,
                 fg=HEADING_COLOR, bg=GLOBAL_SIDEBAR_BG).pack(side="left",
                                                              padx=(0, 8))

        # Stat groups (populated on the first update() call below).
        self.stat_groups = [
            StatLabel(parent_frame, 0, "cases"),
            StatLabel(parent_frame, 0, "active"),
            StatLabel(parent_frame, 0, "recoveries"),
            StatLabel(parent_frame, 0, "deaths")
        ]
        parent_frame.pack(expand=True)
        self.after(0, self.update)

    # NOTE(review): shadows tkinter's Misc.update(); kept for compatibility.
    def update(self):
        """Re-fetch this country's figures and push them into the labels."""
        country_data = DataManager.get_country_data_for(self.country_name)
        self.stat_groups[0].update(country_data.cases)
        self.stat_groups[1].update(country_data.active)
        self.stat_groups[2].update(country_data.recoveries)
        self.stat_groups[3].update(country_data.deaths)
        self.after(REFRESH_TIME, self.update)
test_unix_events.py
"""Tests for unix_events.py.""" import contextlib import errno import io import os import pathlib import signal import socket import stat import sys import tempfile import threading import unittest from unittest import mock from test.support import os_helper from test.support import socket_helper if sys.platform == 'win32': raise unittest.SkipTest('UNIX only') import asyncio from asyncio import log from asyncio import unix_events from test.test_asyncio import utils as test_utils MOCK_ANY = mock.ANY def EXITCODE(exitcode): return 32768 + exitcode def SIGNAL(signum): assert 1 <= signum <= 68 return 32768 - signum def tearDownModule(): asyncio.set_event_loop_policy(None) def close_pipe_transport(transport): # Don't call transport.close() because the event loop and the selector # are mocked if transport._pipe is None: return transport._pipe.close() transport._pipe = None @unittest.skipUnless(signal, 'Signals are not supported') class SelectorEventLoopSignalTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = asyncio.SelectorEventLoop() self.set_event_loop(self.loop) def test_check_signal(self): self.assertRaises( TypeError, self.loop._check_signal, '1') self.assertRaises( ValueError, self.loop._check_signal, signal.NSIG + 1) def test_handle_signal_no_handler(self): self.loop._handle_signal(signal.NSIG + 1) def test_handle_signal_cancelled_handler(self): h = asyncio.Handle(mock.Mock(), (), loop=mock.Mock()) h.cancel() self.loop._signal_handlers[signal.NSIG + 1] = h self.loop.remove_signal_handler = mock.Mock() self.loop._handle_signal(signal.NSIG + 1) self.loop.remove_signal_handler.assert_called_with(signal.NSIG + 1) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_setup_error(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals m_signal.set_wakeup_fd.side_effect = ValueError self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) 
@mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_coroutine_error(self, m_signal): m_signal.NSIG = signal.NSIG async def simple_coroutine(): pass # callback must not be a coroutine function coro_func = simple_coroutine coro_obj = coro_func() self.addCleanup(coro_obj.close) for func in (coro_func, coro_obj): self.assertRaisesRegex( TypeError, 'coroutines cannot be used with add_signal_handler', self.loop.add_signal_handler, signal.SIGINT, func) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals cb = lambda: True self.loop.add_signal_handler(signal.SIGHUP, cb) h = self.loop._signal_handlers.get(signal.SIGHUP) self.assertIsInstance(h, asyncio.Handle) self.assertEqual(h._callback, cb) @mock.patch('asyncio.unix_events.signal') def test_add_signal_handler_install_error(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals def set_wakeup_fd(fd): if fd == -1: raise ValueError() m_signal.set_wakeup_fd = set_wakeup_fd class Err(OSError): errno = errno.EFAULT m_signal.signal.side_effect = Err self.assertRaises( Err, self.loop.add_signal_handler, signal.SIGINT, lambda: True) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_add_signal_handler_install_error2(self, m_logging, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err self.loop._signal_handlers[signal.SIGHUP] = lambda: True self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) self.assertFalse(m_logging.info.called) self.assertEqual(1, m_signal.set_wakeup_fd.call_count) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_add_signal_handler_install_error3(self, m_logging, m_signal): class Err(OSError): errno = errno.EINVAL 
m_signal.signal.side_effect = Err m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.assertRaises( RuntimeError, self.loop.add_signal_handler, signal.SIGINT, lambda: True) self.assertFalse(m_logging.info.called) self.assertEqual(2, m_signal.set_wakeup_fd.call_count) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.assertTrue( self.loop.remove_signal_handler(signal.SIGHUP)) self.assertTrue(m_signal.set_wakeup_fd.called) self.assertTrue(m_signal.signal.called) self.assertEqual( (signal.SIGHUP, m_signal.SIG_DFL), m_signal.signal.call_args[0]) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_2(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.SIGINT = signal.SIGINT m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGINT, lambda: True) self.loop._signal_handlers[signal.SIGHUP] = object() m_signal.set_wakeup_fd.reset_mock() self.assertTrue( self.loop.remove_signal_handler(signal.SIGINT)) self.assertFalse(m_signal.set_wakeup_fd.called) self.assertTrue(m_signal.signal.called) self.assertEqual( (signal.SIGINT, m_signal.default_int_handler), m_signal.signal.call_args[0]) @mock.patch('asyncio.unix_events.signal') @mock.patch('asyncio.base_events.logger') def test_remove_signal_handler_cleanup_error(self, m_logging, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) m_signal.set_wakeup_fd.side_effect = ValueError self.loop.remove_signal_handler(signal.SIGHUP) self.assertTrue(m_logging.info) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_error(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) 
m_signal.signal.side_effect = OSError self.assertRaises( OSError, self.loop.remove_signal_handler, signal.SIGHUP) @mock.patch('asyncio.unix_events.signal') def test_remove_signal_handler_error2(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) class Err(OSError): errno = errno.EINVAL m_signal.signal.side_effect = Err self.assertRaises( RuntimeError, self.loop.remove_signal_handler, signal.SIGHUP) @mock.patch('asyncio.unix_events.signal') def test_close(self, m_signal): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.loop.add_signal_handler(signal.SIGCHLD, lambda: True) self.assertEqual(len(self.loop._signal_handlers), 2) m_signal.set_wakeup_fd.reset_mock() self.loop.close() self.assertEqual(len(self.loop._signal_handlers), 0) m_signal.set_wakeup_fd.assert_called_once_with(-1) @mock.patch('asyncio.unix_events.sys') @mock.patch('asyncio.unix_events.signal') def test_close_on_finalizing(self, m_signal, m_sys): m_signal.NSIG = signal.NSIG m_signal.valid_signals = signal.valid_signals self.loop.add_signal_handler(signal.SIGHUP, lambda: True) self.assertEqual(len(self.loop._signal_handlers), 1) m_sys.is_finalizing.return_value = True m_signal.signal.reset_mock() with self.assertWarnsRegex(ResourceWarning, "skipping signal handlers removal"): self.loop.close() self.assertEqual(len(self.loop._signal_handlers), 0) self.assertFalse(m_signal.signal.called) @unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'UNIX Sockets are not supported') class SelectorEventLoopUnixSocketTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = asyncio.SelectorEventLoop() self.set_event_loop(self.loop) @socket_helper.skip_unless_bind_unix_socket def test_create_unix_server_existing_path_sock(self): with test_utils.unix_socket_path() as path: sock = socket.socket(socket.AF_UNIX) sock.bind(path) 
sock.listen(1) sock.close() coro = self.loop.create_unix_server(lambda: None, path) srv = self.loop.run_until_complete(coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) @socket_helper.skip_unless_bind_unix_socket def test_create_unix_server_pathlib(self): with test_utils.unix_socket_path() as path: path = pathlib.Path(path) srv_coro = self.loop.create_unix_server(lambda: None, path) srv = self.loop.run_until_complete(srv_coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) def test_create_unix_connection_pathlib(self): with test_utils.unix_socket_path() as path: path = pathlib.Path(path) coro = self.loop.create_unix_connection(lambda: None, path) with self.assertRaises(FileNotFoundError): # If pathlib.Path wasn't supported, the exception would be # different. self.loop.run_until_complete(coro) def test_create_unix_server_existing_path_nonsock(self): with tempfile.NamedTemporaryFile() as file: coro = self.loop.create_unix_server(lambda: None, file.name) with self.assertRaisesRegex(OSError, 'Address.*is already in use'): self.loop.run_until_complete(coro) def test_create_unix_server_ssl_bool(self): coro = self.loop.create_unix_server(lambda: None, path='spam', ssl=True) with self.assertRaisesRegex(TypeError, 'ssl argument must be an SSLContext'): self.loop.run_until_complete(coro) def test_create_unix_server_nopath_nosock(self): coro = self.loop.create_unix_server(lambda: None, path=None) with self.assertRaisesRegex(ValueError, 'path was not specified, and no sock'): self.loop.run_until_complete(coro) def test_create_unix_server_path_inetsock(self): sock = socket.socket() with sock: coro = self.loop.create_unix_server(lambda: None, path=None, sock=sock) with self.assertRaisesRegex(ValueError, 'A UNIX Domain Stream.*was expected'): self.loop.run_until_complete(coro) def test_create_unix_server_path_dgram(self): sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) with sock: coro = self.loop.create_unix_server(lambda: None, path=None, 
sock=sock) with self.assertRaisesRegex(ValueError, 'A UNIX Domain Stream.*was expected'): self.loop.run_until_complete(coro) @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'), 'no socket.SOCK_NONBLOCK (linux only)') @socket_helper.skip_unless_bind_unix_socket def test_create_unix_server_path_stream_bittype(self): sock = socket.socket( socket.AF_UNIX, socket.SOCK_STREAM | socket.SOCK_NONBLOCK) with tempfile.NamedTemporaryFile() as file: fn = file.name try: with sock: sock.bind(fn) coro = self.loop.create_unix_server(lambda: None, path=None, sock=sock) srv = self.loop.run_until_complete(coro) srv.close() self.loop.run_until_complete(srv.wait_closed()) finally: os.unlink(fn) def test_create_unix_server_ssl_timeout_with_plain_sock(self): coro = self.loop.create_unix_server(lambda: None, path='spam', ssl_handshake_timeout=1) with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful with ssl'): self.loop.run_until_complete(coro) def test_create_unix_connection_path_inetsock(self): sock = socket.socket() with sock: coro = self.loop.create_unix_connection(lambda: None, sock=sock) with self.assertRaisesRegex(ValueError, 'A UNIX Domain Stream.*was expected'): self.loop.run_until_complete(coro) @mock.patch('asyncio.unix_events.socket') def test_create_unix_server_bind_error(self, m_socket): # Ensure that the socket is closed on any bind error sock = mock.Mock() m_socket.socket.return_value = sock sock.bind.side_effect = OSError coro = self.loop.create_unix_server(lambda: None, path="/test") with self.assertRaises(OSError): self.loop.run_until_complete(coro) self.assertTrue(sock.close.called) sock.bind.side_effect = MemoryError coro = self.loop.create_unix_server(lambda: None, path="/test") with self.assertRaises(MemoryError): self.loop.run_until_complete(coro) self.assertTrue(sock.close.called) def test_create_unix_connection_path_sock(self): coro = self.loop.create_unix_connection( lambda: None, os.devnull, sock=object()) with 
self.assertRaisesRegex(ValueError, 'path and sock can not be'): self.loop.run_until_complete(coro) def test_create_unix_connection_nopath_nosock(self): coro = self.loop.create_unix_connection( lambda: None, None) with self.assertRaisesRegex(ValueError, 'no path and sock were specified'): self.loop.run_until_complete(coro) def test_create_unix_connection_nossl_serverhost(self): coro = self.loop.create_unix_connection( lambda: None, os.devnull, server_hostname='spam') with self.assertRaisesRegex(ValueError, 'server_hostname is only meaningful'): self.loop.run_until_complete(coro) def test_create_unix_connection_ssl_noserverhost(self): coro = self.loop.create_unix_connection( lambda: None, os.devnull, ssl=True) with self.assertRaisesRegex( ValueError, 'you have to pass server_hostname when using ssl'): self.loop.run_until_complete(coro) def test_create_unix_connection_ssl_timeout_with_plain_sock(self): coro = self.loop.create_unix_connection(lambda: None, path='spam', ssl_handshake_timeout=1) with self.assertRaisesRegex( ValueError, 'ssl_handshake_timeout is only meaningful with ssl'): self.loop.run_until_complete(coro) @unittest.skipUnless(hasattr(os, 'sendfile'), 'sendfile is not supported') class SelectorEventLoopUnixSockSendfileTests(test_utils.TestCase): DATA = b"12345abcde" * 16 * 1024 # 160 KiB class MyProto(asyncio.Protocol): def __init__(self, loop): self.started = False self.closed = False self.data = bytearray() self.fut = loop.create_future() self.transport = None self._ready = loop.create_future() def connection_made(self, transport): self.started = True self.transport = transport self._ready.set_result(None) def data_received(self, data): self.data.extend(data) def connection_lost(self, exc): self.closed = True self.fut.set_result(None) async def wait_closed(self): await self.fut @classmethod def setUpClass(cls): with open(os_helper.TESTFN, 'wb') as fp: fp.write(cls.DATA) super().setUpClass() @classmethod def tearDownClass(cls): 
os_helper.unlink(os_helper.TESTFN) super().tearDownClass() def setUp(self): self.loop = asyncio.new_event_loop() self.set_event_loop(self.loop) self.file = open(os_helper.TESTFN, 'rb') self.addCleanup(self.file.close) super().setUp() def make_socket(self, cleanup=True): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.setblocking(False) sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024) sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024) if cleanup: self.addCleanup(sock.close) return sock def run_loop(self, coro): return self.loop.run_until_complete(coro) def prepare(self): sock = self.make_socket() proto = self.MyProto(self.loop) port = socket_helper.find_unused_port() srv_sock = self.make_socket(cleanup=False) srv_sock.bind((socket_helper.HOST, port)) server = self.run_loop(self.loop.create_server( lambda: proto, sock=srv_sock)) self.run_loop(self.loop.sock_connect(sock, (socket_helper.HOST, port))) self.run_loop(proto._ready) def cleanup(): proto.transport.close() self.run_loop(proto.wait_closed()) server.close() self.run_loop(server.wait_closed()) self.addCleanup(cleanup) return sock, proto def test_sock_sendfile_not_available(self): sock, proto = self.prepare() with mock.patch('asyncio.unix_events.os', spec=[]): with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "os[.]sendfile[(][)] is not available"): self.run_loop(self.loop._sock_sendfile_native(sock, self.file, 0, None)) self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_not_a_file(self): sock, proto = self.prepare() f = object() with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "not a regular file"): self.run_loop(self.loop._sock_sendfile_native(sock, f, 0, None)) self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_iobuffer(self): sock, proto = self.prepare() f = io.BytesIO() with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "not a regular file"): self.run_loop(self.loop._sock_sendfile_native(sock, f, 0, None)) 
self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_not_regular_file(self): sock, proto = self.prepare() f = mock.Mock() f.fileno.return_value = -1 with self.assertRaisesRegex(asyncio.SendfileNotAvailableError, "not a regular file"): self.run_loop(self.loop._sock_sendfile_native(sock, f, 0, None)) self.assertEqual(self.file.tell(), 0) def test_sock_sendfile_cancel1(self): sock, proto = self.prepare() fut = self.loop.create_future() fileno = self.file.fileno() self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) fut.cancel() with contextlib.suppress(asyncio.CancelledError): self.run_loop(fut) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) def test_sock_sendfile_cancel2(self): sock, proto = self.prepare() fut = self.loop.create_future() fileno = self.file.fileno() self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) fut.cancel() self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno, 0, None, len(self.DATA), 0) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) def test_sock_sendfile_blocking_error(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = mock.Mock() fut.cancelled.return_value = False with mock.patch('os.sendfile', side_effect=BlockingIOError()): self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) key = self.loop._selector.get_key(sock) self.assertIsNotNone(key) fut.add_done_callback.assert_called_once_with(mock.ANY) def test_sock_sendfile_os_error_first_call(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = self.loop.create_future() with mock.patch('os.sendfile', side_effect=OSError()): self.loop._sock_sendfile_native_impl(fut, None, sock, fileno, 0, None, len(self.DATA), 0) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) exc = fut.exception() self.assertIsInstance(exc, asyncio.SendfileNotAvailableError) self.assertEqual(0, 
self.file.tell()) def test_sock_sendfile_os_error_next_call(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = self.loop.create_future() err = OSError() with mock.patch('os.sendfile', side_effect=err): self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno, 1000, None, len(self.DATA), 1000) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) exc = fut.exception() self.assertIs(exc, err) self.assertEqual(1000, self.file.tell()) def test_sock_sendfile_exception(self): sock, proto = self.prepare() fileno = self.file.fileno() fut = self.loop.create_future() err = asyncio.SendfileNotAvailableError() with mock.patch('os.sendfile', side_effect=err): self.loop._sock_sendfile_native_impl(fut, sock.fileno(), sock, fileno, 1000, None, len(self.DATA), 1000) with self.assertRaises(KeyError): self.loop._selector.get_key(sock) exc = fut.exception() self.assertIs(exc, err) self.assertEqual(1000, self.file.tell()) class UnixReadPipeTransportTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.Protocol) self.pipe = mock.Mock(spec_set=io.RawIOBase) self.pipe.fileno.return_value = 5 blocking_patcher = mock.patch('os.set_blocking') blocking_patcher.start() self.addCleanup(blocking_patcher.stop) fstat_patcher = mock.patch('os.fstat') m_fstat = fstat_patcher.start() st = mock.Mock() st.st_mode = stat.S_IFIFO m_fstat.return_value = st self.addCleanup(fstat_patcher.stop) def read_pipe_transport(self, waiter=None): transport = unix_events._UnixReadPipeTransport(self.loop, self.pipe, self.protocol, waiter=waiter) self.addCleanup(close_pipe_transport, transport) return transport def test_ctor(self): waiter = self.loop.create_future() tr = self.read_pipe_transport(waiter=waiter) self.loop.run_until_complete(waiter) self.protocol.connection_made.assert_called_with(tr) self.loop.assert_reader(5, tr._read_ready) self.assertIsNone(waiter.result()) 
    @mock.patch('os.read')
    def test__read_ready(self, m_read):
        # Data available on the pipe is forwarded to the protocol.
        tr = self.read_pipe_transport()
        m_read.return_value = b'data'
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        self.protocol.data_received.assert_called_with(b'data')

    @mock.patch('os.read')
    def test__read_ready_eof(self, m_read):
        # os.read() returning b'' signals EOF: reader is removed and the
        # protocol gets eof_received() then connection_lost(None).
        tr = self.read_pipe_transport()
        m_read.return_value = b''
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        self.assertFalse(self.loop.readers)
        test_utils.run_briefly(self.loop)
        self.protocol.eof_received.assert_called_with()
        self.protocol.connection_lost.assert_called_with(None)

    @mock.patch('os.read')
    def test__read_ready_blocked(self, m_read):
        # BlockingIOError is swallowed: no data is delivered, no close.
        tr = self.read_pipe_transport()
        m_read.side_effect = BlockingIOError
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        test_utils.run_briefly(self.loop)
        self.assertFalse(self.protocol.data_received.called)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.read')
    def test__read_ready_error(self, m_read, m_logexc):
        # A real OSError closes the transport and logs a fatal read error.
        tr = self.read_pipe_transport()
        err = OSError()
        m_read.side_effect = err
        tr._close = mock.Mock()
        tr._read_ready()
        m_read.assert_called_with(5, tr.max_size)
        tr._close.assert_called_with(err)
        m_logexc.assert_called_with(
            test_utils.MockPattern(
                'Fatal read error on pipe transport'
                '\nprotocol:.*\ntransport:.*'),
            exc_info=(OSError, MOCK_ANY, MOCK_ANY))

    @mock.patch('os.read')
    def test_pause_reading(self, m_read):
        # pause_reading() removes the fd 5 reader from the loop.
        tr = self.read_pipe_transport()
        m = mock.Mock()
        self.loop.add_reader(5, m)
        tr.pause_reading()
        self.assertFalse(self.loop.readers)

    @mock.patch('os.read')
    def test_resume_reading(self, m_read):
        # resume_reading() re-registers _read_ready for fd 5.
        tr = self.read_pipe_transport()
        tr.pause_reading()
        tr.resume_reading()
        self.loop.assert_reader(5, tr._read_ready)

    @mock.patch('os.read')
    def test_close(self, m_read):
        # close() delegates to _close(None).
        tr = self.read_pipe_transport()
        tr._close = mock.Mock()
        tr.close()
        tr._close.assert_called_with(None)

    @mock.patch('os.read')
    def test_close_already_closing(self, m_read):
        # Closing an already-closing transport must be a no-op.
        tr = self.read_pipe_transport()
        tr._closing = True
        tr._close =
mock.Mock() tr.close() self.assertFalse(tr._close.called) @mock.patch('os.read') def test__close(self, m_read): tr = self.read_pipe_transport() err = object() tr._close(err) self.assertTrue(tr.is_closing()) self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(err) def test__call_connection_lost(self): tr = self.read_pipe_transport() self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = None tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) def test__call_connection_lost_with_err(self): tr = self.read_pipe_transport() self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = OSError() tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) def test_pause_reading_on_closed_pipe(self): tr = self.read_pipe_transport() tr.close() test_utils.run_briefly(self.loop) self.assertIsNone(tr._loop) tr.pause_reading() def test_pause_reading_on_paused_pipe(self): tr = self.read_pipe_transport() tr.pause_reading() # the second call should do nothing tr.pause_reading() def test_resume_reading_on_closed_pipe(self): tr = self.read_pipe_transport() tr.close() test_utils.run_briefly(self.loop) self.assertIsNone(tr._loop) tr.resume_reading() def test_resume_reading_on_paused_pipe(self): tr = self.read_pipe_transport() # the pipe is not paused # resuming should do nothing tr.resume_reading() class UnixWritePipeTransportTests(test_utils.TestCase): def setUp(self): super().setUp() self.loop = self.new_test_loop() self.protocol = test_utils.make_test_protocol(asyncio.BaseProtocol) self.pipe = mock.Mock(spec_set=io.RawIOBase) self.pipe.fileno.return_value = 5 blocking_patcher = mock.patch('os.set_blocking') blocking_patcher.start() 
self.addCleanup(blocking_patcher.stop) fstat_patcher = mock.patch('os.fstat') m_fstat = fstat_patcher.start() st = mock.Mock() st.st_mode = stat.S_IFSOCK m_fstat.return_value = st self.addCleanup(fstat_patcher.stop) def write_pipe_transport(self, waiter=None): transport = unix_events._UnixWritePipeTransport(self.loop, self.pipe, self.protocol, waiter=waiter) self.addCleanup(close_pipe_transport, transport) return transport def test_ctor(self): waiter = self.loop.create_future() tr = self.write_pipe_transport(waiter=waiter) self.loop.run_until_complete(waiter) self.protocol.connection_made.assert_called_with(tr) self.loop.assert_reader(5, tr._read_ready) self.assertEqual(None, waiter.result()) def test_can_write_eof(self): tr = self.write_pipe_transport() self.assertTrue(tr.can_write_eof()) @mock.patch('os.write') def test_write(self, m_write): tr = self.write_pipe_transport() m_write.return_value = 4 tr.write(b'data') m_write.assert_called_with(5, b'data') self.assertFalse(self.loop.writers) self.assertEqual(bytearray(), tr._buffer) @mock.patch('os.write') def test_write_no_data(self, m_write): tr = self.write_pipe_transport() tr.write(b'') self.assertFalse(m_write.called) self.assertFalse(self.loop.writers) self.assertEqual(bytearray(b''), tr._buffer) @mock.patch('os.write') def test_write_partial(self, m_write): tr = self.write_pipe_transport() m_write.return_value = 2 tr.write(b'data') self.loop.assert_writer(5, tr._write_ready) self.assertEqual(bytearray(b'ta'), tr._buffer) @mock.patch('os.write') def test_write_buffer(self, m_write): tr = self.write_pipe_transport() self.loop.add_writer(5, tr._write_ready) tr._buffer = bytearray(b'previous') tr.write(b'data') self.assertFalse(m_write.called) self.loop.assert_writer(5, tr._write_ready) self.assertEqual(bytearray(b'previousdata'), tr._buffer) @mock.patch('os.write') def test_write_again(self, m_write): tr = self.write_pipe_transport() m_write.side_effect = BlockingIOError() tr.write(b'data') 
m_write.assert_called_with(5, bytearray(b'data')) self.loop.assert_writer(5, tr._write_ready) self.assertEqual(bytearray(b'data'), tr._buffer) @mock.patch('asyncio.unix_events.logger') @mock.patch('os.write') def test_write_err(self, m_write, m_log): tr = self.write_pipe_transport() err = OSError() m_write.side_effect = err tr._fatal_error = mock.Mock() tr.write(b'data') m_write.assert_called_with(5, b'data') self.assertFalse(self.loop.writers) self.assertEqual(bytearray(), tr._buffer) tr._fatal_error.assert_called_with( err, 'Fatal write error on pipe transport') self.assertEqual(1, tr._conn_lost) tr.write(b'data') self.assertEqual(2, tr._conn_lost) tr.write(b'data') tr.write(b'data') tr.write(b'data') tr.write(b'data') # This is a bit overspecified. :-( m_log.warning.assert_called_with( 'pipe closed by peer or os.write(pipe, data) raised exception.') tr.close() @mock.patch('os.write') def test_write_close(self, m_write): tr = self.write_pipe_transport() tr._read_ready() # pipe was closed by peer tr.write(b'data') self.assertEqual(tr._conn_lost, 1) tr.write(b'data') self.assertEqual(tr._conn_lost, 2) def test__read_ready(self): tr = self.write_pipe_transport() tr._read_ready() self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) self.assertTrue(tr.is_closing()) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) @mock.patch('os.write') def test__write_ready(self, m_write): tr = self.write_pipe_transport() self.loop.add_writer(5, tr._write_ready) tr._buffer = bytearray(b'data') m_write.return_value = 4 tr._write_ready() self.assertFalse(self.loop.writers) self.assertEqual(bytearray(), tr._buffer) @mock.patch('os.write') def test__write_ready_partial(self, m_write): tr = self.write_pipe_transport() self.loop.add_writer(5, tr._write_ready) tr._buffer = bytearray(b'data') m_write.return_value = 3 tr._write_ready() self.loop.assert_writer(5, tr._write_ready) self.assertEqual(bytearray(b'a'), tr._buffer) 
    @mock.patch('os.write')
    def test__write_ready_again(self, m_write):
        # BlockingIOError from os.write keeps the buffer intact and the
        # writer registered for a later retry.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = BlockingIOError()
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('os.write')
    def test__write_ready_empty(self, m_write):
        # os.write() returning 0 means nothing was flushed: buffer and
        # writer registration must be unchanged.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.return_value = 0
        tr._write_ready()
        m_write.assert_called_with(5, bytearray(b'data'))
        self.loop.assert_writer(5, tr._write_ready)
        self.assertEqual(bytearray(b'data'), tr._buffer)

    @mock.patch('asyncio.log.logger.error')
    @mock.patch('os.write')
    def test__write_ready_err(self, m_write, m_logexc):
        # An OSError during a buffered write tears the transport down and
        # reports the error to the protocol (no fatal-error log expected).
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._buffer = bytearray(b'data')
        m_write.side_effect = err = OSError()
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.assertTrue(tr.is_closing())
        m_logexc.assert_not_called()
        self.assertEqual(1, tr._conn_lost)
        test_utils.run_briefly(self.loop)
        self.protocol.connection_lost.assert_called_with(err)

    @mock.patch('os.write')
    def test__write_ready_closing(self, m_write):
        # Flushing the last buffered bytes while closing completes the
        # shutdown: connection_lost(None) and the pipe is closed.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        tr._closing = True
        tr._buffer = bytearray(b'data')
        m_write.return_value = 4
        tr._write_ready()
        self.assertFalse(self.loop.writers)
        self.assertFalse(self.loop.readers)
        self.assertEqual(bytearray(), tr._buffer)
        self.protocol.connection_lost.assert_called_with(None)
        self.pipe.close.assert_called_with()

    @mock.patch('os.write')
    def test_abort(self, m_write):
        # abort() drops the buffer without writing it.
        tr = self.write_pipe_transport()
        self.loop.add_writer(5, tr._write_ready)
        self.loop.add_reader(5, tr._read_ready)
        tr._buffer = [b'da', b'ta']
        tr.abort()
        self.assertFalse(m_write.called)
self.assertFalse(self.loop.readers) self.assertFalse(self.loop.writers) self.assertEqual([], tr._buffer) self.assertTrue(tr.is_closing()) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) def test__call_connection_lost(self): tr = self.write_pipe_transport() self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = None tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) def test__call_connection_lost_with_err(self): tr = self.write_pipe_transport() self.assertIsNotNone(tr._protocol) self.assertIsNotNone(tr._loop) err = OSError() tr._call_connection_lost(err) self.protocol.connection_lost.assert_called_with(err) self.pipe.close.assert_called_with() self.assertIsNone(tr._protocol) self.assertIsNone(tr._loop) def test_close(self): tr = self.write_pipe_transport() tr.write_eof = mock.Mock() tr.close() tr.write_eof.assert_called_with() # closing the transport twice must not fail tr.close() def test_close_closing(self): tr = self.write_pipe_transport() tr.write_eof = mock.Mock() tr._closing = True tr.close() self.assertFalse(tr.write_eof.called) def test_write_eof(self): tr = self.write_pipe_transport() tr.write_eof() self.assertTrue(tr.is_closing()) self.assertFalse(self.loop.readers) test_utils.run_briefly(self.loop) self.protocol.connection_lost.assert_called_with(None) def test_write_eof_pending(self): tr = self.write_pipe_transport() tr._buffer = [b'data'] tr.write_eof() self.assertTrue(tr.is_closing()) self.assertFalse(self.protocol.connection_lost.called) class AbstractChildWatcherTests(unittest.TestCase): def test_not_implemented(self): f = mock.Mock() watcher = asyncio.AbstractChildWatcher() self.assertRaises( NotImplementedError, watcher.add_child_handler, f, f) self.assertRaises( NotImplementedError, watcher.remove_child_handler, f) self.assertRaises( NotImplementedError, 
watcher.attach_loop, f) self.assertRaises( NotImplementedError, watcher.close) self.assertRaises( NotImplementedError, watcher.is_active) self.assertRaises( NotImplementedError, watcher.__enter__) self.assertRaises( NotImplementedError, watcher.__exit__, f, f, f) class BaseChildWatcherTests(unittest.TestCase): def test_not_implemented(self): f = mock.Mock() watcher = unix_events.BaseChildWatcher() self.assertRaises( NotImplementedError, watcher._do_waitpid, f) class ChildWatcherTestsMixin: ignore_warnings = mock.patch.object(log.logger, "warning") def setUp(self): super().setUp() self.loop = self.new_test_loop() self.running = False self.zombies = {} with mock.patch.object( self.loop, "add_signal_handler") as self.m_add_signal_handler: self.watcher = self.create_watcher() self.watcher.attach_loop(self.loop) def waitpid(self, pid, flags): if isinstance(self.watcher, asyncio.SafeChildWatcher) or pid != -1: self.assertGreater(pid, 0) try: if pid < 0: return self.zombies.popitem() else: return pid, self.zombies.pop(pid) except KeyError: pass if self.running: return 0, 0 else: raise ChildProcessError() def add_zombie(self, pid, status): self.zombies[pid] = status def waitstatus_to_exitcode(self, status): if status > 32768: return status - 32768 elif 32700 < status < 32768: return status - 32768 else: return status def test_create_watcher(self): self.m_add_signal_handler.assert_called_once_with( signal.SIGCHLD, self.watcher._sig_chld) def waitpid_mocks(func): def wrapped_func(self): def patch(target, wrapper): return mock.patch(target, wraps=wrapper, new_callable=mock.Mock) with patch('asyncio.unix_events.waitstatus_to_exitcode', self.waitstatus_to_exitcode), \ patch('os.waitpid', self.waitpid) as m_waitpid: func(self, m_waitpid) return wrapped_func @waitpid_mocks def test_sigchld(self, m_waitpid): # register a child callback = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(42, callback, 9, 10, 14) self.assertFalse(callback.called) # 
child is running self.watcher._sig_chld() self.assertFalse(callback.called) # child terminates (returncode 12) self.running = False self.add_zombie(42, EXITCODE(12)) self.watcher._sig_chld() callback.assert_called_once_with(42, 12, 9, 10, 14) callback.reset_mock() # ensure that the child is effectively reaped self.add_zombie(42, EXITCODE(13)) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) # sigchld called again self.zombies.clear() self.watcher._sig_chld() self.assertFalse(callback.called) @waitpid_mocks def test_sigchld_two_children(self, m_waitpid): callback1 = mock.Mock() callback2 = mock.Mock() # register child 1 with self.watcher: self.running = True self.watcher.add_child_handler(43, callback1, 7, 8) self.assertFalse(callback1.called) self.assertFalse(callback2.called) # register child 2 with self.watcher: self.watcher.add_child_handler(44, callback2, 147, 18) self.assertFalse(callback1.called) self.assertFalse(callback2.called) # children are running self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) # child 1 terminates (signal 3) self.add_zombie(43, SIGNAL(3)) self.watcher._sig_chld() callback1.assert_called_once_with(43, -3, 7, 8) self.assertFalse(callback2.called) callback1.reset_mock() # child 2 still running self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) # child 2 terminates (code 108) self.add_zombie(44, EXITCODE(108)) self.running = False self.watcher._sig_chld() callback2.assert_called_once_with(44, 108, 147, 18) self.assertFalse(callback1.called) callback2.reset_mock() # ensure that the children are effectively reaped self.add_zombie(43, EXITCODE(14)) self.add_zombie(44, EXITCODE(15)) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) # sigchld called again self.zombies.clear() self.watcher._sig_chld() self.assertFalse(callback1.called) 
self.assertFalse(callback2.called) @waitpid_mocks def test_sigchld_two_children_terminating_together(self, m_waitpid): callback1 = mock.Mock() callback2 = mock.Mock() # register child 1 with self.watcher: self.running = True self.watcher.add_child_handler(45, callback1, 17, 8) self.assertFalse(callback1.called) self.assertFalse(callback2.called) # register child 2 with self.watcher: self.watcher.add_child_handler(46, callback2, 1147, 18) self.assertFalse(callback1.called) self.assertFalse(callback2.called) # children are running self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) # child 1 terminates (code 78) # child 2 terminates (signal 5) self.add_zombie(45, EXITCODE(78)) self.add_zombie(46, SIGNAL(5)) self.running = False self.watcher._sig_chld() callback1.assert_called_once_with(45, 78, 17, 8) callback2.assert_called_once_with(46, -5, 1147, 18) callback1.reset_mock() callback2.reset_mock() # ensure that the children are effectively reaped self.add_zombie(45, EXITCODE(14)) self.add_zombie(46, EXITCODE(15)) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) @waitpid_mocks def test_sigchld_race_condition(self, m_waitpid): # register a child callback = mock.Mock() with self.watcher: # child terminates before being registered self.add_zombie(50, EXITCODE(4)) self.watcher._sig_chld() self.watcher.add_child_handler(50, callback, 1, 12) callback.assert_called_once_with(50, 4, 1, 12) callback.reset_mock() # ensure that the child is effectively reaped self.add_zombie(50, SIGNAL(1)) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) @waitpid_mocks def test_sigchld_replace_handler(self, m_waitpid): callback1 = mock.Mock() callback2 = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(51, callback1, 19) self.assertFalse(callback1.called) self.assertFalse(callback2.called) # 
register the same child again with self.watcher: self.watcher.add_child_handler(51, callback2, 21) self.assertFalse(callback1.called) self.assertFalse(callback2.called) # child terminates (signal 8) self.running = False self.add_zombie(51, SIGNAL(8)) self.watcher._sig_chld() callback2.assert_called_once_with(51, -8, 21) self.assertFalse(callback1.called) callback2.reset_mock() # ensure that the child is effectively reaped self.add_zombie(51, EXITCODE(13)) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) @waitpid_mocks def test_sigchld_remove_handler(self, m_waitpid): callback = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(52, callback, 1984) self.assertFalse(callback.called) # unregister the child self.watcher.remove_child_handler(52) self.assertFalse(callback.called) # child terminates (code 99) self.running = False self.add_zombie(52, EXITCODE(99)) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) @waitpid_mocks def test_sigchld_unknown_status(self, m_waitpid): callback = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(53, callback, -19) self.assertFalse(callback.called) # terminate with unknown status self.zombies[53] = 1178 self.running = False self.watcher._sig_chld() callback.assert_called_once_with(53, 1178, -19) callback.reset_mock() # ensure that the child is effectively reaped self.add_zombie(53, EXITCODE(101)) with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback.called) @waitpid_mocks def test_remove_child_handler(self, m_waitpid): callback1 = mock.Mock() callback2 = mock.Mock() callback3 = mock.Mock() # register children with self.watcher: self.running = True self.watcher.add_child_handler(54, callback1, 1) self.watcher.add_child_handler(55, callback2, 2) self.watcher.add_child_handler(56, callback3, 3) # remove 
child handler 1 self.assertTrue(self.watcher.remove_child_handler(54)) # remove child handler 2 multiple times self.assertTrue(self.watcher.remove_child_handler(55)) self.assertFalse(self.watcher.remove_child_handler(55)) self.assertFalse(self.watcher.remove_child_handler(55)) # all children terminate self.add_zombie(54, EXITCODE(0)) self.add_zombie(55, EXITCODE(1)) self.add_zombie(56, EXITCODE(2)) self.running = False with self.ignore_warnings: self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) callback3.assert_called_once_with(56, 2, 3) @waitpid_mocks def test_sigchld_unhandled_exception(self, m_waitpid): callback = mock.Mock() # register a child with self.watcher: self.running = True self.watcher.add_child_handler(57, callback) # raise an exception m_waitpid.side_effect = ValueError with mock.patch.object(log.logger, 'error') as m_error: self.assertEqual(self.watcher._sig_chld(), None) self.assertTrue(m_error.called) @waitpid_mocks def test_sigchld_child_reaped_elsewhere(self, m_waitpid): # register a child callback = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(58, callback) self.assertFalse(callback.called) # child terminates self.running = False self.add_zombie(58, EXITCODE(4)) # waitpid is called elsewhere os.waitpid(58, os.WNOHANG) m_waitpid.reset_mock() # sigchld with self.ignore_warnings: self.watcher._sig_chld() if isinstance(self.watcher, asyncio.FastChildWatcher): # here the FastChildWatche enters a deadlock # (there is no way to prevent it) self.assertFalse(callback.called) else: callback.assert_called_once_with(58, 255) @waitpid_mocks def test_sigchld_unknown_pid_during_registration(self, m_waitpid): # register two children callback1 = mock.Mock() callback2 = mock.Mock() with self.ignore_warnings, self.watcher: self.running = True # child 1 terminates self.add_zombie(591, EXITCODE(7)) # an unknown child terminates self.add_zombie(593, EXITCODE(17)) 
self.watcher._sig_chld() self.watcher.add_child_handler(591, callback1) self.watcher.add_child_handler(592, callback2) callback1.assert_called_once_with(591, 7) self.assertFalse(callback2.called) @waitpid_mocks def test_set_loop(self, m_waitpid): # register a child callback = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(60, callback) # attach a new loop old_loop = self.loop self.loop = self.new_test_loop() patch = mock.patch.object with patch(old_loop, "remove_signal_handler") as m_old_remove, \ patch(self.loop, "add_signal_handler") as m_new_add: self.watcher.attach_loop(self.loop) m_old_remove.assert_called_once_with( signal.SIGCHLD) m_new_add.assert_called_once_with( signal.SIGCHLD, self.watcher._sig_chld) # child terminates self.running = False self.add_zombie(60, EXITCODE(9)) self.watcher._sig_chld() callback.assert_called_once_with(60, 9) @waitpid_mocks def test_set_loop_race_condition(self, m_waitpid): # register 3 children callback1 = mock.Mock() callback2 = mock.Mock() callback3 = mock.Mock() with self.watcher: self.running = True self.watcher.add_child_handler(61, callback1) self.watcher.add_child_handler(62, callback2) self.watcher.add_child_handler(622, callback3) # detach the loop old_loop = self.loop self.loop = None with mock.patch.object( old_loop, "remove_signal_handler") as m_remove_signal_handler: with self.assertWarnsRegex( RuntimeWarning, 'A loop is being detached'): self.watcher.attach_loop(None) m_remove_signal_handler.assert_called_once_with( signal.SIGCHLD) # child 1 & 2 terminate self.add_zombie(61, EXITCODE(11)) self.add_zombie(62, SIGNAL(5)) # SIGCHLD was not caught self.assertFalse(callback1.called) self.assertFalse(callback2.called) self.assertFalse(callback3.called) # attach a new loop self.loop = self.new_test_loop() with mock.patch.object( self.loop, "add_signal_handler") as m_add_signal_handler: self.watcher.attach_loop(self.loop) m_add_signal_handler.assert_called_once_with( signal.SIGCHLD, 
self.watcher._sig_chld) callback1.assert_called_once_with(61, 11) # race condition! callback2.assert_called_once_with(62, -5) # race condition! self.assertFalse(callback3.called) callback1.reset_mock() callback2.reset_mock() # child 3 terminates self.running = False self.add_zombie(622, EXITCODE(19)) self.watcher._sig_chld() self.assertFalse(callback1.called) self.assertFalse(callback2.called) callback3.assert_called_once_with(622, 19) @waitpid_mocks def test_close(self, m_waitpid): # register two children callback1 = mock.Mock() with self.watcher: self.running = True # child 1 terminates self.add_zombie(63, EXITCODE(9)) # other child terminates self.add_zombie(65, EXITCODE(18)) self.watcher._sig_chld() self.watcher.add_child_handler(63, callback1) self.watcher.add_child_handler(64, callback1) self.assertEqual(len(self.watcher._callbacks), 1) if isinstance(self.watcher, asyncio.FastChildWatcher): self.assertEqual(len(self.watcher._zombies), 1) with mock.patch.object( self.loop, "remove_signal_handler") as m_remove_signal_handler: self.watcher.close() m_remove_signal_handler.assert_called_once_with( signal.SIGCHLD) self.assertFalse(self.watcher._callbacks) if isinstance(self.watcher, asyncio.FastChildWatcher): self.assertFalse(self.watcher._zombies) class SafeChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): def create_watcher(self): return asyncio.SafeChildWatcher() class FastChildWatcherTests (ChildWatcherTestsMixin, test_utils.TestCase): def create_watcher(self): return asyncio.FastChildWatcher() class PolicyTests(unittest.TestCase): def create_policy(self): return asyncio.DefaultEventLoopPolicy() def test_get_default_child_watcher(self): policy = self.create_policy() self.assertIsNone(policy._watcher) watcher = policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.ThreadedChildWatcher) self.assertIs(policy._watcher, watcher) self.assertIs(watcher, policy.get_child_watcher()) def test_get_child_watcher_after_set(self): policy = 
self.create_policy() watcher = asyncio.FastChildWatcher() policy.set_child_watcher(watcher) self.assertIs(policy._watcher, watcher) self.assertIs(watcher, policy.get_child_watcher()) def test_get_child_watcher_thread(self): def f(): policy.set_event_loop(policy.new_event_loop()) self.assertIsInstance(policy.get_event_loop(), asyncio.AbstractEventLoop) watcher = policy.get_child_watcher() self.assertIsInstance(watcher, asyncio.SafeChildWatcher) self.assertIsNone(watcher._loop) policy.get_event_loop().close() policy = self.create_policy() policy.set_child_watcher(asyncio.SafeChildWatcher()) th = threading.Thread(target=f) th.start() th.join() def test_child_watcher_replace_mainloop_existing(self): policy = self.create_policy() loop = policy.get_event_loop() # Explicitly setup SafeChildWatcher, # default ThreadedChildWatcher has no _loop property watcher = asyncio.SafeChildWatcher() policy.set_child_watcher(watcher) watcher.attach_loop(loop) self.assertIs(watcher._loop, loop) new_loop = policy.new_event_loop() policy.set_event_loop(new_loop) self.assertIs(watcher._loop, new_loop) policy.set_event_loop(None) self.assertIs(watcher._loop, None) loop.close() new_loop.close() class TestFunctional(unittest.TestCase): def setUp(self): self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) def tearDown(self): self.loop.close() asyncio.set_event_loop(None) def test_add_reader_invalid_argument(self): def assert_raises(): return self.assertRaisesRegex(ValueError, r'Invalid file object') cb = lambda: None with assert_raises(): self.loop.add_reader(object(), cb) with assert_raises(): self.loop.add_writer(object(), cb) with assert_raises(): self.loop.remove_reader(object()) with assert_raises(): self.loop.remove_writer(object()) def test_add_reader_or_writer_transport_fd(self): def assert_raises(): return self.assertRaisesRegex( RuntimeError, r'File descriptor .* is used by transport') async def runner(): tr, pr = await self.loop.create_connection( lambda: 
asyncio.Protocol(), sock=rsock) try: cb = lambda: None with assert_raises(): self.loop.add_reader(rsock, cb) with assert_raises(): self.loop.add_reader(rsock.fileno(), cb) with assert_raises(): self.loop.remove_reader(rsock) with assert_raises(): self.loop.remove_reader(rsock.fileno()) with assert_raises(): self.loop.add_writer(rsock, cb) with assert_raises(): self.loop.add_writer(rsock.fileno(), cb) with assert_raises(): self.loop.remove_writer(rsock) with assert_raises(): self.loop.remove_writer(rsock.fileno()) finally: tr.close() rsock, wsock = socket.socketpair() try: self.loop.run_until_complete(runner()) finally: rsock.close() wsock.close() if __name__ == '__main__': unittest.main()
__init__.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
A Python module to maintain unique, run-wide *dMRIPrep* settings.

This module implements the memory structures to keep a consistent, singleton config.
Settings are passed across processes via filesystem, and a copy of the settings for
each run and subject is left under
``<output_dir>/sub-<participant_id>/log/<run_unique_id>/dmriprep.toml``.
Settings are stored using :abbr:`ToML (Tom's Markup Language)`.
The module has a :py:func:`~dmriprep.config.to_filename` function to allow writing out
the settings to hard disk in *ToML* format, which looks like:

.. literalinclude:: ../../dmriprep/data/tests/config.toml
   :language: toml
   :name: dmriprep.toml
   :caption: **Example file representation of dMRIPrep settings**.

This config file is used to pass the settings across processes,
using the :py:func:`~dmriprep.config.load` function.

Configuration sections
----------------------
.. autoclass:: environment
   :members:
.. autoclass:: execution
   :members:
.. autoclass:: workflow
   :members:
.. autoclass:: nipype
   :members:

Usage
-----
A config file is used to pass settings and collect information as the execution
graph is built across processes.

.. code-block:: Python

    from dmriprep import config
    config_file = config.execution.work_dir / '.dmriprep.toml'
    config.to_filename(config_file)
    # Call build_workflow(config_file, retval) in a subprocess
    with Manager() as mgr:
        from .workflow import build_workflow
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()
    config.load(config_file)
    # Access configs from any code section as:
    value = config.section.setting

Logging
-------
.. autoclass:: loggers
   :members:

Other responsibilities
----------------------
The :py:mod:`config` is responsible for other convenience actions.

* Switching Python's :obj:`multiprocessing` to *forkserver* mode.
* Set up a filter for warnings as early as possible.
* Automated I/O magic operations. Some conversions need to happen in the
  store/load processes (e.g., from/to :obj:`~pathlib.Path` \<-\> :obj:`str`,
  :py:class:`~bids.layout.BIDSLayout`, etc.)

"""
from multiprocessing import set_start_method
import warnings

# Silence noisy warnings emitted by dependencies before anything else runs.
# cmp is not used by dmriprep, so ignore nipype-generated warnings
warnings.filterwarnings('ignore', 'cmp not installed')
warnings.filterwarnings('ignore', 'This has not been fully tested. Please report any failures.')
warnings.filterwarnings('ignore', "sklearn.externals.joblib is deprecated in 0.21")
warnings.filterwarnings('ignore', "can't resolve package from __spec__ or __package__")
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=ResourceWarning)

try:
    set_start_method('forkserver')
except RuntimeError:
    pass  # context has been already set
finally:
    # Defer all custom import for after initializing the forkserver and
    # ignoring the most annoying warnings
    import os
    import sys
    import logging

    from uuid import uuid4
    from pathlib import Path
    from time import strftime
    from niworkflows.utils.spaces import SpatialReferences as _SRs, Reference as _Ref
    from nipype import logging as nlogging, __version__ as _nipype_ver
    from templateflow import __version__ as _tf_ver
    from .. import __version__


def redirect_warnings(message, category, filename, lineno, file=None, line=None):
    """Redirect other warnings."""
    # Routed through the root logger at DEBUG so warnings do not hit stderr.
    logger = logging.getLogger()
    logger.debug('Captured warning (%s): %s', category, message)


# Replace the global warning display hook with the logger-based redirect above.
warnings.showwarning = redirect_warnings

logging.addLevelName(25, 'IMPORTANT')  # Add a new level between INFO and WARNING
logging.addLevelName(15, 'VERBOSE')  # Add a new level between INFO and DEBUG

DEFAULT_MEMORY_MIN_GB = 0.01

NONSTANDARD_REFERENCES = ['anat', 'T1w', 'dwi', 'fsnative']

# --- Probe the execution environment at import time. ---
_exec_env = os.name
_docker_ver = None
# special variable set in the container
if os.getenv('IS_DOCKER_8395080871'):
    _exec_env = 'singularity'
    _cgroup = Path('/proc/1/cgroup')
    # Distinguish Docker from Singularity by inspecting PID 1's cgroup.
    if _cgroup.exists() and 'docker' in _cgroup.read_text():
        _docker_ver = os.getenv('DOCKER_VERSION_8395080871')
        _exec_env = 'dmriprep-docker' if _docker_ver else 'docker'
    del _cgroup

_fs_license = os.getenv('FS_LICENSE')
if _fs_license is None and os.getenv('FREESURFER_HOME'):
    # Fall back to the conventional license location inside FREESURFER_HOME.
    _fs_license = os.path.join(os.getenv('FREESURFER_HOME'), 'license.txt')

# NOTE(review): os.getenv('HOME') may be None in minimal containers, which
# would make os.path.join raise TypeError here — confirm HOME is guaranteed.
_templateflow_home = Path(os.getenv(
    'TEMPLATEFLOW_HOME',
    os.path.join(os.getenv('HOME'), '.cache', 'templateflow'))
)

try:
    from psutil import virtual_memory
    _free_mem_at_start = round(virtual_memory().free / 1024**3, 1)
except Exception:
    # psutil is optional; report "unknown" free memory if unavailable.
    _free_mem_at_start = None

_oc_limit = 'n/a'
_oc_policy = 'n/a'
try:
    # Memory policy may have a large effect on types of errors experienced
    _proc_oc_path = Path('/proc/sys/vm/overcommit_memory')
    if _proc_oc_path.exists():
        _oc_policy = {
            '0': 'heuristic', '1': 'always', '2': 'never'
        }.get(_proc_oc_path.read_text().strip(), 'unknown')
        if _oc_policy != 'never':
            _proc_oc_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
            if _proc_oc_kbytes.exists():
                _oc_limit = _proc_oc_kbytes.read_text().strip()
            if _oc_limit in ('0', 'n/a') and Path('/proc/sys/vm/overcommit_ratio').exists():
                _oc_limit = '{}%'.format(
                    Path('/proc/sys/vm/overcommit_ratio').read_text().strip()
                )
except Exception:
    # Best-effort probing only; any failure leaves the 'n/a' defaults.
    pass


class _Config:
    """An abstract class forbidding instantiation."""

    # Subclasses list here the attribute names that must round-trip as Paths.
    _paths = tuple()

    def __init__(self):
        """Avert instantiation."""
        raise RuntimeError('Configuration type is not instantiable.')

    @classmethod
    def load(cls, settings, init=True):
        """Store settings from a dictionary."""
        for k, v in settings.items():
            if v is None:
                continue
            if k in cls._paths:
                setattr(cls, k, Path(v).absolute())
                continue
            if hasattr(cls, k):
                setattr(cls, k, v)

        if init:
            try:
                cls.init()
            except AttributeError:
                # Sections without an init() classmethod are simply skipped.
                pass

    @classmethod
    def get(cls):
        """Return defined settings."""
        out = {}
        for k, v in cls.__dict__.items():
            # Skip private attributes, unset values, and methods.
            if k.startswith('_') or v is None:
                continue
            if callable(getattr(cls, k)):
                continue
            if k in cls._paths:
                v = str(v)
            # Serialize spatial-reference objects to their string form.
            if isinstance(v, _SRs):
                v = ' '.join([str(s) for s in v.references]) or None
            if isinstance(v, _Ref):
                v = str(v) or None
            out[k] = v
        return out


class environment(_Config):
    """
    Read-only options regarding the platform and environment.

    Crawls runtime descriptive settings (e.g., default FreeSurfer license,
    execution environment, nipype and *dMRIPrep* versions, etc.).
    The ``environment`` section is not loaded in from file,
    only written out when settings are exported.
    This config section is useful when reporting issues,
    and these variables are tracked whenever the user does not
    opt-out using the ``--notrack`` argument.

    """

    cpu_count = os.cpu_count()
    """Number of available CPUs."""
    exec_docker_version = _docker_ver
    """Version of Docker Engine."""
    exec_env = _exec_env
    """A string representing the execution platform."""
    free_mem = _free_mem_at_start
    """Free memory at start."""
    overcommit_policy = _oc_policy
    """Linux's kernel virtual memory overcommit policy."""
    overcommit_limit = _oc_limit
    """Linux's kernel virtual memory overcommit limits."""
    nipype_version = _nipype_ver
    """Nipype's current version."""
    templateflow_version = _tf_ver
    """The TemplateFlow client version installed."""
    version = __version__
    """*dMRIPrep*'s version."""


class nipype(_Config):
    """Nipype settings."""

    crashfile_format = 'txt'
    """The file format for crashfiles, either text or pickle."""
    get_linked_libs = False
    """Run NiPype's tool to enlist linked libraries for every interface."""
    memory_gb = None
    """Estimation in GB of the RAM this workflow can allocate at any given time."""
    nprocs = os.cpu_count()
    """Number of processes (compute tasks) that can be run in parallel (multiprocessing only)."""
    omp_nthreads = os.cpu_count()
    """Number of CPUs a single process can access for multithreaded execution."""
    plugin = 'MultiProc'
    """NiPype's execution plugin."""
    plugin_args = {
        'maxtasksperchild': 1,
        'raise_insufficient': False,
    }
    """Settings for NiPype's execution plugin."""
    resource_monitor = False
    """Enable resource monitor."""
    stop_on_first_crash = True
    """Whether the workflow should stop or continue after the first error."""

    @classmethod
    def get_plugin(cls):
        """Format a dictionary for Nipype consumption."""
        out = {
            'plugin': cls.plugin,
            'plugin_args': cls.plugin_args,
        }
        # nprocs/memory_gb are only meaningful for the multiprocessing plugins.
        if cls.plugin in ('MultiProc', 'LegacyMultiProc'):
            out['plugin_args']['nprocs'] = int(cls.nprocs)
            if cls.memory_gb:
                out['plugin_args']['memory_gb'] = float(cls.memory_gb)
        return out

    @classmethod
    def init(cls):
        """Set NiPype configurations."""
        from nipype import config as ncfg

        # Configure resource_monitor
        if cls.resource_monitor:
            ncfg.update_config({
                'monitoring': {
                    'enabled': cls.resource_monitor,
                    'sample_frequency': '0.5',
                    'summary_append': True,
                }
            })
            ncfg.enable_resource_monitor()

        # Nipype config (logs and execution)
        ncfg.update_config({
            'execution': {
                'crashdump_dir': str(execution.log_dir),
                'crashfile_format': cls.crashfile_format,
                'get_linked_libs': cls.get_linked_libs,
                'stop_on_first_crash': cls.stop_on_first_crash,
            }
        })


class execution(_Config):
    """Configure run-level settings."""

    bids_dir = None
    """An existing path to the dataset, which must be BIDS-compliant."""
    bids_description_hash = None
    """Checksum (SHA256) of the ``dataset_description.json`` of the BIDS dataset."""
    bids_filters = None
    """A dictionary of BIDS selection filters."""
    boilerplate_only = False
    """Only generate a boilerplate."""
    debug = False
    """Run in sloppy mode (meaning, suboptimal parameters that minimize run-time)."""
    fs_license_file = _fs_license
    """An existing file containing a FreeSurfer license."""
    fs_subjects_dir = None
    """FreeSurfer's subjects directory."""
    layout = None
    """A :py:class:`~bids.layout.BIDSLayout` object, see :py:func:`init`."""
    log_dir = None
    """The path to a directory that contains execution logs."""
    log_level = 25
    """Output verbosity."""
    low_mem = None
    """Utilize uncompressed NIfTIs and other tricks to minimize memory allocation."""
    md_only_boilerplate = False
    """Do not convert boilerplate from MarkDown to LaTex and HTML."""
    notrack = False
    """Do not monitor *dMRIPrep* using Google Analytics."""
    output_dir = None
    """Folder where derivatives will be stored."""
    output_spaces = None
    """List of (non)standard spaces designated (with the ``--output-spaces`` flag of
    the command line) as spatial references for outputs."""
    reports_only = False
    """Only build the reports, based on the reportlets found in a cached working directory."""
    run_uuid = '%s_%s' % (strftime('%Y%m%d-%H%M%S'), uuid4())
    """Unique identifier of this particular run."""
    participant_label = None
    """List of participant identifiers that are to be preprocessed."""
    templateflow_home = _templateflow_home
    """The root folder of the TemplateFlow client."""
    work_dir = Path('work').absolute()
    """Path to a working directory where intermediate results will be available."""
    write_graph = False
    """Write out the computational graph corresponding to the planned preprocessing."""

    # Cached BIDSLayout so repeated init() calls do not re-index the dataset.
    _layout = None

    _paths = (
        'bids_dir',
        'fs_license_file',
        'fs_subjects_dir',
        'layout',
        'log_dir',
        'output_dir',
        'templateflow_home',
        'work_dir',
    )

    @classmethod
    def init(cls):
        """Create a new BIDS Layout accessible with :attr:`~execution.layout`."""
        if cls._layout is None:
            import re
            from bids.layout import BIDSLayout
            work_dir = cls.work_dir / 'bids.db'
            work_dir.mkdir(exist_ok=True, parents=True)
            cls._layout = BIDSLayout(
                str(cls.bids_dir),
                validate=False,
                # database_path=str(work_dir),
                ignore=("code", "stimuli", "sourcedata", "models",
                        "derivatives", re.compile(r'^\.')))
        cls.layout = cls._layout


# These variables are not necessary anymore
del _fs_license
del _exec_env
del _nipype_ver
del _templateflow_home
del _tf_ver
del _free_mem_at_start
del _oc_limit
del _oc_policy


class workflow(_Config):
    """Configure the particular execution graph of this workflow."""

    anat_only = False
    """Execute the anatomical preprocessing only."""
    fmap_bspline = None
    """Regularize fieldmaps with a field of B-Spline basis."""
    fmap_demean = None
    """Remove the mean from fieldmaps."""
    force_syn = None
    """Run *fieldmap-less* susceptibility-derived distortions estimation."""
    hires = None
    """Run FreeSurfer ``recon-all`` with the ``-hires`` flag."""
    ignore = None
    """Ignore particular steps for *dMRIPrep*."""
    longitudinal = False
    """Run FreeSurfer ``recon-all`` with the ``-logitudinal`` flag."""
    run_reconall = True
    """Run FreeSurfer's surface reconstruction."""
    skull_strip_fixed_seed = False
    """Fix a seed for skull-stripping."""
    skull_strip_template = "OASIS30ANTs"
    """Change default brain extraction template."""
    spaces = None
    """Keeps the :py:class:`~niworkflows.utils.spaces.SpatialReferences`
    instance keeping standard and nonstandard spaces."""
    use_syn = None
    """Run *fieldmap-less* susceptibility-derived distortions estimation
    in the absence of any alternatives."""


class loggers:
    """Keep loggers easily accessible (see :py:func:`init`)."""

    # Implicit string concatenation: one format string split over two literals.
    _fmt = "%(asctime)s,%(msecs)d %(name)-2s " \
           "%(levelname)-2s:\n\t %(message)s"
    _datefmt = "%y%m%d-%H:%M:%S"

    default = logging.getLogger()
    """The root logger."""
    cli = logging.getLogger('cli')
    """Command-line interface logging."""
    workflow = nlogging.getLogger('nipype.workflow')
    """NiPype's workflow logger."""
    interface = nlogging.getLogger('nipype.interface')
    """NiPype's interface logger."""
    utils = nlogging.getLogger('nipype.utils')
    """NiPype's utils logger."""

    @classmethod
    def init(cls):
        """
        Set the log level, initialize all loggers into :py:class:`loggers`.

        * Add new logger levels (25: IMPORTANT, and 15: VERBOSE).
        * Add a new sub-logger (``cli``).
        * Logger configuration.

        """
        from nipype import config as ncfg
        _handler = logging.StreamHandler(stream=sys.stdout)
        _handler.setFormatter(
            logging.Formatter(fmt=cls._fmt, datefmt=cls._datefmt)
        )
        cls.cli.addHandler(_handler)
        # All loggers follow the verbosity configured for the run.
        cls.default.setLevel(execution.log_level)
        cls.cli.setLevel(execution.log_level)
        cls.interface.setLevel(execution.log_level)
        cls.workflow.setLevel(execution.log_level)
        cls.utils.setLevel(execution.log_level)
        ncfg.update_config({
            'logging': {'log_directory': str(execution.log_dir),
                        'log_to_file': True},
        })


def from_dict(settings):
    """Read settings from a flat dictionary."""
    # environment is read-only and therefore never loaded from settings.
    nipype.load(settings)
    execution.load(settings)
    workflow.load(settings)
    loggers.init()


def load(filename):
    """Load settings from file."""
    from toml import loads
    filename = Path(filename)
    settings = loads(filename.read_text())
    for sectionname, configs in settings.items():
        if sectionname != 'environment':
            # Dispatch each ToML section to the class of the same name.
            section = getattr(sys.modules[__name__], sectionname)
            section.load(configs)
    init_spaces()


def get(flat=False):
    """Get config as a dict."""
    settings = {
        'environment': environment.get(),
        'execution': execution.get(),
        'workflow': workflow.get(),
        'nipype': nipype.get(),
    }
    if not flat:
        return settings

    # Flattened form uses dotted 'section.key' names.
    return {'.'.join((section, k)): v
            for section, configs in settings.items()
            for k, v in configs.items()}


def dumps(flat=False):
    """Format config into toml."""
    from toml import dumps
    return dumps(get(flat=flat))


def to_filename(filename):
    """Write settings to file."""
    filename = Path(filename)
    filename.write_text(dumps())


def init_spaces(checkpoint=True):
    """Initialize the :attr:`~workflow.spaces` setting."""
    from niworkflows.utils.spaces import Reference, SpatialReferences
    spaces = execution.output_spaces or SpatialReferences()
    if not isinstance(spaces, SpatialReferences):
        # A space-separated string from the CLI is parsed into references.
        spaces = SpatialReferences(
            [ref for s in spaces.split(' ')
             for ref in Reference.from_string(s)]
        )

    if checkpoint and not spaces.is_cached():
        spaces.checkpoint()

    # Make the SpatialReferences object available
    workflow.spaces = spaces
runFastTextWiki.py
from helper import *
import multiprocessing
import fasttext.util
import sys
import time

# CLI arguments: numKeywords vectorSize maxCandidateArticles reducedSet('true'/'false')
numKeywords = int(sys.argv[1])
vectorSize = int(sys.argv[2])
maxCandidateArticles = int(sys.argv[3])
reducedSet = str(sys.argv[4])

printTimestamp("Getting candidate articles")
# The two former branches only differed in the boolean flag; pass it directly.
candidate_articles = getCandidateArticles(maxCandidateArticles, reducedSet == 'true')

printTimestamp("Loading FasTextWiki")
fasttext.util.download_model('en', if_exists='ignore')  # no-op when already cached
model = fasttext.load_model('cc.en.300.bin')

start = time.time()
workers = []
for query in range(6, 7):
    for keywordExtractor in ["TopicRank", "TfIdf", "KPMiner", "YAKE", "TextRank",
                             "SingleRank", "TopicalPageRank", "PositionRank",
                             "MultipartiteRank"]:
        worker = multiprocessing.Process(
            target=findSimilarity,
            args=(keywordExtractor, "FTWiki", model, candidate_articles,
                  query, numKeywords, vectorSize))
        worker.start()
        workers.append(worker)

# Join AFTER spawning all workers so the extractors still run in parallel.
# The original never joined (handle overwritten each iteration, join commented
# out), so the elapsed time covered only process startup and finished children
# could be left unreaped.
for worker in workers:
    worker.join()
end = time.time()
print('{:.4f} s'.format(end - start))
test_fx.py
import builtins import contextlib import copy import functools import inspect import math import numbers import operator import os import pickle import sys import torch import traceback import warnings import unittest from math import sqrt from torch.multiprocessing import Process from torch.testing import FileCheck from torch.testing._internal.common_methods_invocations import op_db from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests import torch.utils._pytree as pytree import torch.fx._pytree as fx_pytree from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH import torch._C._fx from torch.fx.node import Target, Argument from torch.fx.passes import shape_prop from torch.fx.immutable_collections import immutable_dict, immutable_list from torch.fx.experimental.rewriter import RewritingTracer from torch.fx.operator_schemas import get_signature_for_torch_op from copy import deepcopy from collections import namedtuple from torch.fx.proxy import TraceError from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401 from fx.test_dce_pass import TestDCE # noqa: F401 from fx.test_fx_const_fold import TestConstFold # noqa: F401 from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import AnnotationsTest # noqa: F401 if sys.version_info >= (3, 7): from fx.test_gradual_type import TypeCheckerTest # noqa: F401 from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union from torch.testing._internal.common_utils import ( IS_FBCODE, IS_MACOS, IS_WINDOWS, TEST_WITH_ROCM, find_library_location, run_tests, ) from torch.testing._internal.jit_utils import JitTestCase from fx.named_tup import MyNamedTup try: from torchvision import models as torchvision_models HAS_TORCHVISION = True except ImportError: HAS_TORCHVISION = False skipIfNoTorchVision = 
unittest.skipIf(not HAS_TORCHVISION, "no torchvision") class SimpleTest(torch.nn.Module): def forward(self, x): return torch.relu(x + 3.0) def a_non_torch_leaf(a, b): return a + b # Used for test_autowrap_function. Autowrapped functions need to be global def fx_int(x: float) -> int: return int(x) def fx_int_x2(x: float) -> int: return int(x) * 2 # used in test_pytree. It's all the way out here because pickling a GraphModule # that uses Point errors out if Point is local to the function Point = namedtuple('Point', ['x', 'y']) # Test wrap() passing both a function name as well as a function # directly def a_lifted_leaf(a, b): return a[0] + a[1] + b wrap('a_lifted_leaf') # Test wrapping twice doesn't break anything wrap('a_lifted_leaf') def a_lifted_leaf2(a, b): return a[0] + a[1] + b wrap(a_lifted_leaf2) wrap('len') @wrap def wrapped_via_decorator(a): return a + 1 wrap('wrapped_with_submodule') def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d): return batchnorm1d(x) real_wrapped_via_decorator = wrapped_via_decorator real_a_lifed_leaf = a_lifted_leaf real_a_lifed_leaf2 = a_lifted_leaf2 _sqrt = sqrt wrap('wrapper_fn') def wrapper_fn(x): return torch.foo(x) class Pair(NamedTuple): x : torch.Tensor y : torch.Tensor # for testing pytrees class Foo(object): # noqa: B209 def __init__(self, a, b): self.a = a self.b = b class TestFX(JitTestCase): def setUp(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: return lib_file_path = find_library_location('libtorchbind_test.so') torch.ops.load_library(str(lib_file_path)) def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None): """Check that an nn.Module's results match the GraphModule version for a given set of args/kwargs. 
""" kwargs = kwargs if kwargs else {} ref_outs = m(*args, **kwargs) gm = symbolic_trace(m) gm.graph.lint() test_outs = gm(*args, **kwargs) self.assertEqual(ref_outs, test_outs) def test_graph_module(self): class MySub(torch.nn.Module): def __init__(self): super().__init__() self.w = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.w + x class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.lin = torch.nn.Linear(4, 3) self.sub_mod = MySub() self.w = torch.nn.Parameter(torch.rand(3)) def forward(self, A, B, c): t = torch.sigmoid(A) + self.lin(c) return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3)) m = MyModule() gm = symbolic_trace(m) ms = torch.jit.script(gm) class M2(torch.nn.Module): def forward(self, A): m, idx = torch.max(A, 0) return m + 1, idx + 1 m2 = M2() gm2 = symbolic_trace(m2) class T(torch.nn.Module): def forward(self, A, b=4, *args, c=5, **kwargs): x = A + 1 + args[0] + kwargs['3'] return x t = T() symbolic_trace(t) def test_custom_import(self): graph = torch.fx.Graph() a = graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) x, y = torch.rand(1), torch.rand(1) self.assertEqual(torch.sin(x + y), gm(x, y)) def test_args_kwargs(self): class T(torch.nn.Module): def forward(self, *args, **kwargs): x = args[0] + kwargs['foo'] return x t = T() self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_args_kwargs_no_self(self): class T(torch.nn.Module): def forward(*args, **kwargs): # noqa: B902 self = args[0] return torch.relu(args[1]) t = T() with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'): self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)}) def test_fx_shifts(self): class MyModule(torch.nn.Module): def forward(self, x): return x << 3, x >> 3 input 
= torch.LongTensor(10).random_(0, 1024) m = MyModule() self.checkGraphModule(m, (input,)) def test_dict(self): class MyDictMod(torch.nn.Module): def forward(self, d): return d['3'].relu(), {'4' : d['3'].neg()} input_dict = {'3': torch.rand(3, 4)} m = MyDictMod() self.checkGraphModule(m, (input_dict,)) def test_disallow_override(self): # Custom delegate to disallow in-place tensor operations class NoMutableCallTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: name = target if isinstance(target, str) else torch.typename(target) if name[-1] == '_': raise RuntimeError('In-place operations are not supported') return super().create_node(kind, target, args, kwargs, name) # Test method class MyInplaceMod(torch.nn.Module): def forward(self, x): x.add_(3.0) return x m = MyInplaceMod() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m) # Test free function class MyInplaceMod2(torch.nn.Module): def forward(self, x): torch.log_(x) return x m2 = MyInplaceMod2() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m2) # Test symbolic node as an arg class MyInplaceMod3(torch.nn.Module): def forward(self, x): y = torch.ones(3, 4) y.add_(x) return x m3 = MyInplaceMod3() with self.assertRaisesRegex(RuntimeError, 'In-place operations'): NoMutableCallTracer().trace(m3) def test_leaf_module(self): # Custom delegate to make it so that there are no leaf modules, everything # should get traced through class NoLeafModulesTracer(Tracer): def is_leaf_module(self, m, qualname): return False class MyReluMod(torch.nn.Module): def __init__(self): super().__init__() self.relu = torch.nn.ReLU() def forward(self, x): return self.relu(x) mrm = MyReluMod() sym = NoLeafModulesTracer().trace(mrm) for node in sym.nodes: self.assertNotEqual(node.op, 'call_module') 
sym.lint() def test_wrap(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5)) def to_trace(y): return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf, real_a_lifed_leaf) def test_wrap_fn_directly(self): self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5)) def to_trace(y): return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y) m = symbolic_trace(to_trace) self.assertIn('a_lifted_leaf2', m.code) self.assertEqual(27, m(2)) self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2) def test_wrapped_via_decorator(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_wrapped_via_decorator_and_transformed(self): self.assertEqual(wrapped_via_decorator(0), 1) def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) transformed = torch.fx.Transformer(m).transform() self.assertIn('wrapped_via_decorator', transformed.code) self.assertEqual(transformed(0), 1) self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator) self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched")) def test_wrap_with_submodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) def forward(self, x: torch.Tensor): return wrapped_with_submodule(x, self.batchnorm1d) m = symbolic_trace(M()) 
self.assertIn("wrapped_with_submodule", m.code) input = torch.rand(3, 2) ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False) self.assertEqual(ref_batchnorm1d(input), m(input)) def test_wrapped_retrace(self): def to_trace(y): return wrapped_via_decorator(y) m = symbolic_trace(to_trace) self.assertIn('wrapped_via_decorator', m.code) self.assertEqual(m(0), 1) retraced = symbolic_trace(m) self.assertIn('wrapped_via_decorator', retraced.code) self.assertEqual(retraced(0), 1) def test_graph_edit_with_proxy(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. new_g.output((t + t).node) gm = GraphModule(m, new_g) gm.graph.lint() self.assertEqual(gm(3, 4), 14) def test_graph_unique_names(self): class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = symbolic_trace(m).graph new_g = torch.fx.Graph() val_map : Dict[Node, Node] = {} output_val = new_g.graph_copy(g, val_map) t = Proxy(output_val) # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules. 
new_g.output((t + t).node) gm = GraphModule(m, new_g) seen_names : Set[str] = set() for node in gm.graph.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_stack_traces(self): class M(torch.nn.Module): def forward(self, a, b): return a + b tracer = torch.fx.Tracer() tracer.record_stack_traces = True graph = tracer.trace(M()) for node in graph.nodes: if node.op == 'output': continue self.assertTrue(node.stack_trace is not None) assert 'test_fx.py' in node.stack_trace def test_graph_unique_names_manual(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1') c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) graph2 = torch.fx.Graph() val_map : Dict[Node, Node] = {} graph2.graph_copy(graph, val_map) seen_names : Set[str] = set() for node in graph2.nodes: assert node.name not in seen_names seen_names.add(node.name) def test_unpack(self): class M(torch.nn.Module): def forward(self, a, b): c, d = a return c + d + b a = (torch.rand(1), torch.rand(1)) b = torch.rand(1) m = M() self.checkGraphModule(m, (a, b)) def test_native_callable(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: raise unittest.SkipTest("non-portable load_library call used in test") # This test exercises the case where we use FX to translate from Python # code to some native callable object # # For the purposes of testing, we use ElementwiseInterpreter defined # in test_custom_class.cpp. 
# # We test that we can # 1) Construct a native callable from FX IR # 2) Construct a drop-in replacement module that delegates to the # native callable rather than the original code # 3) Run both the original code and native callable wrapper with # equivalent results # 4) TorchScript compile the native callable wrapper and confirm # equivalent results with the reference # 5) TorchScript serialize and deserialize the native callable # and confirm equivalent results with the reference # We use this simple Module as a reference computation class MySimpleMod(torch.nn.Module): def forward(self, x): return 3.0 * x + x msm = MySimpleMod() # This is what a lowering pass might look like: a function that takes # a valid nn.Module, symbolically traces it, lowers the Module to some # representation, and wraps that representation up into another # nn.Module instance that handles dispatch to the compiled/lowered code. def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module: # ===== Stage 1: Symbolic trace the module ===== mod = symbolic_trace(orig_mod) # ===== Stage 2: Lower GraphModule representation to the C++ # interpreter's instruction format ====== instructions = [] constant_idx = 0 constants = {} fn_input_names = [] target_to_name = { operator.add : "add", operator.mul : "mul" } output_node : Optional[Node] = None # For each instruction, create a triple # (instruction_name : str, inputs : List[str], output : str) # to feed into the C++ interpreter for n in mod.graph.nodes: target, args, out_name = n.target, n.args, n.name assert len(n.kwargs) == 0, "kwargs currently not supported" if n.op == 'placeholder': # Placeholders specify function argument names. Save these # for later when we generate the wrapper GraphModule fn_input_names.append(target) elif n.op == 'call_function': assert target in target_to_name, "Unsupported call target " + target arg_names = [] for arg in args: if not isinstance(arg, Node): # Pull out constants. 
These constants will later be # fed to the interpreter C++ object via add_constant() arg_name = f'constant_{constant_idx}' constants[arg_name] = torch.tensor( [arg] if isinstance(arg, numbers.Number) else arg) arg_names.append(arg_name) constant_idx += 1 else: arg_names.append(arg.name) instructions.append((target_to_name[target], arg_names, out_name)) elif n.op == 'output': if output_node is not None: raise RuntimeError('Multiple output nodes!') output_node = n else: raise RuntimeError('Unsupported opcode ' + n.op) interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter() # Load constants for k, v in constants.items(): interpreter.add_constant(k, v) # Specify names for positional input arguments interpreter.set_input_names(fn_input_names) # Load instructions interpreter.set_instructions(instructions) # Specify name for single output assert isinstance(output_node.args[0], torch.fx.Node) interpreter.set_output_name(output_node.args[0].name) # ===== Stage 3: Create a wrapper GraphModule around the interpreter ===== class WrapperModule(torch.nn.Module): def __init__(self, interpreter): super().__init__() self.interpreter = interpreter wrapper = WrapperModule(interpreter) # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter # 3) Returns the speficied return value # FIXME: The following code could be greatly simplified by symbolic_trace'ing # the wrapper with a Tracer that considers the Wrapper instance a root # module, however, I can't get `__call__` exposed on TorchBind classes # without it messing up Python `hasattr` for some reason. More digging # into CPython's implementation of hasattr is probably in order... 
graph = torch.fx.Graph() # Add placeholders for fn inputs placeholder_nodes = [] for name in fn_input_names: placeholder_nodes.append(graph.create_node('placeholder', name)) # Get the interpreter object interpreter_node = graph.create_node('get_attr', 'interpreter') # Add a node to call the interpreter instance output_node = graph.create_node( op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes)) # Register output graph.output(output_node) graph.lint() # Return final GraphModule!!! return GraphModule(wrapper, graph) # Lower GraphModule to C++ interpreter lowered = lower_to_elementwise_interpreter(msm) # Compare correctness with original module x = torch.rand(3, 4) ref_out = msm(x) test_out = lowered(x) torch.testing.assert_allclose(test_out, ref_out) # Test TorchScript compilation scripted_lowered = torch.jit.script(lowered) script_out = scripted_lowered(x) torch.testing.assert_allclose(script_out, ref_out) # Test TorchScript ser/de import_copy = self.getExportImportCopy(scripted_lowered) imported_out = import_copy(x) torch.testing.assert_allclose(imported_out, ref_out) def test_reserved_getattr(self): """Ensure that we do not name any nodes with a reserved builtin like `getattr`""" class M(torch.nn.Module): def forward(self, a): return a.foo.bar.baz m = M() m_g = symbolic_trace(m) m_g.graph.lint() for node in m_g.graph.nodes: self.assertTrue(node.name != "getattr") def test_node_tagging(self): class TaggingTracer(Tracer): def create_node(self, kind : str, target : Union[str, Callable], args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None, type_expr : Optional[Any] = None) -> Node: n = super().create_node(kind, target, args, kwargs, name) n.tag = 'foo' return n class M(torch.nn.Module): def forward(self, a, b): return a + b m = M() g = TaggingTracer().trace(m) g.lint() for n in g.nodes: self.assertTrue(hasattr(n, 'tag')) self.assertEqual(n.tag, 'foo') def test_tensor_attribute(self): class 
TensorAttribute(torch.nn.Module): def __init__(self): super().__init__() self.tensor = torch.rand(3, 4) def forward(self, x): return torch.nn.functional.linear(x, self.tensor) ta = TensorAttribute() traced = symbolic_trace(ta) traced(torch.rand(4, 4)) class WrapperForQualname(torch.nn.Module): def __init__(self): super().__init__() self.ta = TensorAttribute() def forward(self, x): return torch.nn.functional.linear(x, self.ta.tensor) wfq = WrapperForQualname() traced2 = symbolic_trace(wfq) traced2.graph.lint() traced2(torch.rand(4, 4)) def test_symbolic_trace_sequential(self): class Simple(torch.nn.Module): def forward(self, x): return torch.neg(x) seq = torch.nn.Sequential( Simple(), Simple(), Simple() ) traced = symbolic_trace(seq) traced.graph.lint() x = torch.rand(3, 4) self.assertEqual(traced(x), seq(x)) def test_tensor_constant(self): class ConstTensor(torch.nn.Module): def forward(self, x): return torch.nn.functional.linear(x, torch.zeros(3, 4)) ct = ConstTensor() traced = symbolic_trace(ct) traced.graph.lint() traced(torch.rand(4, 4)) def test_pickle_graphmodule(self): class Nested(torch.nn.Module): def __init__(self): super().__init__() self.st = torch.nn.Linear(4, 4) def forward(self, x): return self.st(x) n = Nested() traced = symbolic_trace(n) traced.graph.lint() pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() x = torch.rand(3, 4) self.assertEqual(loaded(x), traced(x)) def test_pickle_custom_import(self): graph = torch.fx.Graph() a = graph.placeholder('x') b = graph.placeholder('y') c = graph.call_function(a_non_torch_leaf, (a, b)) d = graph.call_function(torch.sin, (c,)) graph.output(d) gm = GraphModule(torch.nn.Module(), graph) pickled = pickle.dumps(gm) loaded = pickle.loads(pickled) loaded.graph.lint() x, y = torch.rand(1), torch.rand(1) self.assertEqual(loaded(x, y), gm(x, y)) def test_all_input_nodes(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.placeholder('x') b : torch.fx.Node = 
    def test_deepcopy_graphmodule_with_transform(self):
        """deepcopy of a transformed GraphModule yields an independent module
        with identical behavior."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        traced.graph.lint()

        def transform(traced):
            # Copy the traced graph and append a `neg` method call on the output.
            new_graph = torch.fx.Graph()
            val_map : Dict[Node, Node] = {}
            output_value = new_graph.graph_copy(traced.graph, val_map)
            relu_out = new_graph.create_node(
                op='call_method', target='neg', args=(output_value,), kwargs={})
            new_graph.output(relu_out)
            return GraphModule(traced, new_graph)

        transformed = transform(traced)
        transformed.graph.lint()
        copied = copy.deepcopy(transformed)
        # GraphModule classes are generated per-instance, so the deepcopy must
        # get its own class object.
        self.assertNotEqual(id(type(transformed)), id(type(copied)))
        x = torch.randn(3, 4)
        self.assertEqual(copied(x), transformed(x))

    def test_deepcopy_with_submods_params(self):
        """deepcopy must work on traced modules with nested submodules and
        parameters at multiple levels."""
        class Bar(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))

            def forward(self, x):
                return torch.relu(x) + self.param

        class Baz(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.bar = Bar()

            def forward(self, x):
                return self.bar(x) - self.param

        baz = Baz()
        traced = symbolic_trace(baz)
        traced.graph.lint()
        copied = copy.deepcopy(traced)
        copied.graph.lint()

    def test_unpack_list_better_error(self):
        """Star-unpacking a Proxy into a call must raise a descriptive
        TraceError instead of an obscure failure."""
        class SomeArgs(torch.nn.Module):
            def forward(self, a, b):
                return torch.rand(3, 4)

        class UnpacksList(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sa = SomeArgs()

            def forward(self, x : list):
                # `*x` requires iterating the Proxy, which is unsupported.
                return self.sa(*x)

        ul = UnpacksList()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ul)
test_unpack_dict_better_error(self): class SomeKwargs(torch.nn.Module): def forward(self, x=3, y=4): return torch.rand(3, 4) class UnpacksDict(torch.nn.Module): def __init__(self): super().__init__() self.sk = SomeKwargs() def forward(self, x : dict): return self.sk(**x) ud = UnpacksDict() with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'): symbolic_trace(ud) def test_pretty_print_targets(self): # Test that Graph pretty-print prints friendly name for targets # in `operator` and `builtins` class SomeMod(torch.nn.Module): def forward(self, x): return torch.add(x.foo + x.bar, 3.0) traced = symbolic_trace(SomeMod()) graph_str = str(traced.graph) self.assertIn('builtins.getattr', graph_str) self.assertIn('operator.add', graph_str) self.assertIn('torch.add', graph_str) def test_pretty_print_node(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.param: torch.nn.Parameter = torch.nn.Parameter( torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x: torch.Tensor, y: int = 2): return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0) traced = symbolic_trace(M()) all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes]) FileCheck().check("x").check("placeholder") \ .check("y").check("placeholder") \ .check("getitem").check("call_function") \ .check("param").check("get_attr") \ .check("add").check("call_function") \ .check("linear").check("call_module") \ .check("clamp").check("call_method") \ .run(all_formatted) def test_script_tensor_constant(self): # TorchScript seems to ignore attributes that start with `__`. # We used to call anonymous Tensor values `__tensor_constant*`, but # they were getting ignored by script. 
Now they're called # `_tensor_constant*` class IHaveATensorConstant(torch.nn.Module): def forward(self, x): return x + torch.rand(3, 4) traced = torch.fx.symbolic_trace(IHaveATensorConstant()) torch.jit.script(traced) def test_autowrap_functions(self): class AutowrapFnTest(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) class AutowrapFnTest2(torch.nn.Module): def forward(self, x): return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2) # Check function(s) are wrapped # `int` would normally throw a TypeError as argument can't be `Proxy` tracer = Tracer(autowrap_functions=(fx_int,)) graph = tracer.trace(AutowrapFnTest()) traced = GraphModule(tracer.root, graph, 'test') tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2)) tracer_2.trace(AutowrapFnTest2()) # Test scriptability traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(4)), 2) def test_torch_fx_len(self): class FXLenTest(torch.nn.Module): def forward(self, x): return len(x) traced = symbolic_trace(FXLenTest()) self.assertEqual(traced(torch.rand(3, 4)), 3) # Test scriptability scripted = torch.jit.script(FXLenTest()) self.assertEqual(scripted(torch.rand(3)), 3) traced_scripted = torch.jit.script(traced) self.assertEqual(traced_scripted(torch.rand(3)), 3) # Test non-proxy len class FXLenTest2(torch.nn.Module): def __init__(self): super().__init__() self.l = [3, 4, 5] def forward(self, x): return x + len(self.l) traced2 = symbolic_trace(FXLenTest2()) inp = torch.rand(3, 4) self.assertEqual(traced2(inp), inp + 3.0) self.assertIs(len, builtins.len) def test_sqrt(self): class Sqrt1(torch.nn.Module): def forward(self, x): return sqrt(x.size(0)) class Sqrt2(torch.nn.Module): def forward(self, x): return math.sqrt(x.size(0)) class Sqrt3(torch.nn.Module): def forward(self, x): return x + math.sqrt(2) + sqrt(2) self.checkGraphModule(Sqrt1(), [torch.zeros(8)]) self.checkGraphModule(Sqrt2(), [torch.zeros(8)]) self.checkGraphModule(Sqrt3(), [torch.zeros(8)]) 
    def test_torch_custom_ops(self):
        """Calls through torch.ops.aten.* must be traceable and run correctly."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        out = gm(input)
        self.assertEqual(out, ref_out)

    def test_pickle_torch_custom_ops(self):
        """A GraphModule containing torch.ops.aten.* calls must survive a
        pickle round-trip with identical behavior."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        self.assertEqual(loaded(input), gm(input))

    def test_pretty_print(self):
        """str() of a traced module includes the module repr and its code."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        traced.graph.lint()
        printed = str(traced)
        assert 'SimpleTest()' in printed
        assert 'torch.relu' in printed

    def test_pretty_print_graph(self):
        """str() of a Graph mentions args, kwargs and the #users column."""
        class KwargPrintTest(torch.nn.Module):
            def forward(self, x):
                return torch.squeeze(x + 3.0, dim=2)
        st = KwargPrintTest()
        traced = symbolic_trace(st)
        traced.graph.lint()
        stringed = str(traced.graph)
        for s in ['args', 'kwargs', '#users']:
            assert s in stringed

    def test_custom_proxy_type(self):
        """Tracing through methods of a user type that wraps Tensors works and
        preserves per-field results."""
        class TensorPair:
            def __init__(self, left, right):
                self.left, self.right = left, right

            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)

            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)

        def use_tensor_pair(x : TensorPair, y : TensorPair):
            s = x.add(y)
            return s.mul(x)

        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))

        ref_out = use_tensor_pair(x, y)

        traced = symbolic_trace(use_tensor_pair)

        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_literal(x : TensorPair): s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3))) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) ref_out = use_tensor_pair_literal(x) traced = symbolic_trace(use_tensor_pair_literal) traced_out = traced(x) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_dynamic_value(self): class TensorPair(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, left, right): self.left, self.right = left, right def add(self, other): l = self.left + other.left r = self.right + other.right return TensorPair(l, r) def mul(self, other): l = self.left * other.left r = self.right * other.right return TensorPair(l, r) def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor): s = x.add(TensorPair(y, y)) return s.mul(x) x = TensorPair(torch.randn(5, 3), torch.randn(5, 3)) y = torch.randn(5, 3) ref_out = use_tensor_pair_ctor(x, y) traced = symbolic_trace(use_tensor_pair_ctor) traced_out = traced(x, y) self.assertEqual(traced_out.left, ref_out.left) self.assertEqual(traced_out.right, ref_out.right) def test_custom_proxy_input_dependent_control_flow(self): class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta): def __init__(self, inp): if inp.sum() == 0: self.is_zero = True self.tensor = torch.tensor([]) else: self.is_zero = False self.tensor = inp def add(self, other): if self.is_zero: return ZeroTensor(other.tensor) elif other.is_zero: return self def use_zero_tensor(x : torch.Tensor, y : torch.Tensor): return ZeroTensor(x + y) x, y = torch.randn(5, 3), torch.randn(5, 3) ref_out = use_zero_tensor(x, y) traced = 
symbolic_trace(use_zero_tensor) traced_out = traced(x, y) self.assertEqual(traced_out.is_zero, ref_out.is_zero) self.assertEqual(traced_out.tensor, ref_out.tensor) def test_graph_fns(self): g = Graph() a = g.placeholder('a') b = g.call_module('linear', (a,)) c = g.get_attr('bias') d = g.call_method('add', (b, c)) e = g.call_function(torch.sin, (d,)) g.output(e) mod = torch.nn.Module() mod.linear = torch.nn.Linear(3, 4) mod.bias = torch.rand(4) gm = GraphModule(mod, g) gm.graph.lint() input = torch.rand(3) r = gm(input) ref = torch.sin(mod.linear(input) + mod.bias) self.assertEqual(r, ref) def test_remove_uses(self): g : torch.fx.Graph = Graph() x : torch.fx.Node = g.placeholder('x') relu : torch.fx.Node = g.call_function(torch.relu, (x,)) neg : torch.fx.Node = g.call_function(torch.neg, (relu,)) g.output(neg) neg.replace_all_uses_with(relu) g.erase_node(neg) self.assertTrue(neg not in relu.users) def test_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(3, 4) symbolic_trace(eb) def test_pickle_nonetype_annotation(self): eb = torch.nn.EmbeddingBag(10, 3, mode='sum') traced = symbolic_trace(eb) pickled = pickle.dumps(traced) loaded = pickle.loads(pickled) loaded.graph.lint() input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9]) offsets = torch.LongTensor([0, 4]) self.assertEqual(loaded(input, offsets), traced(input, offsets)) def test_return_tuple(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: return (x, x + x) original = M() traced = symbolic_trace(original) self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1))) def test_construct_root_dict(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) 
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4) add_param : torch.Tensor = torch.rand(3, 4) gm : torch.fx.GraphModule = torch.fx.GraphModule( {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph) gm.graph.lint() assert 'self.foo.bar.baz' in gm.code x : torch.Tensor = torch.rand(3, 3) out : torch.Tensor = gm(x) ref_out : torch.Tensor = linear_mod(x) + add_param self.assertEqual(out, ref_out) def test_symbolic_trace_assert(self): class AssertsTensorShape(torch.nn.Module): def forward(self, x): torch._assert(x.shape[1] > 4, "assert_foobar") return x m = AssertsTensorShape() # verify traceability traced = symbolic_trace(m) # verify assertion on traced model works correctly at runtime traced(torch.rand(4, 5)) with self.assertRaisesRegex(AssertionError, "assert_foobar"): traced(torch.rand(4, 3)) # verify the symbolically traced module is scriptable ms = torch.jit.script(m) with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"): ms(torch.rand(4, 3)) def test_fx_create_arg(self): class CustomArgObject: def __init__(self, x, y): self.x = x self.y = y def __fx_create_arg__(self, tracer: torch.fx.Tracer): return tracer.create_node( "call_function", CustomArgObject, args=( tracer.create_arg(self.x), tracer.create_arg(self.y), ), kwargs={}, ) class HasCustomArgObjectWhenLeaf(torch.nn.Module): def forward(self, o: CustomArgObject): # Not normally traceable; good reason to make # this module a leaf. 
for x in o.x: o.y += x return o.y class Root(torch.nn.Module): def __init__(self): super().__init__() self.inner = HasCustomArgObjectWhenLeaf() def forward(self, x, y): o = CustomArgObject(x, y) return self.inner(o) class CreateArgTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is HasCustomArgObjectWhenLeaf m = Root() graph = CreateArgTracer().trace(m) gm = torch.fx.GraphModule(m, graph) assert "CustomArgObject(" in gm.code def test_trace_fn_constant(self): some_constant = torch.rand(3, 4) def add_const(x): return some_constant + x traced = symbolic_trace(add_const) input = torch.rand(3, 4) self.assertEqual(traced(input), add_const(input)) def test_copy_no_remap(self): traced = symbolic_trace(SimpleTest()) g = traced.graph copied = torch.fx.Graph() for node in g.nodes: copied.node_copy(node) with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'): copied.lint() def test_wrong_topo(self): graph : torch.fx.Graph = torch.fx.Graph() a : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,)) c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam') d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c)) graph.output(d) nodes = list(graph.nodes) nodes[3].append(nodes[2]) with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'): graph.lint() def test_example_shape_prop(self): class TestCase(torch.nn.Module): def __init__(self): super().__init__() self.attr = torch.randn(3, 4) self.submod = torch.nn.Linear(4, 4) def forward(self, x): return torch.neg(self.submod(x.relu() + self.attr)) tc = TestCase() tc_traced = symbolic_trace(tc) ref_out = tc_traced(torch.rand(3, 4)) shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4)) # Make sure we're testing all opcodes opcodes = set() output_shape : Optional[torch.Shape] = None output_stride : Optional[Tuple[int]] = None for node in 
tc_traced.graph.nodes: opcodes.add(node.op) if node.op == 'output': output_shape = node.args[0].meta['tensor_meta'].shape output_stride = node.args[0].meta['tensor_meta'].stride self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method', 'call_module', 'output'])) # Test shape propogation and make sure results match actual self.assertEqual(output_shape, ref_out.shape) self.assertEqual(output_stride, ref_out.stride()) def test_shape_prop_layout(self): class ConvTest(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv2d(5, 5, 3) def forward(self, x): return self.conv_mod(x) # contiguous layout test_mod = ConvTest() traced = symbolic_trace(test_mod) x = torch.randn(5, 5, 224, 224) shape_prop.ShapeProp(traced).propagate(x) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced.graph.nodes)) x_channels_last = x.contiguous(memory_format=torch.channels_last) traced.to(memory_format=torch.channels_last) shape_prop.ShapeProp(traced).propagate(x_channels_last) for node in traced.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. 
The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last) def test_shape_prop_aggregate(self): class ReturnTwo(torch.nn.Module): def forward(self, x): return (3, torch.sum(x)) class UnderTest(torch.nn.Module): def __init__(self): super().__init__() self.rt = ReturnTwo() def forward(self, x): return self.rt(x) ut = UnderTest() class RTTracer(torch.fx.Tracer): def is_leaf_module(self, m, module_qualified_name): return type(m) is ReturnTwo graph = RTTracer().trace(ut) mod = torch.fx.GraphModule(ut, graph) shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4)) for node in mod.graph.nodes: if node.op == 'call_module': assert 'tensor_meta' in node.meta tensor_meta = node.meta['tensor_meta'] assert tensor_meta[0] == 3 assert tensor_meta[1].shape == torch.Size([]) def test_shape_prop_layout_3d(self): class ConvTest3d(torch.nn.Module): def __init__(self): super().__init__() self.conv_mod = torch.nn.Conv3d(5, 5, 3) def forward(self, x): return self.conv_mod(x) test_mod_3d = ConvTest3d() traced_3d = symbolic_trace(test_mod_3d) x_3d = torch.randn(5, 5, 224, 224, 15) shape_prop.ShapeProp(traced_3d).propagate(x_3d) assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format for node in traced_3d.graph.nodes)) x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d) traced_3d.to(memory_format=torch.channels_last_3d) shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d) for node in traced_3d.graph.nodes: # NB: the implementation of conv may not preserve the memory format, # unfortunately. 
The best we can do is just check that the placeholder # node is channels-last if node.op in {'placeholder'}: self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d) def test_interpreter(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) interpreter = Interpreter(gm) input = torch.randn(3, 4) self.assertEqual(interpreter.run(input), gm(input)) self.assertEqual(interpreter.run(input), m(input)) def test_interpreter_run_node_override(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) class RunNodeInterpreter(Interpreter): def __init__(self, module): super().__init__(module) def run_node(self, n : Node) -> Any: result = super().run_node(n) n.cached_value = result return result input = torch.randn(3, 4) RunNodeInterpreter(gm).run(input) for node in gm.graph.nodes: assert hasattr(node, 'cached_value') def test_interpreter_onthefly_swap(self): def fn(x): return torch.sigmoid(x).neg() gm = torch.fx.symbolic_trace(fn) class NegSigmSwapInterpreter(Interpreter): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) input = torch.randn(3, 4) result = NegSigmSwapInterpreter(gm).run(input) self.assertEqual(result, torch.neg(input).sigmoid()) def 
test_interpreter_partial_eval(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) gm = torch.fx.symbolic_trace(MyModule()) interp = Interpreter(gm) env = {} for node in gm.graph.nodes: if node.op == 'call_module' and node.target == 'linear': env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0 break assert len(env) == 1 x = torch.randn(3, 4) result = interp.run(x, initial_env=env) self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0)) def test_interpreter_star_args(self): def with_star_args(x, *args): return x + args[0] gm = torch.fx.symbolic_trace(with_star_args) interp = Interpreter(gm) result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4)) self.assertEqual(result, torch.ones(3, 4) * 2.0) @skipIfNoTorchVision def test_interpreter_noop_resnet18(self): rn18 = torchvision_models.resnet18() transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform() inp = torch.randn(5, 3, 224, 224) self.assertEqual(transformed(inp), rn18(inp)) @skipIfNoTorchVision def test_interpreter_gc_values(self): rn18 = torchvision_models.resnet18() interp = Interpreter(symbolic_trace(rn18)) inp = torch.rand(5, 3, 224, 224) out = interp.run(inp) env_key_names = set(n.name for n in interp.env.keys()) self.assertEqual(env_key_names, set(['output'])) def test_transformer_noop(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): return self.linear(x + self.param).clamp(min=0.0, max=1.0) m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_transformer_op_swap(self): def fn(x): return torch.sigmoid(x).neg() gm 
= torch.fx.symbolic_trace(fn) class NegSigmSwapXformer(Transformer): def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == torch.sigmoid: return torch.neg(*args, **kwargs) return super().call_function(n) def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any: if target == 'neg': call_self, *args_tail = args return call_self.sigmoid(*args_tail, **kwargs) return super().call_method(n) transformed = NegSigmSwapXformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(transformed(input), torch.neg(input).sigmoid()) def test_transformer_multi_outputs(self): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.param = torch.nn.Parameter(torch.rand(3, 4)) self.linear = torch.nn.Linear(4, 5) def forward(self, x): x = x + self.param out = self.linear(x) return x, out m = MyModule() gm = torch.fx.symbolic_trace(m) new_gm = Transformer(gm).transform() input = torch.randn(3, 4) self.assertEqual(new_gm(input), gm(input)) def test_fn_type_annotations(self): class Foo(torch.nn.Module): def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]: return {'a': p.x + p.y + z + i} foo_scripted = torch.jit.script(Foo()) foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) fxed = symbolic_trace(Foo()) fxed_scripted = torch.jit.script(fxed) fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3) def test_fn_type_annotation_empty(self): def forward(a : List[torch.Tensor]): return a[0] torch.jit.script(symbolic_trace(forward)) def test_wrapped_method(self): def wrap_with_relu(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): return torch.relu(fn(*args, **kwargs)) return wrapper class Foo(torch.nn.Module): @wrap_with_relu def forward(self, x, w): return torch.matmul(x, w) f = Foo() traced = symbolic_trace(f) x, w = torch.rand(3, 4), torch.rand(4, 4) self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes)) def 
test_empty_graph_codegen(self): graph = torch.fx.Graph() gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(gm(), None) def test_sequential(self): m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1)) gm = torch.fx.symbolic_trace(m) gm_copy = copy.deepcopy(gm) def test_ctx_mgr(self): @contextlib.contextmanager def do_nothing(): yield class M(torch.nn.Module): def __init__(self): super().__init__() @do_nothing() def forward(self, x): return torch.relu(x) m = M() self.checkGraphModule(m, (torch.rand(3, 4),)) def test_typename_print(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,), type_expr=List[float]) output : torch.fx.Node = graph.output(b) self.assertTrue('typing.List[float]' in str(graph)) def test_ellipsis(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y): return x + y[:, 1:10, ...] traced = symbolic_trace(M()) x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4) self.assertEqual(traced(x, y), x + y[:, 1:10, ...]) def test_inf_nan(self): class FooMod(torch.nn.Module): def forward(self, x): return x + float('inf'), x + float('-inf'), x + float('nan') fm = FooMod() self.checkGraphModule(fm, (torch.rand(3, 4),)) def test_inf_nan_kwds(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf') c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan') graph.output((b, c)) gm = torch.fx.GraphModule(torch.nn.Module(), graph) x = torch.rand(3, 4) self.assertEqual(gm(x), (x + float('inf'), x + float('nan'))) def test_deepcopy_recursion_depth(self): depth = sys.getrecursionlimit() + 20 g = torch.fx.Graph() x = g.placeholder('x') for i in range(depth): x = 
g.call_function(torch.relu, (x,)) g.output(x) copied_graph = copy.deepcopy(g) val_map = {} for orig_node, new_node in zip(g.nodes, copied_graph.nodes): val_map[orig_node] = new_node for orig_node, new_node in zip(g.nodes, copied_graph.nodes): orig_users = set(orig_node.users.keys()) orig_users_equiv = set(val_map[u] for u in orig_users) new_users = set(new_node.users.keys()) self.assertEqual(orig_users_equiv, new_users) @skipIfNoTorchVision def test_replace_uses(self): rn18 = torchvision_models.resnet18() class LowerReluTracer(torch.fx.Tracer): def is_leaf_module(self, m : torch.nn.Module, qualname : str): if isinstance(m, torch.nn.ReLU): return False return super().is_leaf_module(m, qualname) rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18)) to_erase = [] for node in rn18_traced.graph.nodes: if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]: kwargs = node.kwargs.copy() # Neg doesn't have in-place kwargs.pop('inplace') with rn18_traced.graph.inserting_before(node): new_node = rn18_traced.graph.call_function( the_function=torch.neg, args=node.args, kwargs=node.kwargs) node.replace_all_uses_with(replace_with=new_node) to_erase.append(node) for node in to_erase: rn18_traced.graph.erase_node(node) def test_replace_input(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) b.replace_input_with(x, y) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input_x = torch.randn(33, 44) input_y = torch.randn(11, 22) self.assertEqual(gm(input_x, input_y), torch.relu(input_y)) def test_insertion_point(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) 
output : torch.fx.Node = graph.output(b) with graph.inserting_before(b): neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) gm = torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_update_args_api(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_arg(0, y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_update_kwargs_api(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') y : torch.fx.Node = graph.create_node('placeholder', 'y') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x}) output : torch.fx.Node = graph.output(b) orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph) inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5) self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x)) b.update_kwarg('input', y) new_gm = torch.fx.GraphModule(torch.nn.Module(), graph) self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y)) def test_move_before(self): graph : torch.fx.Graph = torch.fx.Graph() x : torch.fx.Node = graph.create_node('placeholder', 'x') b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,)) output : torch.fx.Node = graph.output(b) neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,)) _, *relu_args = b.args b.args = (neg, *relu_args) b.prepend(neg) gm = 
torch.fx.GraphModule(torch.nn.Module(), graph) input = torch.randn(33, 44) self.assertEqual(gm(input), torch.relu(torch.neg(input))) def test_erase_node_error(self): st = SimpleTest() traced = symbolic_trace(st) for node in traced.graph.nodes: # Test deleting with uses both in another Node and at the output if node.target in [operator.add, torch.relu]: with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'): traced.graph.erase_node(node) def test_copy_it(self): d = immutable_dict([(3, 4), (5, 6)]) l = immutable_list([(3, 4), (5, 6)]) self.assertEqual(d, deepcopy(d)) self.assertEqual(l, deepcopy(l)) def test_get_torch_func_signature(self): for key in dir(torch): obj = getattr(torch, key) if callable(obj): schemas = get_signature_for_torch_op(obj) def test_find_uses(self): graph = torch.fx.Graph() x = torch.fx.Proxy(graph.placeholder('x')) y = torch.relu(x) z = x + x u = torch.neg(x) graph.output((y + z + u).node) graph.lint() users_of_x = x.node.users self.assertEqual(len(users_of_x), 3) expected_ops = set(['relu', 'add', 'neg']) for use in users_of_x: assert any(use.name.startswith(prefix) for prefix in expected_ops) def test_inline_graph(self): class InlineInto(torch.nn.Module): def forward(self, x): return torch.relu(x) class ToInline(torch.nn.Module): def forward(self, x): return torch.neg(x) inline_into = symbolic_trace(InlineInto()) to_inline = symbolic_trace(ToInline()) combined_graph = torch.fx.Graph() output_node = combined_graph.graph_copy(inline_into.graph, {}) input_node = list(to_inline.graph.nodes)[0] assert input_node and input_node.op == 'placeholder' val_map = {input_node : output_node} output = combined_graph.graph_copy(to_inline.graph, val_map) combined_graph.output(output) combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph) input = torch.rand(3, 4) self.assertEqual(combined_module(input), input.relu().neg()) def test_multi_insert_point(self): graph = torch.fx.Graph() x = 
torch.fx.Proxy(graph.placeholder('x')) relu = torch.relu(x) with graph.inserting_before(relu.node): y = torch.neg(x) z = torch.tanh(y) graph.output((relu.node, z.node)) graph.lint() expected_ops = ['x', 'neg', 'tanh', 'relu'] for node, expected in zip(graph.nodes, expected_ops): assert expected in node.name def test_reassign_args_kwargs_uses(self): graph = torch.fx.Graph() x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y')) z = x + y zed = z + z + z graph.output(zed.node) graph.lint() # zed = z + z + z -> zed = z + z + x zed.node.args = (zed.node.args[0], x.node) self.assertEqual(x.node.users.keys(), [z.node, zed.node]) # z = x + y -> z = y + y z.node.args = (y.node, y.node) self.assertEqual(x.node.users.keys(), [zed.node]) def test_trace_function(self): def foo(x, y): return torch.relu(x) + y x, y = torch.randn(3, 4), torch.randn(3, 4) self.checkGraphModule(foo, (x, y)) def test_trace_dict_int_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[int, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({42: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) traced_graph = MyTracer().trace(CallsModWithDict()) def test_trace_dict_proxy_keys(self): class ModWithDictArg(torch.nn.Module): def forward(self, d : Dict[torch.Tensor, torch.Tensor]): return d[42] class CallsModWithDict(torch.nn.Module): def __init__(self): super().__init__() self.m = ModWithDictArg() def forward(self, x): return self.m({x: x}) class MyTracer(torch.fx.Tracer): def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool: return isinstance(m, ModWithDictArg) with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'): traced_graph = MyTracer().trace(CallsModWithDict()) def test_direct_param_use(self): 
class TransposeTest(torch.nn.Module): def __init__(self): super().__init__() self.b = torch.nn.Parameter(torch.rand(4, 3)) def forward(self, x): return self.b class Foo(torch.nn.Module): def __init__(self): super().__init__() self.a = TransposeTest() def forward(self, x): return self.a.b, self.a.b.t(), self.a.b.view(12) traced = torch.fx.symbolic_trace(Foo()) assert(all('constant' not in node.target for node in traced.graph.nodes)) def test_single_default_arg(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1): return y m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) def test_multiple_default_args(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, y=1, z=2): return y + z m = M() self.checkGraphModule(m, ()) self.checkGraphModule(m, (3,)) self.checkGraphModule(m, (3, 4)) def test_regular_and_default_args(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self, x, y=1): return x + y m = M() self.checkGraphModule(m, (2,)) self.checkGraphModule(m, (2, 3)) def test_string_literal_return(self): class M(torch.nn.Module): def __init__(self): super().__init__() def forward(self): return "foo" m = M() self.checkGraphModule(m, ()) def test_namedtuple_return_qualname(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return MyNamedTup(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), MyNamedTup(input, input)) def test_update_args_kwargs_yells_at_you(self): symtraced = symbolic_trace(SimpleTest()) node = next(iter(symtraced.graph.nodes)) with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'): node.__update_args_kwargs((), {}) def test_torchbind_class_attribute_in_fx(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping") class FooBar1234(torch.nn.Module): def 
__init__(self): super(FooBar1234, self).__init__() self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"]) def forward(self): return self.f.top() m = FooBar1234() self.checkGraphModule(m, ()) def test_torchbind_class_attribute_in_fx_tensor_arg(self): if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS: self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping") class FooBar2341(torch.nn.Module): def __init__(self): super(FooBar2341, self).__init__() self.f = torch.classes._TorchScriptTesting._ReLUClass() def forward(self, x): return self.f.run(x) m = FooBar2341() traced = symbolic_trace(m) input = torch.randn(3, 4) self.assertEqual(traced(input), m(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_script_method_trace(self): class Scripted(torch.nn.Module): def forward(self, x): return torch.relu(x) class Holder(torch.nn.Module): def __init__(self): super().__init__() self.s = torch.jit.script(Scripted()) def forward(self, x): return self.s(x) h = Holder() traced = symbolic_trace(h) input = torch.randn(3, 4) self.assertEqual(traced(input), h(input)) self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes)) def test_namedtuple_return_trace(self): class NamedTupReturn(torch.nn.Module): def forward(self, x): return Pair(x, x) traced = symbolic_trace(NamedTupReturn()) input = torch.rand(3, 4) self.assertEqual(traced(input), Pair(input, input)) def test_return_type_exists(self): class ReturnTypeModule(torch.nn.Module): def other(self, x: List[str]) -> List[str]: return x def forward(self, x: List[str]) -> List[str]: return self.other(x) traced = symbolic_trace(ReturnTypeModule()) self.assertIn("-> typing_List[str]", traced._code) scripted = torch.jit.script(traced) self.assertIn("-> List[str]", scripted.code) def getitem_inner(self): class GetItemBase(torch.nn.Module): def __init__(self): super().__init__() self.register_buffer('pe', torch.randn(8, 8)) class 
GetItem1(GetItemBase): def forward(self, x): return self.pe[:, :x.size(0)] class GetItem2(GetItemBase): def forward(self, x): return self.pe[x.size(0)] class GetItem3(GetItemBase): def forward(self, x): return self.pe[4] # fx creates `self._tensor_constant0` here self.checkGraphModule(GetItem1(), [torch.zeros(4)]) self.checkGraphModule(GetItem2(), [torch.zeros(4)]) self.checkGraphModule(GetItem3(), [torch.zeros(4)]) @unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1", "Will be checked in test_getitem_subproc") def test_getitem(self): self.getitem_inner() def test_getitem_subproc(self): # need to run this test in a subproc to work around: # https://github.com/pytorch/pytorch/issues/50710 proc = Process(target=run_getitem_target) proc.start() proc.join() self.assertEqual(proc.exitcode, 0) def test_user_friendly_call_provenance_with_function(self): def fn(x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(fn) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'fn.forward'"): scripted = torch.jit.script(traced) def test_user_friendly_call_provenance_with_module(self): class M(torch.nn.Module): def forward(self, x): return wrapper_fn(x) traced = torch.fx.symbolic_trace(M()) with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is " "being compiled since it was called" " from 'M.forward'"): scripted = torch.jit.script(traced) def test_snake_case(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.activations = torch.nn.ModuleDict([ ["snake_case", torch.nn.ReLU()], ["PascalCase", torch.nn.LeakyReLU()], ["ALL_CAPS", torch.nn.PReLU()] ]) def forward(self, x): a = self.activations["snake_case"](x) b = self.activations["PascalCase"](x) c = self.activations["ALL_CAPS"](x) return a, b, c traced = symbolic_trace(M()) check = [ ("activations_snake_case", "activations.snake_case"), ("activations_pascal_case", "activations.PascalCase"), ("activations_all_caps", 
"activations.ALL_CAPS") ] i = 0 for node in traced.graph.nodes: if node.op == "placeholder" or node.op == "output": continue name = check[i][0] target = check[i][1] self.assertEqual(name, node.name) self.assertEqual(target, node.target) i += 1 self.assertEqual(i, 3) def test_no_mutation(self): from torch.fx.immutable_collections import immutable_list x = immutable_list([3, 4]) with self.assertRaisesRegex(NotImplementedError, "new_args"): x[0] = 4 def test_partial_trace(self): class Foo(torch.nn.Module): def forward(self, x, y): if y: return 2 * x else: return x mod = Foo() mod_true = symbolic_trace(mod, concrete_args={'y': True}) mod_false = symbolic_trace(mod, concrete_args={'y': False}) self.assertEqual(mod_true(3, True), 6) print(mod_true.code) assert(any([i.target == torch._assert for i in mod_true.graph.nodes])) with self.assertRaises(AssertionError): mod_true(3, False) self.assertEqual(mod_false(3, False), 3) with self.assertRaises(AssertionError): mod_false(3, True) def f_higher(a, f): return f(a) nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2}) self.assertEqual(nf(3, lambda x: x * 2), 6) def test_custom_traceback_raised_when_exception_source_is_graphmodule(self): class M(torch.nn.Module): def __init__(self): super(M, self).__init__() self.W = torch.nn.Parameter(torch.randn(5)) def forward(self, x): return torch.dot(self.W, x) traced = torch.fx.symbolic_trace(M()) out = [n for n in traced.graph.nodes if n.op == "output"][-1] with traced.graph.inserting_before(out): relu_out = traced.graph.call_method(method_name='relu', args=(out.args[0],)) out.args = (relu_out,) traced.recompile() with self.capture_stderr() as captured: with self.assertRaises(TypeError): traced(5) self.assertRegex(captured[0], r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_custom_traceback_not_raised_when_exception_source_is_submodule(self): class M(torch.nn.Module): def __init__(self): super().__init__() 
self.linear = torch.nn.Linear(3, 4) def forward(self, x): return self.linear(x) traced = torch.fx.symbolic_trace(M()) # Do not change this to `capture_stderr` or another context # manager without ensuring that the output is as expected try: traced(torch.rand(5, 5)) except RuntimeError: captured = traceback.format_exc() self.assertNotRegex(captured, r"Call using an FX-traced Module, line .* of the " r"traced Module's generated forward function:") def test_ast_rewriter_rewrites_assert(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_rewrites_assert_with_message(self): class M(torch.nn.Module): def forward(self, x: torch.Tensor, y: int, z: int): assert y == z, "msg" return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_ast_rewriter_reassigns_submodules(self): class M(torch.nn.Module): def __init__(self): super().__init__() self.bn = torch.nn.BatchNorm2d(100) def forward(self, x: torch.Tensor): return torch.add(x, x) ast_rewriter = RewritingTracer() graph = ast_rewriter.trace(M()) traced = GraphModule(ast_rewriter.root, graph, "gm") traced.graph.lint() def test_submodule_manipulation_API(self): class C(torch.nn.Module): def __init__(self): super(C, self).__init__() self.conv = torch.nn.Conv2d(16, 33, 3, stride=2) self.param = torch.nn.Parameter(torch.rand(2, 3)) def forward(self, x): return self.conv(torch.cat([self.param, x])) class B(torch.nn.Module): def __init__(self): super(B, self).__init__() self.linear = torch.nn.Linear(100, 200) self.register_buffer("buf", torch.randn(2, 3)) self.net_c = C() def forward(self, x): return self.linear(torch.cat([self.buf, self.net_c(x)])) class A(torch.nn.Module): def __init__(self): 
super(A, self).__init__() self.net_b = B() self.param = torch.nn.Parameter(torch.rand(2, 3)) def forward(self, x): return self.net_b(x) + self.param a = symbolic_trace(A()) a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2)) conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1] with a.graph.inserting_before(conv): with warnings.catch_warnings(record=True) as w: dropout = a.graph.call_module(module_name="net_b.net_c.dropout", args=conv.args) self.assertEqual(len(w), 0) conv.replace_all_uses_with(dropout) a.graph.erase_node(conv) a.recompile() def module_exists(gm: GraphModule, path: str) -> bool: return any(path == name for name, _ in gm.named_modules()) def parameter_exists(gm: GraphModule, path: str) -> bool: return (any(path == name for name, _ in gm.named_parameters()) and any(path == name for name in gm.state_dict().keys())) def buffer_exists(gm: GraphModule, path: str) -> bool: return (any(path == name for name, _ in gm.named_buffers()) and any(path == name for name in gm.state_dict().keys())) # Test that we added the "dropout" submodule self.assertTrue(module_exists(a, "net_b.net_c.dropout")) # Test `get_submodule` with an added submodule self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout")) # Test that the "conv" submodule is still there self.assertTrue(module_exists(a, "net_b.net_c.conv")) # Test `get_submodule` with an original module self.assertIsNotNone(a.get_submodule("net_b.net_c.conv")) # Test that the "conv" node is NOT still there conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"] self.assertEqual(conv, []) a.delete_submodule("net_b.net_c.conv") # Test that the "conv" submodule is now gone self.assertFalse(module_exists(a, "net_b.net_c.conv")) # Test `get_submodule` with a deleted submodule with self.assertRaisesRegex(AttributeError, "has no attribute " "`conv`"): self.assertIsNone(a.get_submodule("net_b.net_c.conv")) # Test `get_attr` warnings cat = [n for n in a.graph.nodes if n.target == 
torch.cat][-1] with a.graph.inserting_before(cat): with warnings.catch_warnings(record=True) as w: param = a.graph.get_attr(qualified_name="net_b.net_c.param") self.assertEqual(len(w), 0) with self.assertWarnsRegex(UserWarning, "Attempted to " "insert a get_attr Node with no " "underlying reference in the " "owning GraphModule"): bad_param = a.graph.get_attr(qualified_name="net_b.param") a.graph.erase_node(bad_param) cat.args = (*cat.args, param) a.recompile() a.graph.lint() # Test `get_parameter` a.get_parameter("net_b.net_c.param") with self.assertRaisesRegex(AttributeError, "is not an " "nn.Parameter"): a.get_parameter("net_b.buf") with self.assertRaisesRegex(AttributeError, "has no attribute " "`param`"): a.get_parameter("net_b.param") # Test `get_buffer` a.get_buffer("net_b.buf") with self.assertRaisesRegex(AttributeError, "is not a " "buffer"): a.get_buffer("net_b.net_c.param") with self.assertRaisesRegex(AttributeError, "has no attribute " "`buf`"): a.get_buffer("net_b.net_c.buf") # Test non-nested attributes a.get_submodule("") a.get_parameter("param") # Insert some unused submodules a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3)) a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3)) a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2)) a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100)) # Garbage collection a.delete_all_unused_submodules() # Test that all the unused submodules are gone self.assertFalse(module_exists(a, "net_b.embedding")) self.assertFalse(module_exists(a, "net_b.net_c.embedding")) self.assertFalse(module_exists(a, "net_b.net_c.rnn")) self.assertFalse(module_exists(a, "batch_norm_2d")) # Test that we didn't delete any unused Parameters or buffers self.assertTrue(parameter_exists(a, "net_b.net_c.param")) self.assertTrue(buffer_exists(a, "net_b.buf")) a.graph.lint() def test_tracing_graphmodules_as_leaf_submodules(self): class A(torch.nn.Module): def forward(self, t): return t + t class 
B(torch.nn.Module): def __init__(self): super(type(self), self).__init__() self.calling = False self.called = False def forward(self, t): if self.calling: return t - t else: return t + t def __call__(self, *args): self.called = True self.calling = True return super(type(self), self).__call__(*args) self.calling = False class M(torch.nn.Module): def __init__(self, a, b): super().__init__() self.a = a self.b = b def forward(self, t): x = self.a(t) y = self.b(t) return x + y class LeafTracer(Tracer): def is_leaf_module(self, module, name): return True class LeafTracerNotB(Tracer): def is_leaf_module(self, module, name): return False if "b" in name else True # Recompile calls added "for fun", since they # chain __call__ wrappers. # # Test: B as a regular, non-leaf module # a = symbolic_trace(A()) a.recompile() m = M(a, B()) graph = LeafTracerNotB().trace(m) gm = GraphModule(m, graph) gm.recompile() # Test graphmodule/submodule a is not inlined. self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) # Test submodule b is not treated as leaf. self.assertFalse(hasattr(gm, "b")) # Test assert custom __call__ on submodule b was honored. match = [ n for n in gm.graph.nodes if n.op == "call_function" and n.target == operator.sub ] self.assertTrue(len(match) == 1) # # Test: B as a regular, leaf module # symbolic_trace should only patch torch.nn.Module.__call__, # which means B.__call__ should still execute # a = symbolic_trace(A()) a.recompile() b = B() m = M(a, b) graph = LeafTracer().trace(m) gm = GraphModule(m, graph) gm.recompile() # Test graphmodule/submodule a is not inlined. 
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) # Test submodule b is leaf: self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"] self.assertTrue(len(match) == 1) # Test b.__call__ was run self.assertTrue(b.called) self.assertTrue(gm.get_submodule("b").called) # # Test: B as GraphModule leaf # __call__ not honored since symbolic_trace directly invokes forward() # a = symbolic_trace(A()) a.recompile() b = symbolic_trace(B()) b.recompile() m = M(a, b) graph = LeafTracer().trace(m) gm = GraphModule(m, graph) gm.recompile() self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"] self.assertTrue(len(match) == 1) self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module)) match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"] self.assertTrue(len(match) == 1) def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.register_buffer("my_buff", torch.rand(3, 4)) self.register_parameter( "my_param", torch.nn.Parameter(torch.rand(3, 4)) ) def forward(self, x): return x + self.my_buff + self.my_param mod = MyModule() mod_traced = symbolic_trace(mod) # Create new GraphModule based on original, either w/ dict or root module. orig_buff = mod_traced.get_buffer("my_buff") orig_param = mod_traced.get_parameter("my_param") mod_traced_new = GraphModule( {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod, mod_traced.graph, ) # Check that both my_buff and my_param are found and the same. 
try: new_buff = mod_traced_new.get_buffer("my_buff") except Exception: self.fail("Did not find my_buff") self.assertEqual(orig_buff, new_buff) try: new_param = mod_traced_new.get_parameter("my_param") except Exception: self.fail("Did not find my_param") self.assertEqual(orig_param, new_param) x = torch.rand(3, 4) orig_out = mod_traced(x) submodules_out = mod_traced_new(x) self.assertEqual(orig_out, submodules_out) def test_graph_module_init_buffer_param_copied_dict_init(self): self._test_graph_module_init_buffer_param_copied(use_dict_init=True) def test_graph_module_init_buffer_param_copied_mod_init(self): self._test_graph_module_init_buffer_param_copied(use_dict_init=False) def test_annotations_with_no_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: torch.Tensor, a: A) -> torch.Tensor: return a(x) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor': return a(x) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor: return a(x[0]) self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) def test_annotations_with_non_torch_reference_and_internal_forward_references(self): class A: def __call__(self, x: torch.Tensor): return torch.add(x, x) class M(torch.nn.Module): def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor': return a(x)[0] self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None) @unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature " "`annotations` is 
not defined in Python <3.7") def test_annotation_with_future(self): try: import fx.test_future # noqa: F401 finally: del sys.modules["__future__"] def test_annotations_empty_tuple(self): class Foo(torch.nn.Module): def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]): return "foo" traced = torch.fx.symbolic_trace(Foo()) x = () y = ("bar", ()) traced(x, y) FileCheck().check("_Tuple[()]") \ .check("typing_Tuple[str,typing_Tuple[()]]") \ .run(traced.code) scripted = torch.jit.script(traced) scripted(x, y) FileCheck().check("Tuple[()]") \ .check("Tuple[str, Tuple[()]]") \ .run(scripted.code) @skipIfNoTorchVision def test_cpatcher(self): cnt = 0 def patched_impl(to_patch, args, kwargs): nonlocal cnt cnt += 1 return to_patch(*args, **kwargs) c_patch_enabled = True def patched_in(to_patch, args, kwargs): nonlocal c_patch_enabled try: c_patch_enabled = False r = patched_impl(to_patch, args, kwargs) finally: c_patch_enabled = True return r def trace_func(frame, action, arg): if action == 'c_call': if c_patch_enabled: torch._C._fx.patch_function(arg, patched_in) import torch rn = torchvision_models.resnet18() try: sys.setprofile(trace_func) rn(torch.rand(1, 3, 224, 224)) print("testing print patch") finally: sys.setprofile(None) assert(cnt != 0) def test_randn(self): def f(): return torch.randn(3, 3) fx_f = symbolic_trace(f, enable_cpatching=True) assert(any(i.target == torch.randn for i in fx_f.graph.nodes)) fx_f = symbolic_trace(f, enable_cpatching=False) assert(all(i.target != torch.randn for i in fx_f.graph.nodes)) fx_f = symbolic_trace(f, enable_cpatching=True) assert(any(i.target == torch.randn for i in fx_f.graph.nodes)) def test_pytree(self): def f_sum(x): return sum(x) def f_sum_dict(x): out = 0 for k, v in x.items(): out += v return out def f_dict_list_map(x): new_dict = {} for k, v in x.items(): new_dict[k] = [i + 1 for i in v] return new_dict def f_dict_add(x): return x['a'] + sum(x['z']) def f_namedtuple_add(x): return x.x + x.y 
pytree._register_pytree_node( Foo, lambda x: ([x.a, x.b], None), lambda x, _: Foo(x[0], x[1]), ) fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b]) def f_custom(x): return x.a + x.b def f_custom_dict(x): return f_sum_dict(x.a) + x.b def f_return_custom(x): return Foo(x.b, x.a) tests = [ (f_sum, [PH, PH, PH]), (f_sum, []), (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}), (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}), (f_dict_list_map, {5: (PH, PH, PH)}), (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}), (f_dict_add, {'a': PH, 'z': []}), (f_custom, Foo(PH, PH)), (f_custom, Foo(PH, 3)), (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)), # (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees (f_namedtuple_add, Point(PH, PH)), ] def verify_pytree(f, inp): val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp) num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]]) orig_out = f(val) nf = symbolic_trace(f, concrete_args={'x': inp}) self.assertEqual(nf(val), orig_out) assert num_flat_args == 0 or "tree_flatten_spec" in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args) nf = symbolic_trace(nf) self.assertEqual(nf(val), orig_out) assert "tree_flatten_spec" not in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1) nf = symbolic_trace(nf, concrete_args={'x': inp}) self.assertEqual(nf(val), orig_out) assert num_flat_args == 0 or "tree_flatten_spec" in nf.code assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args) pickled = pickle.dumps(nf) nf = pickle.loads(pickled) self.assertEqual(nf(val), orig_out) for f, inp in tests: verify_pytree(f, inp) def test_pytree_concrete(self): def f(b, a): if b: return a['a'] else: return a['z'] inp = {'a': {'a': PH, 'z': PH}, 'b': True} nf = symbolic_trace(f, concrete_args=inp) val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp) self.assertEqual(nf(**val), f(**val)) nf = symbolic_trace(nf) 
self.assertEqual(nf(**val), f(**val)) def run_getitem_target(): from torch.fx._symbolic_trace import _wrapped_methods_to_patch _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__")) try: TestFX().getitem_inner() finally: _wrapped_methods_to_patch.pop() class TestOperatorSignatures(JitTestCase): @onlyCPU @ops(op_db, allowed_dtypes=(torch.float,)) def test_get_torch_func_signature_exhaustive(self, device, dtype, op): # Sorted and one entry on each line to minimize merge conflicts. known_no_schema = {'cdist', 'contiguous', 'dstack', 'einsum', 'expand', 'expand_as', 'fill_', 'hstack', 'linalg.multi_dot', 'lu', 'norm', 'polygamma', 'special.polygamma', 'repeat', 'reshape_as', 'resize_', 'resize_as_', 'special.zeta', 'stack', 'to_sparse', 'view', 'view_as', 'nn.functional.hardshrink', 'vstack', 'where', 'zero_', '__getitem__', '__radd__', '__rsub__', '__rmul__', '__rdiv__', '__rmod__', '__rpow__', '__rand__', '__ror__', '__rxor__', '__rmatmul__'} try: sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False) schemas = get_signature_for_torch_op(op.op) if not schemas: raise RuntimeError('No Schemas Returned') for sample_input in sample_inputs_itr: # Iterate through overloads until we hit a match. 
If we exit this # loop via `else`, we haven't found a match for schema in schemas: try: bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs) bound_args.apply_defaults() op(*bound_args.args, **bound_args.kwargs) break except TypeError as e: pass else: raise RuntimeError(f'Did not match any schemas for op {op.name}!') except Exception as e: assert op.name in known_no_schema or "nn.functional" in op.name class TestFunctionalTracing(JitTestCase): IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary", "has_torch_function_variadic", "handle_torch_function", "boolean_dispatch") TO_PATCH = {"has_torch_function": None, "has_torch_function_unary": None, "has_torch_function_variadic": None} BUILT_IN_FUNC = (AssertionError, "") PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable") PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated") LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default") ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$") CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow") INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined") UNTRACEABLE_FUNCTIONALS = { "adaptive_avg_pool1d": BUILT_IN_FUNC, "avg_pool1d": BUILT_IN_FUNC, "avg_pool2d": BUILT_IN_FUNC, "avg_pool3d": BUILT_IN_FUNC, "celu_": BUILT_IN_FUNC, "channel_shuffle": BUILT_IN_FUNC, "conv1d": BUILT_IN_FUNC, "conv2d": BUILT_IN_FUNC, "conv3d": BUILT_IN_FUNC, "conv_tbc": BUILT_IN_FUNC, "conv_transpose1d": BUILT_IN_FUNC, "conv_transpose2d": BUILT_IN_FUNC, "conv_transpose3d": BUILT_IN_FUNC, "cosine_similarity": BUILT_IN_FUNC, "elu_": BUILT_IN_FUNC, "hardtanh_": BUILT_IN_FUNC, "leaky_relu_": BUILT_IN_FUNC, "logsigmoid": BUILT_IN_FUNC, "one_hot": BUILT_IN_FUNC, "pdist": BUILT_IN_FUNC, "pixel_shuffle": BUILT_IN_FUNC, "pixel_unshuffle": BUILT_IN_FUNC, "relu_": BUILT_IN_FUNC, "rrelu_": BUILT_IN_FUNC, "selu_": BUILT_IN_FUNC, 
"softplus": BUILT_IN_FUNC, "softshrink": BUILT_IN_FUNC, "threshold_": BUILT_IN_FUNC, "adaptive_avg_pool2d": LEN_ERROR, "adaptive_avg_pool3d": LEN_ERROR, "adaptive_max_pool2d_with_indices": LEN_ERROR, "adaptive_max_pool3d_with_indices": LEN_ERROR, "instance_norm": CONTROL_FLOW, "pad": LEN_ERROR, "adaptive_max_pool1d": PROXY_ITERABLE, "adaptive_max_pool2d": PROXY_ITERABLE, "adaptive_max_pool3d": PROXY_ITERABLE, "fractional_max_pool2d": PROXY_ITERABLE, "fractional_max_pool3d": PROXY_ITERABLE, "max_pool1d": PROXY_ITERABLE, "max_pool2d": PROXY_ITERABLE, "max_pool3d": PROXY_ITERABLE, "group_norm": PROXY_ITERATED, "lp_pool2d": PROXY_ITERATED, "max_unpool1d": PROXY_ITERATED, "max_unpool2d": PROXY_ITERATED, "max_unpool3d": PROXY_ITERATED, "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH, "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH, "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH, "hardshrink": ARG_TYPE_MISMATCH, "layer_norm": ARG_TYPE_MISMATCH, "lp_pool1d": ARG_TYPE_MISMATCH, "max_pool1d_with_indices": ARG_TYPE_MISMATCH, "max_pool2d_with_indices": ARG_TYPE_MISMATCH, "max_pool3d_with_indices": ARG_TYPE_MISMATCH, "pairwise_distance": ARG_TYPE_MISMATCH, "affine_grid": CONTROL_FLOW, "alpha_dropout": CONTROL_FLOW, "batch_norm": CONTROL_FLOW, "binary_cross_entropy": CONTROL_FLOW, "binary_cross_entropy_with_logits": CONTROL_FLOW, "celu": CONTROL_FLOW, "cosine_embedding_loss": CONTROL_FLOW, "cross_entropy": CONTROL_FLOW, "ctc_loss": CONTROL_FLOW, "dropout": CONTROL_FLOW, "dropout2d": CONTROL_FLOW, "dropout3d": CONTROL_FLOW, "elu": CONTROL_FLOW, "embedding": CONTROL_FLOW, "embedding_bag": CONTROL_FLOW, "feature_alpha_dropout": CONTROL_FLOW, "fold": CONTROL_FLOW, "gaussian_nll_loss": CONTROL_FLOW, "glu": CONTROL_FLOW, "grid_sample": CONTROL_FLOW, "gumbel_softmax": CONTROL_FLOW, "hardsigmoid": CONTROL_FLOW, "hardswish": CONTROL_FLOW, "hardtanh": CONTROL_FLOW, "hinge_embedding_loss": CONTROL_FLOW, "huber_loss": CONTROL_FLOW, "interpolate": CONTROL_FLOW, 
"kl_div": CONTROL_FLOW, "l1_loss": CONTROL_FLOW, "leaky_relu": CONTROL_FLOW, "local_response_norm": CONTROL_FLOW, "margin_ranking_loss": CONTROL_FLOW, "mse_loss": CONTROL_FLOW, "multi_head_attention_forward": CONTROL_FLOW, "multi_margin_loss": CONTROL_FLOW, "multilabel_margin_loss": CONTROL_FLOW, "multilabel_soft_margin_loss": CONTROL_FLOW, "nll_loss": CONTROL_FLOW, "poisson_nll_loss": CONTROL_FLOW, "relu": CONTROL_FLOW, "relu6": CONTROL_FLOW, "rrelu": CONTROL_FLOW, "selu": CONTROL_FLOW, "silu": CONTROL_FLOW, "mish": CONTROL_FLOW, "smooth_l1_loss": CONTROL_FLOW, "soft_margin_loss": CONTROL_FLOW, "threshold": CONTROL_FLOW, "triplet_margin_loss": CONTROL_FLOW, "triplet_margin_with_distance_loss": CONTROL_FLOW, "unfold": CONTROL_FLOW, "upsample": CONTROL_FLOW, "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT, "upsample_nearest": INTERPOLATE_ARGS_CONFLICT, } # List of nn.functionals with Tensor inputs but not with type annotation FUNCTIONALS_WITHOUT_ANNOTATION = ( "adaptive_max_pool1d", "adaptive_max_pool2d", "adaptive_max_pool3d", "fractional_max_pool2d", "fractional_max_pool3d", "max_pool1d", "max_pool2d", "max_pool3d", "gaussian_nll_loss", "upsample", "upsample_bilinear", "upsample_nearest", ) # Inconsistent behavior between Python 3.8 and other Python versions: # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED` # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same # internal exception above # Use the following map to override the expected exception for Python 3.8 UNTRACEABLE_FUNCTIONALS_PY38 = { "adaptive_max_pool1d": PROXY_ITERATED, "adaptive_max_pool2d": PROXY_ITERATED, "adaptive_max_pool3d": PROXY_ITERATED, "fractional_max_pool2d": PROXY_ITERATED, "fractional_max_pool3d": PROXY_ITERATED, "max_pool1d": PROXY_ITERATED, "max_pool2d": PROXY_ITERATED, "max_pool3d": PROXY_ITERATED, "group_norm": LEN_ERROR } @classmethod def _get_functional(cls): functional_list = [] for f in dir(torch.nn.functional): if not f.islower(): 
continue # Ignore internal functions if f.startswith('_'): continue # Ignore supporting functions if f in cls.IGNORE_FUNCS: continue fn = getattr(torch.nn.functional, f) # Ignore non-callable object like modules if not isinstance(fn, Callable): continue if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION: try: sig = inspect.signature(fn) has_tensor_arg = False for arg, param in sig.parameters.items(): if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor): has_tensor_arg = True if not has_tensor_arg: continue # No signature or Object is not supported except ValueError: pass functional_list.append((f, fn)) return functional_list @classmethod def generate_test_func(cls, func_name, fn): def functional_test(self): if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \ sys.version_info >= (3, 8) and sys.version_info < (3, 10): exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name] with self.assertRaisesRegex(exc, err): symbolic_trace(fn) elif func_name in self.UNTRACEABLE_FUNCTIONALS: exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name] with self.assertRaisesRegex(exc, err): symbolic_trace(fn) else: symbolic_trace(fn) return functional_test @classmethod def generate_tests(cls): functional_list = cls._get_functional() for func_name, fn in functional_list: test_name = "test_nn_functional_" + func_name functional_test = cls.generate_test_func(func_name, fn) setattr(cls, test_name, functional_test) @classmethod def setUpClass(cls): def no(*args, **kwargs): return False for name in cls.TO_PATCH.keys(): cls.TO_PATCH[name] = getattr(torch.nn.functional, name) setattr(torch.nn.functional, name, no) @classmethod def tearDownClass(cls): for name in cls.TO_PATCH.keys(): setattr(torch.nn.functional, name, cls.TO_PATCH[name]) TestFunctionalTracing.generate_tests() instantiate_device_type_tests(TestOperatorSignatures, globals()) @skipIfNoTorchVision class TestVisionTracing(JitTestCase): PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated") 
    INCONSISTENT_TYPE = (
        RuntimeError,
        r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
    )

    # Detection models cannot be traced: their forward iterates a Proxy.
    UNTRACEABLE_MODELS = {
        "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
        "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
        "maskrcnn_resnet50_fpn": PROXY_ITERATED,
        "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
        "retinanet_resnet50_fpn": PROXY_ITERATED,
    }
    # Models whose traced GraphModule cannot be TorchScript-compiled.
    UNSCRIPTABLE_MODELS = {
        "googlenet": INCONSISTENT_TYPE,
        "inception_v3": INCONSISTENT_TYPE,
    }

    # Normalizers that extract a comparable Tensor from each model's output.
    output_transform = {
        "fcn_resnet50": lambda x: x["out"],
        "fcn_resnet101": lambda x: x["out"],
        "deeplabv3_resnet50": lambda x: x["out"],
        "deeplabv3_resnet101": lambda x: x["out"],
        "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
        "lraspp_mobilenet_v3_large": lambda x: x["out"],
        "fasterrcnn_resnet50_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
        "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
        "maskrcnn_resnet50_fpn": lambda x: x[1],
        "keypointrcnn_resnet50_fpn": lambda x: x[1],
        "retinanet_resnet50_fpn": lambda x: x[1],
    }

    @classmethod
    def generate_test_fn(cls, name, model_fn, x, kwargs):
        # Build a test that (1) symbolically traces the model and checks the
        # traced graph matches eager output, then (2) scripts the traced
        # graph and checks the scripted output too — or asserts the expected
        # failure for models in the UNTRACEABLE/UNSCRIPTABLE tables.
        def run_test(self):
            model = model_fn(**kwargs)
            model = model.eval()
            if name in self.UNTRACEABLE_MODELS:
                err, exc = self.UNTRACEABLE_MODELS[name]
                with self.assertRaisesRegex(err, exc):
                    graph = symbolic_trace(model)
            else:
                out_transform = self.output_transform.get(name, lambda x: x)
                graph : torch.fx.GraphModule = symbolic_trace(model)
                a = out_transform(model(x))
                b = out_transform(graph(x))
                self.assertEqual(a, b)

                if name in self.UNSCRIPTABLE_MODELS:
                    err, exc = self.UNSCRIPTABLE_MODELS[name]
                    with self.assertRaisesRegex(err, exc):
                        script = torch.jit.script(graph)
                else:
                    script = torch.jit.script(graph)
                    c = out_transform(script(x))
                    self.assertEqual(a, c)

        return run_test

    @classmethod
    def generate_classification_tests(cls):
        # Public factories are lowercase and not underscore-prefixed.
        for k, v in torchvision_models.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_' + k
                # inception_v3 requires 299x299 inputs; others take 224x224.
                x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_segmentation_tests(cls):
        for k, v in torchvision_models.segmentation.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_segmentation_' + k
                x = torch.rand(1, 3, 32, 32)
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_detection_tests(cls):
        for k, v in torchvision_models.detection.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_detection_' + k
                # Detection models take a list of 3D image tensors.
                x = [torch.rand(3, 300, 300)]
                kwargs = dict(num_classes=10, pretrained_backbone=False)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_video_tests(cls):
        for k, v in torchvision_models.video.__dict__.items():
            if callable(v) and k[0].lower() == k[0] and k[0] != "_":
                test_name = 'test_torchvision_models_video_' + k
                # Video models take (batch, channels, frames, H, W).
                x = torch.rand(1, 3, 4, 112, 112)
                kwargs = dict(num_classes=50)
                model_test = cls.generate_test_fn(k, v, x, kwargs)
                setattr(cls, test_name, model_test)

    @classmethod
    def generate_tests(cls):
        cls.generate_classification_tests()
        cls.generate_detection_tests()
        cls.generate_segmentation_tests()
        cls.generate_video_tests()

if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()

if __name__ == '__main__':
    run_tests()
recipe-576910.py
#! /usr/bin/python
# NOTE(review): Python 2 module (Queue, xrange, print statement).

import threading
import Queue
import time
import sys

# Lazily created process-wide singleton; access via getInstance().
Instance = None

def getInstance():
    """Return the shared ThreadPool, creating it on first use."""
    global Instance
    if not Instance:
        Instance = ThreadPool()
    return Instance

class ThreadPool:
    """Dynamic thread pool with on-demand worker creation and simple timers.

    Workers are daemon threads consuming (target, callback, args, kwargs)
    tuples from a Queue; the sentinel string "exit" kills one worker.
    """

    def __init__(self, maxWorkers = 10):
        self.tasks = Queue.Queue()          # pending task tuples / "exit" sentinels
        self.workers = 0                    # workers alive (guarded by countLock)
        self.working = 0                    # workers currently running a task
        self.maxWorkers = maxWorkers        # upper bound for on-demand growth
        self.allKilled = threading.Event()  # set when worker count drops to 0
        self.countLock = threading.RLock()  # guards workers/working/maxWorkers
        self.timers = {}                    # callback -> [period, last_fire_time]
        self.timersLock = threading.Lock()       # guards self.timers
        self.timersThreadLock = threading.Lock() # ensures a single timer loop
        self.timersEvent = threading.Event()     # wakes the timer loop early
        # No workers exist yet, so "all killed" starts out true.
        self.allKilled.set()

    def run(self, target, callback = None, *args, **kargs):
        """Start a task.

        target = callable to run with *args and **kargs arguments.
        callback = callable executed when target ends; it should accept one
        parameter where target's return value is passed. If callback is None
        it's ignored.
        """
        self.countLock.acquire()
        # Ensure at least one worker exists before queueing.
        if not self.workers:
            self.addWorker()
        self.countLock.release()
        self.tasks.put((target, callback, args, kargs))

    def setMaxWorkers(self, num):
        """Set the maximum number of workers to create.

        num = max workers.
        If the number passed is lower than the active worker count, workers
        are killed to match that number.
        """
        self.countLock.acquire()
        self.maxWorkers = num
        if self.workers > self.maxWorkers:
            self.killWorker(self.workers - self.maxWorkers)
        self.countLock.release()

    def addWorker(self, num = 1):
        """Add workers.

        num = number of workers to create/add.
        """
        for x in xrange(num):
            self.countLock.acquire()
            self.workers += 1
            self.allKilled.clear()
            self.countLock.release()
            t = threading.Thread(target = self.__workerThread)
            t.setDaemon(True)
            t.start()

    def killWorker(self, num = 1):
        """Kill workers.

        num = number of workers to kill.
        """
        self.countLock.acquire()
        if num > self.workers:
            num = self.workers
        self.countLock.release()
        # Each sentinel terminates exactly one worker when dequeued.
        for x in xrange(num):
            self.tasks.put("exit")

    def killAllWorkers(self, wait = None):
        """Kill all active workers.

        wait = seconds to wait until the last worker ends;
        if None it waits forever.
        """
        self.countLock.acquire()
        self.killWorker(self.workers)
        self.countLock.release()
        self.allKilled.wait(wait)

    def __workerThread(self):
        # Main loop of one worker thread.
        while True:
            task = self.tasks.get()
            # exit is "special" tasks to kill thread
            if task == "exit":
                break

            self.countLock.acquire()
            self.working += 1
            if (self.working >= self.workers) and (self.workers < self.maxWorkers):
                # create thread on demand
                self.addWorker()
            self.countLock.release()

            fun, cb, args, kargs = task
            try:
                ret = fun(*args, **kargs)
                if cb:
                    cb(ret)
            except:
                # Best-effort task execution: log and keep the worker alive.
                print "Unexpected error:", sys.exc_info()

            self.countLock.acquire()
            self.working -= 1
            self.countLock.release()

        # Worker is exiting; signal allKilled when the last one leaves.
        self.countLock.acquire()
        self.workers -= 1
        if not self.workers:
            self.allKilled.set()
        self.countLock.release()

    def timer(self, cb, period):
        """Add or remove timers.

        cb = callback function.
        period = period in seconds (float); if period is 0 the timer for cb
        is deleted.
        """
        # Timer bookkeeping runs on a pool worker, not the caller's thread.
        self.run(self.__timerThread, None, cb, period)

    def __timerThread(self, cb, period):
        # Register/unregister the timer, then (in one worker only) run the
        # scheduling loop that fires due callbacks via self.run().
        self.timersLock.acquire()
        self.timersEvent.set()
        if not period:
            # period == 0 means: remove this callback's timer.
            if cb in self.timers:
                del(self.timers[cb])
            self.timersLock.release()
            return
        self.timers[cb] = [period, time.time()]
        self.timersLock.release()

        # Non-blocking acquire: only the first timer thread becomes the
        # scheduler; later registrations return immediately.
        if not self.timersThreadLock.acquire(0):
            return

        while True:
            self.timersLock.acquire()
            if len(self.timers) == 0:
                # No timers left: release the scheduler role and stop.
                self.timersThreadLock.release()
                self.timersLock.release()
                break
            minWait = 30*24*3600  # cap the sleep at ~30 days
            now = time.time()
            for k, v in self.timers.items():
                period, last = v
                wait = period - (now - last)
                if wait <= 0:
                    # Due: dispatch the callback and reschedule a full period.
                    self.run(k)
                    wait = period
                    v[1] = now
                if wait < minWait:
                    minWait = wait
            self.timersLock.release()
            # Sleep until the next timer is due or a registration wakes us.
            self.timersEvent.wait(minWait)
            self.timersEvent.clear()
great-four.py
from utils import load_configs from twarc import Twarc import json import os import datetime from multiprocessing import Process from DB_Communicator import send_to_db def stream_city(cf, city, keywords=None): bbox = { "great_syd": [149.971885992, -34.33117400499998, 151.63054702400007, -32.99606922499993], "great_mel": [144.33363404800002, -38.50298801599996, 145.8784120140001, -37.17509899299995], "great_brisbane": [152.07339276400012, -28.363962911999977, 153.54670756200005, -26.452339004999942], "great_ald": [138.435645001, -35.350296029999974, 139.04403010400003, -34.50022530299998] } if keywords == None: keywords = cf["search_words"] t = Twarc(**cf['account']) # no keyword restriction but from a specific city # reason see this https://stackoverflow.com/questions/22889122/how-to-add-a-location-filter-to-tweepy-module if not os.path.isdir(city+"/"): os.makedirs(city) path = city + "/" + str(datetime.date.today())+".jsonl" locations = ",".join([str(i) for i in bbox[city]]) for tweet in t.filter(locations=locations): print("get one tweet") #TODO send_to_db(tweet) if __name__ == "__main__": cfs = load_configs() jobs = [] for i in range(len(cfs)): boxes = ["great_syd", "great_mel", "great_brisbane", "great_ald"] p = Process(target=stream_city, args=((cfs[i], boxes[i],)), daemon=True) jobs.append(p) p.start() [p.join() for p in jobs]
surface_stats_collector.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# NOTE(review): Python 2 module (Queue, long()).

import Queue
import datetime
import logging
import re
import threading


# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
_SURFACE_TEXTURE_TIMESTAMP_RE = '\d+'

class SurfaceStatsCollector(object):
  """Collects surface stats for a SurfaceView from the output of SurfaceFlinger.

  Args:
    adb: the adb connection to use.
  """
  class Result(object):
    # Simple value object: a named metric with a unit.
    def __init__(self, name, value, unit):
      self.name = name
      self.value = value
      self.unit = unit

  def __init__(self, adb):
    self._adb = adb
    self._collector_thread = None
    # Falls back to the pre-JellyBean page-flip-count method when the
    # SurfaceFlinger latency API is unavailable (see Start()).
    self._use_legacy_method = False
    self._surface_before = None
    self._get_data_event = None
    self._data_queue = None
    self._stop_event = None
    self._results = []
    self._warn_about_empty_data = True

  def DisableWarningAboutEmptyData(self):
    self._warn_about_empty_data = False

  def Start(self):
    """Begin collecting; spawns the collector thread (non-legacy path)."""
    assert not self._collector_thread

    if self._ClearSurfaceFlingerLatencyData():
      self._get_data_event = threading.Event()
      self._stop_event = threading.Event()
      self._data_queue = Queue.Queue()
      self._collector_thread = threading.Thread(target=self._CollectorThread)
      self._collector_thread.start()
    else:
      # Latency API unsupported: snapshot the page-flip counter instead.
      self._use_legacy_method = True
      self._surface_before = self._GetSurfaceStatsLegacy()

  def Stop(self):
    """Stop collecting and flush pending results into self._results."""
    self._StorePerfResults()
    if self._collector_thread:
      self._stop_event.set()
      self._collector_thread.join()
      self._collector_thread = None

  def SampleResults(self):
    """Return results gathered so far and reset the internal list."""
    self._StorePerfResults()
    results = self.GetResults()
    self._results = []
    return results

  def GetResults(self):
    return self._results or self._GetEmptyResults()

  def _GetEmptyResults(self):
    # Placeholder metrics (value None) so consumers always see the same keys.
    return [
        SurfaceStatsCollector.Result('refresh_period', None, 'seconds'),
        SurfaceStatsCollector.Result('jank_count', None, 'janks'),
        SurfaceStatsCollector.Result('max_frame_delay', None, 'vsyncs'),
        SurfaceStatsCollector.Result('frame_lengths', None, 'vsyncs'),
        SurfaceStatsCollector.Result('avg_surface_fps', None, 'fps')
    ]

  @staticmethod
  def _GetNormalizedDeltas(data, refresh_period):
    # Returns (consecutive deltas, deltas expressed in refresh periods).
    deltas = [t2 - t1 for t1, t2 in zip(data, data[1:])]
    return (deltas, [delta / refresh_period for delta in deltas])

  @staticmethod
  def _CalculateResults(refresh_period, timestamps, result_suffix):
    """Returns a list of SurfaceStatsCollector.Result."""
    frame_count = len(timestamps)
    seconds = timestamps[-1] - timestamps[0]

    frame_lengths, normalized_frame_lengths = \
        SurfaceStatsCollector._GetNormalizedDeltas(timestamps, refresh_period)
    length_changes, normalized_changes = \
        SurfaceStatsCollector._GetNormalizedDeltas(
            frame_lengths, refresh_period)
    # A "jank" is a frame whose length grew versus the previous frame by at
    # least one refresh period — but below the pause threshold, so long idle
    # gaps (e.g. app in background) are not counted as jank.
    jankiness = [max(0, round(change)) for change in normalized_changes]
    pause_threshold = 20
    jank_count = sum(1 for change in jankiness
                     if change > 0 and change < pause_threshold)
    return [
        SurfaceStatsCollector.Result(
            'avg_surface_fps' + result_suffix,
            int(round(frame_count / seconds)), 'fps'),
        SurfaceStatsCollector.Result(
            'jank_count' + result_suffix, jank_count, 'janks'),
        SurfaceStatsCollector.Result(
            'max_frame_delay' + result_suffix,
            round(max(normalized_frame_lengths)), 'vsyncs'),
        SurfaceStatsCollector.Result(
            'frame_lengths' + result_suffix, normalized_frame_lengths,
            'vsyncs'),
    ]

  @staticmethod
  def _CalculateBuckets(refresh_period, timestamps):
    # Recompute the metrics over the trailing 99% and 50% of the samples
    # (at least 3 samples), suffixed _99 / _50.
    results = []
    for pct in [0.99, 0.5]:
      sliced = timestamps[min(int(-pct * len(timestamps)), -3) : ]
      results += SurfaceStatsCollector._CalculateResults(
          refresh_period, sliced, '_' + str(int(pct * 100)))
    return results

  def _StorePerfResults(self):
    # Fold the data collected so far into self._results.
    if self._use_legacy_method:
      # Legacy path: FPS from the page-flip counter delta over wall time.
      surface_after = self._GetSurfaceStatsLegacy()
      td = surface_after['timestamp'] - self._surface_before['timestamp']
      seconds = td.seconds + td.microseconds / 1e6
      frame_count = (surface_after['page_flip_count'] -
                     self._surface_before['page_flip_count'])
      self._results.append(SurfaceStatsCollector.Result(
          'avg_surface_fps',
          int(round(frame_count / seconds)),
          'fps'))
      return

    # Non-legacy method.
    assert self._collector_thread
    (refresh_period, timestamps) = self._GetDataFromThread()
    if not refresh_period or not len(timestamps) >= 3:
      if self._warn_about_empty_data:
        logging.warning('Surface stat data is empty')
      return
    self._results.append(SurfaceStatsCollector.Result(
        'refresh_period', refresh_period, 'seconds'))
    self._results += self._CalculateResults(refresh_period, timestamps, '')
    self._results += self._CalculateBuckets(refresh_period, timestamps)

  def _CollectorThread(self):
    # Polls SurfaceFlinger roughly once a second, accumulating timestamps
    # newer than the last one seen, and hands them to the main thread via
    # _data_queue whenever _get_data_event is set.
    last_timestamp = 0
    timestamps = []
    retries = 0

    while not self._stop_event.is_set():
      self._get_data_event.wait(1)
      try:
        refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
        if refresh_period is None or timestamps is None:
          retries += 1
          if retries < 3:
            continue
          if last_timestamp:
            # Some data has already been collected, but either the app
            # was closed or there's no new data. Signal the main thread and
            # wait.
            self._data_queue.put((None, None))
            self._stop_event.wait()
            break
          raise Exception('Unable to get surface flinger latency data')

        timestamps += [timestamp for timestamp in new_timestamps
                       if timestamp > last_timestamp]
        if len(timestamps):
          last_timestamp = timestamps[-1]

        if self._get_data_event.is_set():
          self._get_data_event.clear()
          self._data_queue.put((refresh_period, timestamps))
          timestamps = []
      except Exception as e:
        # On any error, before aborting, put the exception into _data_queue to
        # prevent the main thread from waiting at _data_queue.get() infinitely.
        self._data_queue.put(e)
        raise

  def _GetDataFromThread(self):
    # Ask the collector thread for its data; re-raise any exception it hit.
    self._get_data_event.set()
    ret = self._data_queue.get()
    if isinstance(ret, Exception):
      raise ret
    return ret

  def _ClearSurfaceFlingerLatencyData(self):
    """Clears the SurfaceFlinger latency data.

    Returns:
      True if SurfaceFlinger latency is supported by the device, otherwise
      False.
    """
    # The command returns nothing if it is supported, otherwise returns many
    # lines of result just like 'dumpsys SurfaceFlinger'.
    results = self._adb.RunShellCommand(
        'dumpsys SurfaceFlinger --latency-clear SurfaceView')
    return not len(results)

  def _GetSurfaceFlingerFrameData(self):
    """Returns collected SurfaceFlinger frame timing data.

    Returns:
      A tuple containing:
      - The display's nominal refresh period in seconds.
      - A list of timestamps signifying frame presentation times in seconds.
      The return value may be (None, None) if there was no data collected (for
      example, if the app was closed before the collector thread has finished).
    """
    # adb shell dumpsys SurfaceFlinger --latency <window name>
    # prints some information about the last 128 frames displayed in
    # that window.
    # The data returned looks like this:
    # 16954612
    # 7657467895508   7657482691352   7657493499756
    # 7657484466553   7657499645964   7657511077881
    # 7657500793457   7657516600576   7657527404785
    # (...)
    #
    # The first line is the refresh period (here 16.95 ms), it is followed
    # by 128 lines w/ 3 timestamps in nanosecond each:
    # A) when the app started to draw
    # B) the vsync immediately preceding SF submitting the frame to the h/w
    # C) timestamp immediately after SF submitted that frame to the h/w
    #
    # The difference between the 1st and 3rd timestamp is the frame-latency.
    # An interesting data is when the frame latency crosses a refresh period
    # boundary, this can be calculated this way:
    #
    # ceil((C - A) / refresh-period)
    #
    # (each time the number above changes, we have a "jank").
    # If this happens a lot during an animation, the animation appears
    # janky, even if it runs at 60 fps in average.
    #
    # We use the special "SurfaceView" window name because the statistics for
    # the activity's main window are not updated when the main web content is
    # composited into a SurfaceView.
    results = self._adb.RunShellCommand(
        'dumpsys SurfaceFlinger --latency SurfaceView',
        log_result=logging.getLogger().isEnabledFor(logging.DEBUG))
    if not len(results):
      return (None, None)

    timestamps = []
    nanoseconds_per_second = 1e9
    refresh_period = long(results[0]) / nanoseconds_per_second

    # If a fence associated with a frame is still pending when we query the
    # latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
    # Since we only care about completed frames, we will ignore any timestamps
    # with this value.
    pending_fence_timestamp = (1 << 63) - 1

    for line in results[1:]:
      fields = line.split()
      if len(fields) != 3:
        continue
      # Use the vsync timestamp (field B) as the frame presentation time.
      timestamp = long(fields[1])
      if timestamp == pending_fence_timestamp:
        continue
      timestamp /= nanoseconds_per_second
      timestamps.append(timestamp)

    return (refresh_period, timestamps)

  def _GetSurfaceStatsLegacy(self):
    """Legacy method (before JellyBean), returns the current Surface index
       and timestamp.

    Calculate FPS by measuring the difference of Surface index returned by
    SurfaceFlinger in a period of time.

    Returns:
      Dict of {page_flip_count (or 0 if there was an error), timestamp}.
    """
    results = self._adb.RunShellCommand('service call SurfaceFlinger 1013')
    assert len(results) == 1
    match = re.search('^Result: Parcel\((\w+)', results[0])
    cur_surface = 0
    if match:
      try:
        # The parcel payload is the page-flip counter in hex.
        cur_surface = int(match.group(1), 16)
      except Exception:
        logging.error('Failed to parse current surface from ' +
                      match.group(1))
    else:
      logging.warning('Failed to call SurfaceFlinger surface ' + results[0])
    return {
        'page_flip_count': cur_surface,
        'timestamp': datetime.datetime.now(),
    }
panic_bcast.py
#!/usr/bin/python2.7 # Copyright (c) 2012, Niklas Femerstrand <nik@qnrq.se> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# NOTE(review): Python 2 script (md5 module, str.encode("hex")).
import re
import os
import md5
import socket
import threading
import BaseHTTPServer
from optparse import OptionParser
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer

argparse = OptionParser()
argparse.add_option("-k", "--key", dest="key", help = "Optional, when set it adds a key to the panic signal.", metavar="<your key>")
argparse.add_option("-p", "--port", dest="port", help = "HTTP port to use", metavar="<your key>")
args = argparse.parse_args()

# Module-level config shared by the handler and the UDP listener/sender.
global key
key = args[0].key or ""
global port
port = int(args[0].port or 8080)
# The broadcast "panic" signal: "/" followed by the hex-escaped MD5 of
# "panic" + key, e.g. "/\x..\x..".
global signal
signal = "/\\x"
signal += "\\x".join(x.encode("hex") for x in md5.new("panic" + key).digest())

# Basic HTTP server that listens to GET /panic and triggers panic.
# Written to provide a standardized interface for panic triggering.
# To trigger panic through HTTP simply request http://localhost:port/panic
class panicHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # Hash the request path the same way the signal was built; a match
        # (i.e. the path hashes to the panic signal) broadcasts the panic.
        req = "/\\x" + "\\x".join(x.encode("hex") for x in md5.new(re.sub("^\/", "", self.path)).digest())
        if req == signal:
            sendSignal()

def httpd():
    # Blocking HTTP server loop; run on its own thread below.
    s = HTTPServer(('', port), panicHandler)
    s.serve_forever()

# TODO: Extend with a C lib that iterates through used physmem addresses and
#       overwrites values in a prio order before triggering poweroff.
# TODO: Use mountedDrives() to iterate and eject (crypto) mounts
def treatPanic():
    # Dismount TrueCrypt volumes, then power off.  Both shutdown spellings
    # are attempted since the flag differs per OS.
    os.popen("truecrypt -d")
    # Linux, possibly more
    os.popen("shutdown -P now")
    # FreeBSD, possibly more
    os.popen("shutdown -p now")

def sigListener():
    # Blocking UDP loop: react to a broadcast panic signal on port 1337.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    s.bind(("", 1337)) # Listen on all devices
    while True:
        try:
            message, address = s.recvfrom(65)
            if message == signal:
                treatPanic()
        except:
            # Best-effort: keep listening through transient socket errors.
            pass

def bcast():
    # Scrape the interface broadcast address from ifconfig output.
    # NOTE(review): assumes an `ifconfig` that prints "broadcast a.b.c.d".
    bcast = os.popen("ifconfig | grep -o \"broadcast [0-9]*\.[0-9]*\.[0-9]*\.[0-9]*\"").read()
    bcast = bcast.replace("broadcast ", "")
    return bcast

def sendSignal():
    # Broadcast the panic signal on UDP port 1337.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    s.sendto(signal, (bcast(), 1337))
    s.close()
    return 0

# Start the HTTP trigger and the UDP listener as background threads.
# (The names deliberately rebind the function identifiers to the threads.)
httpd = threading.Thread(name="httpd", target=httpd)
httpd.start()
sigListener = threading.Thread(name="sigListener", target=sigListener)
sigListener.start()
test_app.py
import json

import webssh.handler as handler
import random
import threading
import tornado.websocket
import tornado.gen

from tornado.testing import AsyncHTTPTestCase
from tornado.options import options
from webssh.main import make_app, make_handlers
from webssh.settings import get_app_settings
from tests.sshserver import run_ssh_server

# Shorten the handler's worker-recycle delay so the timeout test runs fast.
handler.DELAY = 0.1


class TestApp(AsyncHTTPTestCase):
    """End-to-end tests for the webssh HTTP + websocket flow against a
    local dummy SSH server started in setUpClass."""

    _is_running = False
    sshserver_port = 2200
    # Valid login form for the dummy SSH server (user robey / password foo).
    body = u'hostname=127.0.0.1&port={}&username=robey&password=foo'.format(sshserver_port)  # noqa

    def get_app(self):
        loop = self.io_loop
        options.debug = True
        options.policy = random.choice(['warning', 'autoadd'])
        options.hostFile = ''
        options.sysHostFile = ''
        app = make_app(make_handlers(loop, options), get_app_settings(options))
        return app

    @classmethod
    def setUpClass(cls):
        # The class object itself is passed to the ssh server as a "running"
        # flag — see __bool__/tearDownClass below.
        t = threading.Thread(
            target=run_ssh_server, args=(cls.sshserver_port, cls)
        )
        t.setDaemon(True)
        t.start()

    @classmethod
    def tearDownClass(cls):
        # Flips __bool__ to True, which signals the ssh server loop to stop.
        # NOTE(review): presumably run_ssh_server polls its second argument's
        # truthiness — confirm against tests/sshserver.py.
        cls._is_running = True

    @classmethod
    def __bool__(cls):
        return cls._is_running

    def test_app_with_invalid_form(self):
        # Each malformed form field should produce a specific error status.
        response = self.fetch('/')
        self.assertEqual(response.code, 200)

        body = u'hostname=&port=&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty hostname"', response.body)

        body = u'hostname=127.0.0.1&port=&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty port"', response.body)

        body = u'hostname=127.0.0.1&port=port&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Invalid port', response.body)

        body = u'hostname=127.0.0.1&port=70000&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Invalid port', response.body)

        body = u'hostname=127.0.0.1&port=7000&username=&password'
        response = self.fetch('/', method="POST", body=body)
        self.assertIn(b'"status": "Empty username"', response.body)

    def test_app_with_wrong_credentials(self):
        # Appending a character to the password must fail authentication.
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        response = self.fetch('/', method="POST", body=self.body + u's')
        self.assertIn(b'Authentication failed.', response.body)

    def test_app_with_correct_credentials(self):
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
        response = self.fetch('/', method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

    @tornado.testing.gen_test
    def test_app_with_correct_credentials_timeout(self):
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)

        response = yield client.fetch(url, method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        # Wait past handler.DELAY so the worker is recycled before we
        # connect; the websocket should then close immediately (None msg).
        yield tornado.gen.sleep(handler.DELAY + 0.1)
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIsNone(msg)
        ws.close()

    @tornado.testing.gen_test
    def test_app_with_correct_credentials_user_robey(self):
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)

        response = yield client.fetch(url, method="POST", body=self.body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIn(b'Welcome!', msg)
        ws.close()

    @tornado.testing.gen_test
    def test_app_with_correct_credentials_user_bar(self):
        url = self.get_url('/')
        client = self.get_http_client()
        response = yield client.fetch(url)
        self.assertEqual(response.code, 200)

        body = self.body.replace('robey', 'bar')
        response = yield client.fetch(url, method="POST", body=body)
        worker_id = json.loads(response.body.decode('utf-8'))['id']
        self.assertIsNotNone(worker_id)

        url = url.replace('http', 'ws')
        ws_url = url + 'ws?id=' + worker_id
        ws = yield tornado.websocket.websocket_connect(ws_url)
        msg = yield ws.read_message()
        self.assertIn(b'Welcome!', msg)

        # message will be ignored silently
        yield ws.write_message('hello')
        yield ws.write_message('"hello"')
        yield ws.write_message('[hello]')
        yield ws.write_message(json.dumps({'resize': []}))
        yield ws.write_message(json.dumps({'resize': {}}))
        yield ws.write_message(json.dumps({'resize': [100]}))
        yield ws.write_message(json.dumps({'resize': [100]*10}))
        yield ws.write_message(json.dumps({'resize': [-1, -1]}))
        yield ws.write_message(json.dumps({'data': [1]}))
        yield ws.write_message(json.dumps({'data': (1,)}))
        yield ws.write_message(json.dumps({'data': {'a': 2}}))
        yield ws.write_message(json.dumps({'data': 1}))
        yield ws.write_message(json.dumps({'data': 2.1}))
        yield ws.write_message(json.dumps({'key-non-existed': 'hello'}))
        # A well-formed frame is echoed back by the shell.
        yield ws.write_message(json.dumps({'resize': [79, 23], 'data': 'bye'}))
        msg = yield ws.read_message()
        self.assertEqual(b'bye', msg)
        ws.close()
test_webserver.py
import subprocess
import time
import sys
import os
import re
import threading

from six.moves import queue

import pytest
import requests
from pytest_test_filters import skip_on_windows

pytestmark = skip_on_windows(
    reason="Subprocess based approach is problematic on windows."
)

from testplan.common.utils.process import kill_process

# Overall deadline (seconds) for waiting on the subprocess / webserver.
_TIMEOUT = 120
# Per-request timeout (seconds) for HTTP GETs against the webserver.
_REQUEST_TIMEOUT = 0.5
# Matches the "Local: <url>" line Testplan prints when the UI server starts.
_URL_RE = re.compile(r"^\s*Local: (?P<url>[^\s]+)\s*$")


# NOTE: ``pytest.yield_fixture`` is deprecated (removed in pytest 7); a plain
# ``pytest.fixture`` has supported ``yield`` since pytest 3.0.
@pytest.fixture(
    scope="module",
    params=[
        ["dummy_programmatic_test_plan.py"],
        ["dummy_cli_arg_test_plan.py", "--ui"],
    ],
    ids=["webserver_exporter_programmatic", "webserver_exporter_cli_arg"],
)
def dummy_testplan(request):
    """
    Start the dummy testplan in a separate process.

    Yields the subprocess handle together with a queue of its stdout lines.
    On teardown, terminate the dummy testplan and wait for the process and
    the reader thread to end.
    """
    cmd = [sys.executable] + request.param
    cwd = os.path.dirname(os.path.abspath(__file__))
    testplan_proc = subprocess.Popen(
        cmd,
        cwd=cwd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        bufsize=0,
    )

    # Set up a thread to read from the process' stdout and write to a queue.
    # This prevents the main thread from blocking when there is no output.
    stdout_queue = queue.Queue()
    thr = threading.Thread(
        target=_enqueue_output, args=(testplan_proc.stdout, stdout_queue)
    )
    thr.daemon = True
    thr.start()

    yield testplan_proc, stdout_queue

    if testplan_proc.poll() is None:
        kill_process(testplan_proc)
        assert testplan_proc.poll() is not None
    thr.join(timeout=_TIMEOUT)
    assert not thr.is_alive()


def _enqueue_output(out, line_queue):
    """Enqueue decoded lines from an output stream until it hits EOF.

    Runs on a daemon thread; ``line_queue`` was previously named ``queue``,
    which shadowed the imported ``queue`` module.
    """
    for line in iter(out.readline, b""):
        line_queue.put(line.decode("utf-8"))
    out.close()


def test_webserver_exporter(dummy_testplan):
    """
    WebServer Exporter should start a web server and respond to GET requests.

    Repeatedly send requests to the web server until it answers or timeout
    is hit.
    """
    # Unpack the process and stdout queue.
    proc, stdout_queue = dummy_testplan

    # By default Testplan will grab an ephemeral port to serve the UI, so we
    # must parse the stdout to find the URL.
    url = None
    timeout = time.time() + _TIMEOUT
    while (
        (url is None) and (proc.poll() is None) and (time.time() < timeout)
    ):
        try:
            stdout_line = stdout_queue.get_nowait()
        except queue.Empty:
            time.sleep(0.1)
            continue

        print(stdout_line.rstrip("\n"))
        match = _URL_RE.match(stdout_line)
        if match:
            url = match.group("url")

    assert url is not None, "Failed to parse the webserver URL"

    # Now that we have the URL, try to make a GET request to it. This might
    # not immediately succeed so try a few times allowing for connection
    # errors. When the GET response is received, just verify the status code
    # is 200 OK.
    timeout = time.time() + _TIMEOUT
    status_code = None
    while time.time() < timeout:
        try:
            response = requests.get(url, timeout=_REQUEST_TIMEOUT)
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.ReadTimeout,
        ):
            time.sleep(_REQUEST_TIMEOUT)
        else:
            status_code = response.status_code
            break

    # Flush the stdout queue and print any remaining lines for debug.
    while not stdout_queue.empty():
        print(stdout_queue.get_nowait().rstrip("\n"))

    assert status_code == 200
multiprocess1.py
""" 使用Process类创建多个进程 Version: 0.1 Author: 骆昊 Date: 2018-03-20 """ # 通过下面程序的执行结果可以证实 父进程在创建子进程时复制了进程及其数据结构 # 每个进程都有自己独立的内存空间 所以进程之间共享数据只能通过IPC的方式 from multiprocessing import Process, Queue from time import sleep def sub_task(string, q): number = q.get() while number: print('%d: %s' % (number, string)) sleep(0.001) number = q.get() def main(): q = Queue(10) for number in range(1, 11): q.put(number) Process(target=sub_task, args=('Ping', q)).start() Process(target=sub_task, args=('Pong', q)).start() if __name__ == '__main__': main()
fifo_queue_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.ops.data_flow_ops.FIFOQueue.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import re import time import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf class FIFOQueueTest(tf.test.TestCase): def testConstructor(self): with tf.Graph().as_default(): q = tf.FIFOQueue(10, tf.float32, name="Q") self.assertTrue(isinstance(q.queue_ref, tf.Tensor)) self.assertEquals(tf.string_ref, q.queue_ref.dtype) self.assertProtoEquals(""" name:'Q' op:'FIFOQueue' attr { key: 'component_types' value { list { type: DT_FLOAT } } } attr { key: 'shapes' value { list {} } } attr { key: 'capacity' value { i: 10 } } attr { key: 'container' value { s: '' } } attr { key: 'shared_name' value { s: '' } } """, q.queue_ref.op.node_def) def testMultiQueueConstructor(self): with tf.Graph().as_default(): q = tf.FIFOQueue(5, (tf.int32, tf.float32), shared_name="foo", name="Q") self.assertTrue(isinstance(q.queue_ref, tf.Tensor)) self.assertEquals(tf.string_ref, q.queue_ref.dtype) self.assertProtoEquals(""" name:'Q' op:'FIFOQueue' attr { key: 'component_types' value { list { type: DT_INT32 type : DT_FLOAT } } } attr { key: 'shapes' value { list {} } } attr { key: 'capacity' value 
{ i: 5 } } attr { key: 'container' value { s: '' } } attr { key: 'shared_name' value { s: 'foo' } } """, q.queue_ref.op.node_def) def testConstructorWithShapes(self): with tf.Graph().as_default(): q = tf.FIFOQueue(5, (tf.int32, tf.float32), shapes=(tf.TensorShape([1, 1, 2, 3]), tf.TensorShape([5, 8])), name="Q") self.assertTrue(isinstance(q.queue_ref, tf.Tensor)) self.assertEquals(tf.string_ref, q.queue_ref.dtype) self.assertProtoEquals(""" name:'Q' op:'FIFOQueue' attr { key: 'component_types' value { list { type: DT_INT32 type : DT_FLOAT } } } attr { key: 'shapes' value { list { shape { dim { size: 1 } dim { size: 1 } dim { size: 2 } dim { size: 3 } } shape { dim { size: 5 } dim { size: 8 } } } } } attr { key: 'capacity' value { i: 5 } } attr { key: 'container' value { s: '' } } attr { key: 'shared_name' value { s: '' } } """, q.queue_ref.op.node_def) def testEnqueue(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) enqueue_op = q.enqueue((10.0,)) enqueue_op.run() def testEnqueueHalf(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float16) enqueue_op = q.enqueue((10.0,)) enqueue_op.run() def testEnqueueWithShape(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32, shapes=(3, 2)) enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],)) enqueue_correct_op.run() with self.assertRaises(ValueError): q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],)) self.assertEqual(1, q.size().eval()) def testEnqueueManyWithShape(self): with self.test_session(): q = tf.FIFOQueue(10, [tf.int32, tf.int32], shapes=[(), (2,)]) q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run() self.assertEqual(4, q.size().eval()) def testEnqueueDictWithoutNames(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) with self.assertRaisesRegexp(ValueError, "must have names"): q.enqueue({"a": 12.0}) with self.assertRaisesRegexp(ValueError, "must have names"): q.enqueue_many({"a": [12.0, 13.0]}) def testParallelEnqueue(self): 
with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32) elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() # Run one producer thread for each element in elems. def enqueue(enqueue_op): sess.run(enqueue_op) threads = [self.checkedThread(target=enqueue, args=(e,)) for e in enqueue_ops] for thread in threads: thread.start() for thread in threads: thread.join() # Dequeue every element using a single thread. results = [] for _ in xrange(len(elems)): results.append(dequeued_t.eval()) self.assertItemsEqual(elems, results) def testParallelDequeue(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32) elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() # Enqueue every element using a single thread. for enqueue_op in enqueue_ops: enqueue_op.run() # Run one consumer thread for each element in elems. 
results = [] def dequeue(): results.append(sess.run(dequeued_t)) threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops] for thread in threads: thread.start() for thread in threads: thread.join() self.assertItemsEqual(elems, results) def testDequeue(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) elems = [10.0, 20.0, 30.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() for enqueue_op in enqueue_ops: enqueue_op.run() for i in xrange(len(elems)): vals = dequeued_t.eval() self.assertEqual([elems[i]], vals) def testDequeueHalf(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float16) elems = [10.0, 20.0, 30.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() for enqueue_op in enqueue_ops: enqueue_op.run() for i in xrange(len(elems)): vals = dequeued_t.eval() self.assertEqual([elems[i]], vals) def testEnqueueAndBlockingDequeue(self): with self.test_session() as sess: q = tf.FIFOQueue(3, tf.float32) elems = [10.0, 20.0, 30.0] enqueue_ops = [q.enqueue((x,)) for x in elems] dequeued_t = q.dequeue() def enqueue(): # The enqueue_ops should run after the dequeue op has blocked. # TODO(mrry): Figure out how to do this without sleeping. 
time.sleep(0.1) for enqueue_op in enqueue_ops: sess.run(enqueue_op) results = [] def dequeue(): for _ in xrange(len(elems)): results.append(sess.run(dequeued_t)) enqueue_thread = self.checkedThread(target=enqueue) dequeue_thread = self.checkedThread(target=dequeue) enqueue_thread.start() dequeue_thread.start() enqueue_thread.join() dequeue_thread.join() for elem, result in zip(elems, results): self.assertEqual([elem], result) def testMultiEnqueueAndDequeue(self): with self.test_session() as sess: q = tf.FIFOQueue(10, (tf.int32, tf.float32)) elems = [(5, 10.0), (10, 20.0), (15, 30.0)] enqueue_ops = [q.enqueue((x, y)) for x, y in elems] dequeued_t = q.dequeue() for enqueue_op in enqueue_ops: enqueue_op.run() for i in xrange(len(elems)): x_val, y_val = sess.run(dequeued_t) x, y = elems[i] self.assertEqual([x], x_val) self.assertEqual([y], y_val) def testQueueSizeEmpty(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) self.assertEqual([0], q.size().eval()) def testQueueSizeAfterEnqueueAndDequeue(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) enqueue_op = q.enqueue((10.0,)) dequeued_t = q.dequeue() size = q.size() self.assertEqual([], size.get_shape()) enqueue_op.run() self.assertEqual(1, size.eval()) dequeued_t.op.run() self.assertEqual(0, size.eval()) def testEnqueueMany(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue() enqueue_op.run() enqueue_op.run() for i in range(8): vals = dequeued_t.eval() self.assertEqual([elems[i % 4]], vals) def testEmptyEnqueueMany(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) empty_t = tf.constant([], dtype=tf.float32, shape=[0, 2, 3]) enqueue_op = q.enqueue_many((empty_t,)) size_t = q.size() self.assertEqual([0], size_t.eval()) enqueue_op.run() self.assertEqual([0], size_t.eval()) def testEmptyDequeueMany(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32, 
shapes=()) enqueue_op = q.enqueue((10.0,)) dequeued_t = q.dequeue_many(0) self.assertEqual([], dequeued_t.eval().tolist()) enqueue_op.run() self.assertEqual([], dequeued_t.eval().tolist()) def testEmptyDequeueUpTo(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32, shapes=()) enqueue_op = q.enqueue((10.0,)) dequeued_t = q.dequeue_up_to(0) self.assertEqual([], dequeued_t.eval().tolist()) enqueue_op.run() self.assertEqual([], dequeued_t.eval().tolist()) def testEmptyDequeueManyWithNoShape(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) # Expect the operation to fail due to the shape not being constrained. with self.assertRaisesOpError("specified shapes"): q.dequeue_many(0).eval() def testMultiEnqueueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(10, (tf.float32, tf.int32)) float_elems = [10.0, 20.0, 30.0, 40.0] int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]] enqueue_op = q.enqueue_many((float_elems, int_elems)) dequeued_t = q.dequeue() enqueue_op.run() enqueue_op.run() for i in range(8): float_val, int_val = sess.run(dequeued_t) self.assertEqual(float_elems[i % 4], float_val) self.assertAllEqual(int_elems[i % 4], int_val) def testDequeueMany(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue_many(4) enqueue_op.run() self.assertAllEqual(elems[0:4], dequeued_t.eval()) self.assertAllEqual(elems[4:8], dequeued_t.eval()) def testDequeueUpToNoBlocking(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue_up_to(4) enqueue_op.run() self.assertAllEqual(elems[0:4], dequeued_t.eval()) self.assertAllEqual(elems[4:8], dequeued_t.eval()) def testMultiDequeueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(10, (tf.float32, 
tf.int32), shapes=((), (2,))) float_elems = [ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]] enqueue_op = q.enqueue_many((float_elems, int_elems)) dequeued_t = q.dequeue_many(4) dequeued_single_t = q.dequeue() enqueue_op.run() float_val, int_val = sess.run(dequeued_t) self.assertAllEqual(float_elems[0:4], float_val) self.assertAllEqual(int_elems[0:4], int_val) self.assertEqual(float_val.shape, dequeued_t[0].get_shape()) self.assertEqual(int_val.shape, dequeued_t[1].get_shape()) float_val, int_val = sess.run(dequeued_t) self.assertAllEqual(float_elems[4:8], float_val) self.assertAllEqual(int_elems[4:8], int_val) float_val, int_val = sess.run(dequeued_single_t) self.assertAllEqual(float_elems[8], float_val) self.assertAllEqual(int_elems[8], int_val) self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape()) self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape()) def testMultiDequeueUpToNoBlocking(self): with self.test_session() as sess: q = tf.FIFOQueue(10, (tf.float32, tf.int32), shapes=((), (2,))) float_elems = [ 10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0] int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], [17, 18], [19, 20]] enqueue_op = q.enqueue_many((float_elems, int_elems)) dequeued_t = q.dequeue_up_to(4) enqueue_op.run() float_val, int_val = sess.run(dequeued_t) self.assertAllEqual(float_elems[0:4], float_val) self.assertAllEqual(int_elems[0:4], int_val) self.assertEqual([None], dequeued_t[0].get_shape().as_list()) self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list()) float_val, int_val = sess.run(dequeued_t) self.assertAllEqual(float_elems[4:8], float_val) self.assertAllEqual(int_elems[4:8], int_val) def testHighDimension(self): with self.test_session(): q = tf.FIFOQueue(10, tf.int32, (4, 4, 4, 4)) elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], 
np.int32) enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue_many(10) enqueue_op.run() self.assertAllEqual(dequeued_t.eval(), elems) def testEnqueueWrongShape(self): q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((), (2))) with self.assertRaises(ValueError): q.enqueue(([1, 2], [2, 2])) with self.assertRaises(ValueError): q.enqueue_many((7, [[1, 2], [3, 4], [5, 6]])) def testBatchSizeMismatch(self): q = tf.FIFOQueue(10, (tf.int32, tf.int32, tf.int32), ((), (), ())) with self.assertRaises(ValueError): q.enqueue_many(([1, 2, 3], [1, 2], [1, 2, 3])) with self.assertRaises(ValueError): q.enqueue_many(([1, 2, 3], [1, 2], tf.placeholder(tf.int32))) with self.assertRaises(ValueError): q.enqueue_many((tf.placeholder(tf.int32), [1, 2], [1, 2, 3])) def testEnqueueManyEmptyTypeConversion(self): q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ())) enq = q.enqueue_many(([], [])) self.assertEqual(tf.int32, enq.inputs[1].dtype) self.assertEqual(tf.float32, enq.inputs[2].dtype) def testEnqueueWrongType(self): q = tf.FIFOQueue(10, (tf.int32, tf.float32), ((), ())) with self.assertRaises(ValueError): q.enqueue((tf.placeholder(tf.int32), tf.placeholder(tf.int32))) with self.assertRaises(ValueError): q.enqueue_many((tf.placeholder(tf.int32), tf.placeholder(tf.int32))) def testEnqueueWrongShapeAtRuntime(self): with self.test_session() as sess: q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3))) elems_ok = np.array([1] * 4).reshape((2, 2)).astype(np.int32) elems_bad = tf.placeholder(tf.int32) enqueue_op = q.enqueue((elems_ok, elems_bad)) with self.assertRaisesRegexp( tf.errors.InvalidArgumentError, r"Expected \[3,3\], got \[3,4\]"): sess.run([enqueue_op], feed_dict={elems_bad: np.array([1] * 12).reshape((3, 4))}) def testEnqueueDequeueManyWrongShape(self): with self.test_session() as sess: q = tf.FIFOQueue(10, (tf.int32, tf.int32), ((2, 2), (3, 3))) elems_ok = np.array([1] * 8).reshape((2, 2, 2)).astype(np.int32) elems_bad = tf.placeholder(tf.int32) enqueue_op = 
q.enqueue_many((elems_ok, elems_bad)) dequeued_t = q.dequeue_many(2) with self.assertRaisesRegexp( tf.errors.InvalidArgumentError, "Shape mismatch in tuple component 1. " r"Expected \[2,3,3\], got \[2,3,4\]"): sess.run([enqueue_op], feed_dict={elems_bad: np.array([1] * 24).reshape((2, 3, 4))}) dequeued_t.eval() def testParallelEnqueueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(1000, tf.float32, shapes=()) elems = [10.0 * x for x in range(100)] enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue_many(1000) # Enqueue 100 items in parallel on 10 threads. def enqueue(): sess.run(enqueue_op) threads = [self.checkedThread(target=enqueue) for _ in range(10)] for thread in threads: thread.start() for thread in threads: thread.join() self.assertItemsEqual(dequeued_t.eval(), elems * 10) def testParallelDequeueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(1000, tf.float32, shapes=()) elems = [10.0 * x for x in range(1000)] enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue_many(100) enqueue_op.run() # Dequeue 100 items in parallel on 10 threads. dequeued_elems = [] def dequeue(): dequeued_elems.extend(sess.run(dequeued_t)) threads = [self.checkedThread(target=dequeue) for _ in range(10)] for thread in threads: thread.start() for thread in threads: thread.join() self.assertItemsEqual(elems, dequeued_elems) def testParallelDequeueUpTo(self): with self.test_session() as sess: q = tf.FIFOQueue(1000, tf.float32, shapes=()) elems = [10.0 * x for x in range(1000)] enqueue_op = q.enqueue_many((elems,)) close_op = q.close() dequeued_t = q.dequeue_up_to(101) enqueue_op.run() close_op.run() # Dequeue up to 101 items in parallel on 10 threads, from closed queue. 
dequeued_elems = [] def dequeue(): dequeued_elems.extend(sess.run(dequeued_t)) threads = [self.checkedThread(target=dequeue) for _ in range(10)] for thread in threads: thread.start() for thread in threads: thread.join() self.assertItemsEqual(elems, dequeued_elems) def testParallelEnqueueAndDequeue(self): with self.test_session() as sess: q = tf.FIFOQueue(50, tf.float32, shapes=()) initial_elements = [10.0] * 49 q.enqueue_many((initial_elements,)).run() enqueue_op = q.enqueue((20.0,)) dequeued_t = q.dequeue() def enqueue(): for _ in xrange(100): sess.run(enqueue_op) def dequeue(): for _ in xrange(100): self.assertTrue(sess.run(dequeued_t) in (10.0, 20.0)) enqueue_threads = [self.checkedThread(target=enqueue) for _ in range(10)] dequeue_threads = [self.checkedThread(target=dequeue) for _ in range(10)] for enqueue_thread in enqueue_threads: enqueue_thread.start() for dequeue_thread in dequeue_threads: dequeue_thread.start() for enqueue_thread in enqueue_threads: enqueue_thread.join() for dequeue_thread in dequeue_threads: dequeue_thread.join() # Dequeue the initial count of elements to clean up. cleanup_elems = q.dequeue_many(49).eval() for elem in cleanup_elems: self.assertTrue(elem in (10.0, 20.0)) def testMixtureOfEnqueueAndEnqueueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.int32, shapes=()) enqueue_placeholder = tf.placeholder(tf.int32, shape=()) enqueue_op = q.enqueue((enqueue_placeholder,)) enqueuemany_placeholder = tf.placeholder( tf.int32, shape=(None,)) enqueuemany_op = q.enqueue_many((enqueuemany_placeholder,)) dequeued_t = q.dequeue() close_op = q.close() def dequeue(): for i in xrange(250): self.assertEqual(i, sess.run(dequeued_t)) dequeue_thread = self.checkedThread(target=dequeue) dequeue_thread.start() elements_enqueued = 0 while elements_enqueued < 250: # With equal probability, run Enqueue or enqueue_many. 
if random.random() > 0.5: enqueue_op.run({enqueue_placeholder: elements_enqueued}) elements_enqueued += 1 else: count = random.randint(0, min(20, 250 - elements_enqueued)) range_to_enqueue = np.arange(elements_enqueued, elements_enqueued + count, dtype=np.int32) enqueuemany_op.run({enqueuemany_placeholder: range_to_enqueue}) elements_enqueued += count close_op.run() dequeue_thread.join() self.assertEqual(0, q.size().eval()) def testMixtureOfDequeueAndDequeueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.int32, shapes=()) enqueue_op = q.enqueue_many((np.arange(250, dtype=np.int32),)) dequeued_t = q.dequeue() count_placeholder = tf.placeholder(tf.int32, shape=()) dequeuemany_t = q.dequeue_many(count_placeholder) def enqueue(): sess.run(enqueue_op) enqueue_thread = self.checkedThread(target=enqueue) enqueue_thread.start() elements_dequeued = 0 while elements_dequeued < 250: # With equal probability, run Dequeue or dequeue_many. if random.random() > 0.5: self.assertEqual(elements_dequeued, dequeued_t.eval()) elements_dequeued += 1 else: count = random.randint(0, min(20, 250 - elements_dequeued)) expected_range = np.arange(elements_dequeued, elements_dequeued + count, dtype=np.int32) self.assertAllEqual( expected_range, dequeuemany_t.eval({count_placeholder: count})) elements_dequeued += count q.close().run() enqueue_thread.join() self.assertEqual(0, q.size().eval()) def testBlockingDequeueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue_many(4) dequeued_elems = [] def enqueue(): # The enqueue_op should run after the dequeue op has blocked. # TODO(mrry): Figure out how to do this without sleeping. 
time.sleep(0.1) sess.run(enqueue_op) def dequeue(): dequeued_elems.extend(sess.run(dequeued_t).tolist()) enqueue_thread = self.checkedThread(target=enqueue) dequeue_thread = self.checkedThread(target=dequeue) enqueue_thread.start() dequeue_thread.start() enqueue_thread.join() dequeue_thread.join() self.assertAllEqual(elems, dequeued_elems) def testBlockingDequeueUpTo(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) dequeued_t = q.dequeue_up_to(4) dequeued_elems = [] def enqueue(): # The enqueue_op should run after the dequeue op has blocked. # TODO(mrry): Figure out how to do this without sleeping. time.sleep(0.1) sess.run(enqueue_op) def dequeue(): dequeued_elems.extend(sess.run(dequeued_t).tolist()) enqueue_thread = self.checkedThread(target=enqueue) dequeue_thread = self.checkedThread(target=dequeue) enqueue_thread.start() dequeue_thread.start() enqueue_thread.join() dequeue_thread.join() self.assertAllEqual(elems, dequeued_elems) def testDequeueManyWithTensorParameter(self): with self.test_session(): # Define a first queue that contains integer counts. dequeue_counts = [random.randint(1, 10) for _ in range(100)] count_q = tf.FIFOQueue(100, tf.int32, ()) enqueue_counts_op = count_q.enqueue_many((dequeue_counts,)) total_count = sum(dequeue_counts) # Define a second queue that contains total_count elements. elems = [random.randint(0, 100) for _ in range(total_count)] q = tf.FIFOQueue(total_count, tf.int32, ()) enqueue_elems_op = q.enqueue_many((elems,)) # Define a subgraph that first dequeues a count, then DequeuesMany # that number of elements. 
dequeued_t = q.dequeue_many(count_q.dequeue()) enqueue_counts_op.run() enqueue_elems_op.run() dequeued_elems = [] for _ in dequeue_counts: dequeued_elems.extend(dequeued_t.eval()) self.assertEqual(elems, dequeued_elems) def testDequeueFromClosedQueue(self): with self.test_session(): q = tf.FIFOQueue(10, tf.float32) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) close_op = q.close() dequeued_t = q.dequeue() enqueue_op.run() close_op.run() for elem in elems: self.assertEqual([elem], dequeued_t.eval()) # Expect the operation to fail due to the queue being closed. with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed and has insufficient"): dequeued_t.eval() def testBlockingDequeueFromClosedQueue(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) close_op = q.close() dequeued_t = q.dequeue() enqueue_op.run() def dequeue(): for elem in elems: self.assertEqual([elem], sess.run(dequeued_t)) # Expect the operation to fail due to the queue being closed. with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed and has insufficient"): sess.run(dequeued_t) dequeue_thread = self.checkedThread(target=dequeue) dequeue_thread.start() # The close_op should run after the dequeue_thread has blocked. # TODO(mrry): Figure out how to do this without sleeping. time.sleep(0.1) close_op.run() dequeue_thread.join() def testBlockingDequeueFromClosedEmptyQueue(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32) close_op = q.close() dequeued_t = q.dequeue() def dequeue(): # Expect the operation to fail due to the queue being closed. with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed and has insufficient"): sess.run(dequeued_t) dequeue_thread = self.checkedThread(target=dequeue) dequeue_thread.start() # The close_op should run after the dequeue_thread has blocked. 
# TODO(mrry): Figure out how to do this without sleeping. time.sleep(0.1) close_op.run() dequeue_thread.join() def testBlockingDequeueManyFromClosedQueue(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) close_op = q.close() dequeued_t = q.dequeue_many(4) enqueue_op.run() def dequeue(): self.assertAllEqual(elems, sess.run(dequeued_t)) # Expect the operation to fail due to the queue being closed. with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed and has insufficient"): sess.run(dequeued_t) dequeue_thread = self.checkedThread(target=dequeue) dequeue_thread.start() # The close_op should run after the dequeue_thread has blocked. # TODO(mrry): Figure out how to do this without sleeping. time.sleep(0.1) close_op.run() dequeue_thread.join() def testBlockingDequeueManyButNotAllFromClosedQueue(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) close_op = q.close() dequeued_t = q.dequeue_many(3) enqueue_op.run() def dequeue(): self.assertAllEqual(elems[:3], sess.run(dequeued_t)) # Expect the operation to fail due to the queue being closed. with self.assertRaisesRegexp(tf.errors.OutOfRangeError, "is closed and has insufficient"): sess.run(dequeued_t) dequeue_thread = self.checkedThread(target=dequeue) dequeue_thread.start() # The close_op should run after the dequeue_thread has blocked. # TODO(mrry): Figure out how to do this without sleeping. 
time.sleep(0.1) close_op.run() dequeue_thread.join() def testDequeueUpToFromClosedQueueReturnsRemainder(self): with self.test_session() as sess: q = tf.FIFOQueue(10, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) close_op = q.close() dequeued_t = q.dequeue_up_to(3) enqueue_op.run() def dequeue(): self.assertAllEqual(elems[:3], sess.run(dequeued_t)) self.assertAllEqual(elems[3:], sess.run(dequeued_t)) dequeue_thread = self.checkedThread(target=dequeue) dequeue_thread.start() # The close_op should run after the dequeue_thread has blocked. # TODO(mrry): Figure out how to do this without sleeping. time.sleep(0.1) close_op.run() dequeue_thread.join() def testEnqueueManyLargerThanCapacityWithConcurrentDequeueMany(self): with self.test_session() as sess: q = tf.FIFOQueue(4, tf.float32, ()) elems = [10.0, 20.0, 30.0, 40.0] enqueue_op = q.enqueue_many((elems,)) close_op = q.close() dequeued_t = q.dequeue_many(3) cleanup_dequeue_t = q.dequeue() def enqueue(): sess.run(enqueue_op) def dequeue(): self.assertAllEqual(elems[0:3], sess.run(dequeued_t)) with self.assertRaises(tf.errors.OutOfRangeError): sess.run(dequeued_t) self.assertEqual(elems[3], sess.run(cleanup_dequeue_t)) def close(): sess.run(close_op) enqueue_thread = self.checkedThread(target=enqueue) enqueue_thread.start() dequeue_thread = self.checkedThread(target=dequeue) dequeue_thread.start() # The close_op should run after the dequeue_thread has blocked. # TODO(mrry): Figure out how to do this without sleeping. 
      # --- tail of the preceding blocking-enqueue/close test (its `def` lies
      # above this chunk): start the closer thread and join all workers. ---
      time.sleep(0.1)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      enqueue_thread.join()
      dequeue_thread.join()
      close_thread.join()

  def testClosedBlockingDequeueManyRestoresPartialBatch(self):
    # A dequeue_many that cannot be satisfied when the queue is closed must
    # put the already-taken elements back in FIFO order.
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, (tf.float32, tf.float32), ((), ()))
      elems_a = [1.0, 2.0, 3.0]
      elems_b = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems_a, elems_b))
      dequeued_a_t, dequeued_b_t = q.dequeue_many(4)
      cleanup_dequeue_a_t, cleanup_dequeue_b_t = q.dequeue()
      close_op = q.close()

      enqueue_op.run()

      def dequeue():
        with self.assertRaises(tf.errors.OutOfRangeError):
          sess.run([dequeued_a_t, dequeued_b_t])

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      close_op.run()
      dequeue_thread.join()
      # Test that the elements in the partially-dequeued batch are
      # restored in the correct order.
      for elem_a, elem_b in zip(elems_a, elems_b):
        val_a, val_b = sess.run([cleanup_dequeue_a_t, cleanup_dequeue_b_t])
        self.assertEqual(elem_a, val_a)
        self.assertEqual(elem_b, val_b)
      self.assertEqual(0, q.size().eval())

  def testBlockingDequeueManyFromClosedEmptyQueue(self):
    # dequeue_many on an empty, then-closed queue must raise OutOfRange.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_many(4)

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()

  def testBlockingDequeueUpToFromClosedEmptyQueue(self):
    # Same as above, but for the dequeue_up_to variant.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, ())
      close_op = q.close()
      dequeued_t = q.dequeue_up_to(4)

      def dequeue():
        # Expect the operation to fail due to the queue being closed.
        with self.assertRaisesRegexp(tf.errors.OutOfRangeError,
                                     "is closed and has insufficient"):
          sess.run(dequeued_t)

      dequeue_thread = self.checkedThread(target=dequeue)
      dequeue_thread.start()
      # The close_op should run after the dequeue_thread has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      close_op.run()
      dequeue_thread.join()

  def testEnqueueToClosedQueue(self):
    # Enqueue after close raises CancelledError.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      close_op = q.close()

      enqueue_op.run()
      close_op.run()

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
        enqueue_op.run()

  def testEnqueueManyToClosedQueue(self):
    # enqueue_many after close raises CancelledError.
    with self.test_session():
      q = tf.FIFOQueue(10, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      close_op = q.close()

      enqueue_op.run()
      close_op.run()

      # Expect the operation to fail due to the queue being closed.
      with self.assertRaisesRegexp(tf.errors.CancelledError, "is closed"):
        enqueue_op.run()

  def testBlockingEnqueueToFullQueue(self):
    # A blocked enqueue completes once a dequeue frees capacity.
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
      self.assertEqual([50.0], dequeued_t.eval())
      thread.join()

  def testBlockingEnqueueManyToFullQueue(self):
    # A blocked enqueue_many drains into the queue as space appears.
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()
      # The dequeue ops should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)
      for elem in elems:
        self.assertEqual([elem], dequeued_t.eval())
        time.sleep(0.01)
      self.assertEqual([50.0], dequeued_t.eval())
      self.assertEqual([60.0], dequeued_t.eval())
      # NOTE(review): the enqueue thread is not joined here in the original.

  def testBlockingEnqueueBeforeClose(self):
    # An enqueue blocked before close() still succeeds; elements enqueued
    # before the close remain dequeuable afterwards.
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0, 40.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue((50.0,))
      close_op = q.close()
      dequeued_t = q.dequeue()

      enqueue_op.run()

      def blocking_enqueue():
        # Expect the operation to succeed once the dequeue op runs.
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()

      for elem in [20.0, 30.0, 40.0, 50.0]:
        self.assertEqual(elem, dequeued_t.eval())
      self.assertEqual(0, q.size().eval())

  def testBlockingEnqueueManyBeforeClose(self):
    # Same as above, for a blocked enqueue_many racing with close().
    with self.test_session() as sess:
      q = tf.FIFOQueue(4, tf.float32)
      elems = [10.0, 20.0, 30.0]
      enqueue_op = q.enqueue_many((elems,))
      blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
      close_op = q.close()
      dequeued_t = q.dequeue()
      enqueue_op.run()

      def blocking_enqueue():
        sess.run(blocking_enqueue_op)

      enqueue_thread = self.checkedThread(target=blocking_enqueue)
      enqueue_thread.start()
      # The close_op should run after the blocking_enqueue_op has blocked.
      # TODO(mrry): Figure out how to do this without sleeping.
      time.sleep(0.1)

      def close():
        sess.run(close_op)

      close_thread = self.checkedThread(target=close)
      close_thread.start()

      # The dequeue will unblock both threads.
      self.assertEqual(10.0, dequeued_t.eval())
      enqueue_thread.join()
      close_thread.join()
      for elem in [20.0, 30.0, 50.0, 60.0]:
        self.assertEqual(elem, dequeued_t.eval())

  def testDoesNotLoseValue(self):
    # Repeated size() evaluations must not disturb queue contents.
    with self.test_session():
      q = tf.FIFOQueue(1, tf.float32)
      enqueue_op = q.enqueue((10.0,))
      size_t = q.size()

      enqueue_op.run()
      for _ in range(500):
        self.assertEqual(size_t.eval(), [1])

  def testSharedQueueSameSession(self):
    # Two queue handles with the same shared_name refer to one queue.
    with self.test_session():
      q1 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")
      q1.enqueue((10.0,)).run()

      q2 = tf.FIFOQueue(
          1, tf.float32, shared_name="shared_queue")

      q1_size_t = q1.size()
      q2_size_t = q2.size()

      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      self.assertEqual(q2.dequeue().eval(), [10.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])

      q2.enqueue((20.0,)).run()

      self.assertEqual(q1_size_t.eval(), [1])
      self.assertEqual(q2_size_t.eval(), [1])

      self.assertEqual(q1.dequeue().eval(), [20.0])

      self.assertEqual(q1_size_t.eval(), [0])
      self.assertEqual(q2_size_t.eval(), [0])
  def testIncompatibleSharedQueueErrors(self):
    # Opening a shared queue with mismatched capacity/types/shapes must fail
    # when the second handle's queue_ref is first evaluated.
    with self.test_session():
      q_a_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_a")
      q_a_2 = tf.FIFOQueue(15, tf.float32, shared_name="q_a")
      q_a_1.queue_ref.eval()
      with self.assertRaisesOpError("capacity"):
        q_a_2.queue_ref.eval()

      q_b_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_b")
      q_b_2 = tf.FIFOQueue(10, tf.int32, shared_name="q_b")
      q_b_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_b_2.queue_ref.eval()

      q_c_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_c")
      q_c_2 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_c")
      q_c_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_c_2.queue_ref.eval()

      q_d_1 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
      q_d_2 = tf.FIFOQueue(10, tf.float32, shared_name="q_d")
      q_d_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_d_2.queue_ref.eval()

      q_e_1 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
      q_e_2 = tf.FIFOQueue(
          10, tf.float32, shapes=[(1, 1, 2, 4)], shared_name="q_e")
      q_e_1.queue_ref.eval()
      with self.assertRaisesOpError("component shapes"):
        q_e_2.queue_ref.eval()

      q_f_1 = tf.FIFOQueue(10, tf.float32, shared_name="q_f")
      q_f_2 = tf.FIFOQueue(
          10, (tf.float32, tf.int32), shared_name="q_f")
      q_f_1.queue_ref.eval()
      with self.assertRaisesOpError("component types"):
        q_f_2.queue_ref.eval()

  def testSelectQueue(self):
    # FIFOQueue.from_list dispatches ops to a runtime-selected queue.
    with self.test_session():
      num_queues = 10
      qlist = list()
      for _ in xrange(num_queues):
        qlist.append(tf.FIFOQueue(10, tf.float32))
      # Enqueue/Dequeue into a dynamically selected queue
      for _ in xrange(20):
        index = np.random.randint(num_queues)
        q = tf.FIFOQueue.from_list(index, qlist)
        q.enqueue((10.,)).run()
        self.assertEqual(q.dequeue().eval(), 10.0)

  def testSelectQueueOutOfRange(self):
    # An index beyond the queue list must raise at run time.
    with self.test_session():
      q1 = tf.FIFOQueue(10, tf.float32)
      q2 = tf.FIFOQueue(15, tf.float32)
      enq_q = tf.FIFOQueue.from_list(3, [q1, q2])
      with self.assertRaisesOpError("Index must be in the range"):
        enq_q.dequeue().eval()

  # Helpers: each runs one blocking op and expects it to be cancelled when
  # the session is closed (used by testResetOfBlockingOperation).
  def _blockingDequeue(self, sess, dequeue_op):
    with self.assertRaisesOpError("Dequeue operation was cancelled"):
      sess.run(dequeue_op)

  def _blockingDequeueMany(self, sess, dequeue_many_op):
    with self.assertRaisesOpError("Dequeue operation was cancelled"):
      sess.run(dequeue_many_op)

  def _blockingEnqueue(self, sess, enqueue_op):
    with self.assertRaisesOpError("Enqueue operation was cancelled"):
      sess.run(enqueue_op)

  def _blockingEnqueueMany(self, sess, enqueue_many_op):
    with self.assertRaisesOpError("Enqueue operation was cancelled"):
      sess.run(enqueue_many_op)

  def testResetOfBlockingOperation(self):
    # Closing the session cancels all four kinds of blocked queue ops.
    with self.test_session() as sess:
      q_empty = tf.FIFOQueue(5, tf.float32, ())
      dequeue_op = q_empty.dequeue()
      dequeue_many_op = q_empty.dequeue_many(1)

      q_full = tf.FIFOQueue(5, tf.float32)
      sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
      enqueue_op = q_full.enqueue((6.0,))
      enqueue_many_op = q_full.enqueue_many(([6.0],))

      threads = [
          self.checkedThread(self._blockingDequeue, args=(sess, dequeue_op)),
          self.checkedThread(self._blockingDequeueMany, args=(sess,
                                                              dequeue_many_op)),
          self.checkedThread(self._blockingEnqueue, args=(sess, enqueue_op)),
          self.checkedThread(self._blockingEnqueueMany, args=(sess,
                                                              enqueue_many_op))]
      for t in threads:
        t.start()
      time.sleep(0.1)
      sess.close()  # Will cancel the blocked operations.
      for t in threads:
        t.join()

  def testBigEnqueueMany(self):
    # An enqueue_many larger than capacity trickles in as dequeues happen.
    with self.test_session() as sess:
      q = tf.FIFOQueue(5, tf.int32, ((),))
      elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
      enq = q.enqueue_many((elem,))
      deq = q.dequeue()
      size_op = q.size()

      enq_done = []

      def blocking_enqueue():
        enq_done.append(False)
        # This will fill the queue and then block until enough dequeues happen.
        sess.run(enq)
        enq_done.append(True)

      thread = self.checkedThread(target=blocking_enqueue)
      thread.start()

      # The enqueue should start and then block.
      results = []
      results.append(deq.eval())  # Will only complete after the enqueue starts.
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      for _ in range(3):
        results.append(deq.eval())

      time.sleep(0.1)
      self.assertEqual(len(enq_done), 1)
      self.assertEqual(sess.run(size_op), 5)

      # This dequeue will unblock the thread.
      results.append(deq.eval())
      time.sleep(0.1)
      self.assertEqual(len(enq_done), 2)
      thread.join()

      for i in range(5):
        self.assertEqual(size_op.eval(), 5 - i)
        results.append(deq.eval())
        self.assertEqual(size_op.eval(), 5 - i - 1)

      self.assertAllEqual(elem, results)

  def testBigDequeueMany(self):
    # A dequeue_many larger than the current size waits for enough enqueues.
    with self.test_session() as sess:
      q = tf.FIFOQueue(2, tf.int32, ((),))
      elem = np.arange(4, dtype=np.int32)
      enq_list = [q.enqueue((e,)) for e in elem]
      deq = q.dequeue_many(4)

      results = []

      def blocking_dequeue():
        # Will only complete after 4 enqueues complete.
        results.extend(sess.run(deq))

      thread = self.checkedThread(target=blocking_dequeue)
      thread.start()
      # The dequeue should start and then block.
      for enq in enq_list:
        # TODO(mrry): Figure out how to do this without sleeping.
        time.sleep(0.1)
        self.assertEqual(len(results), 0)
        sess.run(enq)

      # Enough enqueued to unblock the dequeue
      thread.join()
      self.assertAllEqual(elem, results)

  def testDtypes(self):
    # Round-trip every supported dtype through enqueue_many/dequeue_many.
    with self.test_session() as sess:
      dtypes = [tf.float32, tf.float64, tf.int32, tf.uint8, tf.int16, tf.int8,
                tf.int64, tf.bool, tf.complex64, tf.complex128]
      shape = (32, 4, 128)
      q = tf.FIFOQueue(32, dtypes, [shape[1:]] * len(dtypes))

      input_tuple = []
      for dtype in dtypes:
        np_dtype = dtype.as_numpy_dtype
        np_array = np.random.randint(-10, 10, shape)
        if dtype == tf.bool:
          np_array = np_array > 0
        elif dtype in (tf.complex64, tf.complex128):
          np_array = np.sqrt(np_array.astype(np_dtype))
        else:
          np_array = np_array.astype(np_dtype)
        input_tuple.append(np_array)

      q.enqueue_many(input_tuple).run()

      output_tuple_t = q.dequeue_many(32)
      output_tuple = sess.run(output_tuple_t)

      for (input_elem, output_elem) in zip(input_tuple, output_tuple):
        self.assertAllEqual(input_elem, output_elem)

  def testDeviceColocation(self):
    # Ops on a queue are colocated with the queue's device, not the caller's.
    with tf.device("/job:ps"):
      q = tf.FIFOQueue(32, [tf.int32], name="q")

    with tf.device("/job:worker/task:7"):
      dequeued_t = q.dequeue()

    self.assertDeviceEqual("/job:ps", dequeued_t.device)
    self.assertEqual([b"loc:@q"], dequeued_t.op.colocation_groups())


class FIFOQueueDictTest(tf.test.TestCase):
  # Tests for FIFOQueue used with named components (dict-style access).

  def testConstructor(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "j"),
                       shared_name="foo", name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertEquals(tf.string_ref, q.queue_ref.dtype)
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types'
               value { list { type: DT_INT32 type : DT_FLOAT } } }
        attr { key: 'shapes' value { list {} } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: 'foo' } }
        """, q.queue_ref.op.node_def)
      self.assertEqual(["i", "j"], q.names)
  def testConstructorWithShapes(self):
    # (head of this method is split across the raw chunk boundary; the
    # FIFOQueue construction below is reassembled from both fragments)
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32), names=("i", "f"),
                       shapes=(tf.TensorShape([1, 1, 2, 3]),
                               tf.TensorShape([5, 8])), name="Q")
      self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
      self.assertEquals(tf.string_ref, q.queue_ref.dtype)
      self.assertProtoEquals("""
        name:'Q' op:'FIFOQueue'
        attr { key: 'component_types'
               value { list { type: DT_INT32 type : DT_FLOAT } } }
        attr { key: 'shapes'
               value { list {
                 shape { dim { size: 1 }
                         dim { size: 1 }
                         dim { size: 2 }
                         dim { size: 3 } }
                 shape { dim { size: 5 }
                         dim { size: 8 } }
               } } }
        attr { key: 'capacity' value { i: 5 } }
        attr { key: 'container' value { s: '' } }
        attr { key: 'shared_name' value { s: '' } }
        """, q.queue_ref.op.node_def)
      self.assertEqual(["i", "f"], q.names)

  def testEnqueueDequeueOneComponent(self):
    # With names set, enqueue/enqueue_many require dicts keyed exactly by
    # the component names; dequeue results are dicts.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, tf.float32, shapes=((),), names="f")
      # Verify that enqueue() checks that when using names we must enqueue a
      # dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op = q.enqueue(10.0)
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op = q.enqueue((10.0,))
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"x": 12})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"f": 10.0, "s": "aa"})
      enqueue_op = q.enqueue({"f": 10.0})
      enqueue_op2 = q.enqueue({"f": 20.0})
      enqueue_op3 = q.enqueue({"f": 30.0})
      # Verify that enqueue_many() checks that when using names we must enqueue
      # a dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op4 = q.enqueue_many([40.0, 50.0])
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"x": 12})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "s": ["aa", "bb"]})
      enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0]})
      dequeue = q.dequeue()
      dequeue_2 = q.dequeue_many(2)
      sess.run(enqueue_op)
      sess.run(enqueue_op2)
      sess.run(enqueue_op3)
      sess.run(enqueue_op4)
      f = sess.run(dequeue["f"])
      self.assertEqual(10.0, f)
      f = sess.run(dequeue_2["f"])
      self.assertEqual([20.0, 30.0], list(f))
      f = sess.run(dequeue_2["f"])
      self.assertEqual([40.0, 50.0], list(f))

  def testEnqueueDequeueMultipleComponent(self):
    # Same dict contract with three named components of different dtypes.
    with self.test_session() as sess:
      q = tf.FIFOQueue(10, (tf.float32, tf.int32, tf.string),
                       shapes=((), (), ()), names=("f", "i", "s"))
      # Verify that enqueue() checks that when using names we must enqueue a
      # dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op = q.enqueue((10.0, 123, "aa"))
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"x": 10.0})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"i": 12, "s": "aa"})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0, "x": 10.0})
      enqueue_op = q.enqueue({"i": 123, "s": "aa", "f": 10.0})
      enqueue_op2 = q.enqueue({"i": 124, "s": "bb", "f": 20.0})
      enqueue_op3 = q.enqueue({"i": 125, "s": "cc", "f": 30.0})
      # Verify that enqueue_many() checks that when using names we must enqueue
      # a dictionary.
      with self.assertRaisesRegexp(ValueError, "enqueue a dictionary"):
        enqueue_op4 = q.enqueue_many(([40.0, 50.0], [126, 127], ["dd", "ee"]))
      # The dictionary keys must match the queue component names.
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"x": [10.0, 20.0]})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"i": [12, 12], "s": ["aa", "bb"]})
      with self.assertRaisesRegexp(ValueError, "match names of Queue"):
        enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
                                      "s": ["dd", "ee"], "x": [1, 2]})
      enqueue_op4 = q.enqueue_many({"f": [40.0, 50.0], "i": [126, 127],
                                    "s": ["dd", "ee"]})
      dequeue = q.dequeue()
      dequeue_2 = q.dequeue_many(2)
      sess.run(enqueue_op)
      sess.run(enqueue_op2)
      sess.run(enqueue_op3)
      sess.run(enqueue_op4)
      i, f, s = sess.run([dequeue["i"], dequeue["f"], dequeue["s"]])
      self.assertEqual(123, i)
      self.assertEqual(10.0, f)
      self.assertEqual(tf.compat.as_bytes("aa"), s)
      i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
      self.assertEqual([124, 125], list(i))
      # NOTE(review): assertTrue here (original code) does not compare the
      # lists — it only checks truthiness; assertEqual was probably intended.
      self.assertTrue([20.0, 30.0], list(f))
      self.assertTrue([tf.compat.as_bytes("bb"), tf.compat.as_bytes("cc")],
                      list(s))
      i, f, s = sess.run([dequeue_2["i"], dequeue_2["f"], dequeue_2["s"]])
      self.assertEqual([126, 127], list(i))
      self.assertTrue([40.0, 50.0], list(f))
      self.assertTrue([tf.compat.as_bytes("dd"), tf.compat.as_bytes("ee")],
                      list(s))


class FIFOQueueWithTimeoutTest(tf.test.TestCase):

  def testDequeueWithTimeout(self):
    # A dequeue with no matching enqueue hits operation_timeout_in_ms.
    with self.test_session(
        config=tf.ConfigProto(operation_timeout_in_ms=20)) as sess:
      q = tf.FIFOQueue(10, tf.float32)
      self.assertEqual(tf.compat.as_bytes(""),
                       q.queue_ref.op.get_attr("container"))
      dequeued_t = q.dequeue()

      # Intentionally do not run any enqueue_ops so that dequeue will block
      # until operation_timeout_in_ms.
      with self.assertRaisesRegexp(tf.errors.DeadlineExceededError,
                                   "Timed out waiting for notification"):
        sess.run(dequeued_t)


class QueueContainerTest(tf.test.TestCase):

  def testContainer(self):
    # tf.container() scopes the queue's resource container attribute.
    with tf.Graph().as_default():
      with tf.container("test"):
        q = tf.FIFOQueue(10, tf.float32)
    self.assertEqual(tf.compat.as_bytes("test"),
                     q.queue_ref.op.get_attr("container"))


class FIFOQueueBenchmark(tf.test.Benchmark):
  """Benchmark FIFOQueue operations."""

  def _build_graph(self):
    """Builds a graph that enqueues and dequeues a single float.

    Returns:
      A tuple with the graph init tensor and graph output tensor.
    """
    q = tf.FIFOQueue(1, "float")
    init = q.enqueue(1.0)
    x = q.dequeue()
    q_inc = q.enqueue(x + 1)
    return init, q_inc

  # TODO(suharshs): Add benchmarks for:
  #   - different capacities of the queue
  #   - various sizes of tensors
  #   - enqueue_many, dequeue_many

  def _run(self, num_iters):
    """Benchmarks enqueueing and dequeueing from a FIFOQueue.

    Args:
      num_iters: The number of iterations to run.

    Returns:
      The duration of the run in seconds.
    """
    graph = tf.Graph()
    with graph.as_default():
      init, output = self._build_graph()
    with tf.Session(graph=graph) as session:
      init.run()
      _ = session.run(output)  # warm up.
      start_time = time.time()
      for _ in range(num_iters):
        _ = session.run(output)
      duration = time.time() - start_time
    print("%f secs per enqueue-dequeue" % (duration / num_iters))

    self.report_benchmark(
        name="fifo_queue", iters=num_iters, wall_time=duration / num_iters)

    return duration


if __name__ == "__main__":
  tf.test.main()
GenerateTFRecord.py
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
import numpy as np
import traceback
import cv2
import os
import string
import pickle
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
from multiprocessing import Process,Lock
from TableGeneration.Table import Table
from multiprocessing import Process,Pool,cpu_count
import random
import argparse
from TableGeneration.tools import *
import numpy as np
import warnings
from TableGeneration.Transformation import *


def warn(*args,**kwargs):
    # No-op warning handler (can be assigned to warnings.warn to silence it).
    pass


class Logger:
    # Minimal append-only file logger; opens/closes 'logfile.txt' per write.
    def __init__(self):
        pass
        #self.file=open('logtxt.txt','a+')

    def write(self,txt):
        # Append `txt` to logfile.txt (opened fresh each call so concurrent
        # processes do not hold the handle).
        file = open('logfile.txt', 'a+')
        file.write(txt)
        file.close()


class GenerateTFRecord:
    # Synthesizes table images from the UNLV distribution and serializes them
    # (image + word vertices + row/col/cell adjacency matrices) to TFRecords.
    def __init__(self, outpath,filesize,unlvimagespath,unlvocrpath,unlvtablepath,visualizeimgs,visualizebboxes,distributionfilepath):
        self.outtfpath = outpath                  #directory to store tfrecords
        self.filesize=filesize                    #number of images in each tfrecord
        self.unlvocrpath=unlvocrpath              #unlv ocr ground truth files
        self.unlvimagespath=unlvimagespath        #unlv images
        self.unlvtablepath=unlvtablepath          #unlv ground truth of tables
        self.visualizeimgs=visualizeimgs          #whether to store images separately or not
        self.distributionfile=distributionfilepath  #pickle file containing UNLV distribution
        self.logger=Logger()                      #if we want to use logger and store output to file
        #self.logdir = 'logdir/'
        #self.create_dir(self.logdir)
        #logging.basicConfig(filename=os.path.join(self.logdir,'Log.log'), filemode='a+', format='%(name)s - %(levelname)s - %(message)s')

        self.num_of_max_vertices=900              #number of vertices (maximum number of words in any table)
        self.max_length_of_word=30                #max possible length of each word
        self.row_min=3                            #minimum number of rows in a table (includes headers)
        self.row_max=15                           #maximum number of rows in a table
        self.col_min=3                            #minimum number of columns in a table
        self.col_max=9                            #maximum number of columns in a table
        self.minshearval=-0.1                     #minimum value of shear to apply to images
        self.maxshearval=0.1                      #maximum value of shear to apply to images
        self.minrotval=-0.01                      #minimum rotation applied to images
        self.maxrotval=0.01                       #maximum rotation applied to images
        self.num_data_dims=5                      #data dimensions to store in tfrecord
        self.max_height=768                       #max image height
        self.max_width=1366                       #max image width
        self.tables_cat_dist = self.get_category_distribution(self.filesize)
        self.visualizebboxes=visualizebboxes

    def get_category_distribution(self,filesize):
        # Split `filesize` images into 4 near-equal category counts:
        # half/half, then each half split again.
        tables_cat_dist=[0,0,0,0]
        firstdiv=filesize//2
        tables_cat_dist[0]=firstdiv//2
        tables_cat_dist[1]=firstdiv-tables_cat_dist[0]
        seconddiv=filesize-firstdiv
        tables_cat_dist[2]=seconddiv//2
        tables_cat_dist[3]=seconddiv-tables_cat_dist[2]
        return tables_cat_dist

    def create_dir(self,fpath):
        #creates directory fpath if it does not exist
        if(not os.path.exists(fpath)):
            os.mkdir(fpath)

    def str_to_int(self,str):
        #converts each character in a word to equivalent int, zero-padded to
        #max_length_of_word. NOTE(review): parameter `str` and loop variable
        #`chr` shadow builtins — renaming would be a behavior-neutral cleanup.
        intsarr=np.array([ord(chr) for chr in str])
        padded_arr=np.zeros(shape=(self.max_length_of_word),dtype=np.int64)
        padded_arr[:len(intsarr)]=intsarr
        return padded_arr

    def convert_to_int(self, arr):
        #converts each element of the array to a Python int (original comment
        #said "to a string", which was incorrect)
        return [int(val) for val in arr]

    def pad_with_zeros(self,arr,shape):
        #will pad the input array with zeros to make it equal to 'shape'
        dummy=np.zeros(shape,dtype=np.int64)
        dummy[:arr.shape[0],:arr.shape[1]]=arr
        return dummy

    def generate_tf_record(self, im, cellmatrix, rowmatrix, colmatrix, arr,tablecategory,imgindex,output_file_name):
        '''This function generates tfrecord files using given information'''
        # Pad all three adjacency matrices to the fixed max-vertices size.
        cellmatrix=self.pad_with_zeros(cellmatrix,(self.num_of_max_vertices,self.num_of_max_vertices))
        colmatrix = self.pad_with_zeros(colmatrix, (self.num_of_max_vertices, self.num_of_max_vertices))
        rowmatrix = self.pad_with_zeros(rowmatrix, (self.num_of_max_vertices, self.num_of_max_vertices))

        #im = np.array(cv2.imread(img_path, 0),dtype=np.int64)
        im=im.astype(np.int64)
        img_height, img_width=im.shape
        # arr columns: [length, word, x0, y0, x1, y1] — length in col 0,
        # word text in col 1, bbox coords from col 2 on (inferred from usage).
        words_arr = arr[:, 1].tolist()
        no_of_words = len(words_arr)

        lengths_arr = self.convert_to_int(arr[:, 0])
        vertex_features=np.zeros(shape=(self.num_of_max_vertices,self.num_data_dims),dtype=np.int64)
        lengths_arr=np.array(lengths_arr).reshape(len(lengths_arr),-1)
        sample_out=np.array(np.concatenate((arr[:,2:],lengths_arr),axis=1))
        vertex_features[:no_of_words,:]=sample_out

        if(self.visualizebboxes):
            self.draw_matrices(im,arr,[rowmatrix,colmatrix,cellmatrix],imgindex,output_file_name)

        #vertex_text=np.chararray(shape=(self.num_of_max_vertices,self.max_length_of_word))
        #vertex_text[:no_of_words,:]=list(map(self.str_to_chars, words_arr))
        #vertex_text=words_arr+[""]*(self.num_of_max_vertices-len(words_arr))
        # Encode words as fixed-size int matrices (char codes, zero padded).
        vertex_text = np.zeros((self.num_of_max_vertices,self.max_length_of_word), dtype=np.int64)
        vertex_text[:no_of_words]=np.array(list(map(self.str_to_int,words_arr)))

        feature = dict()
        feature['image'] = tf.train.Feature(float_list=tf.train.FloatList(value=im.astype(np.float32).flatten()))
        feature['global_features'] = tf.train.Feature(float_list=tf.train.FloatList(value=np.array([img_height, img_width,no_of_words,tablecategory]).astype(np.float32).flatten()))
        feature['vertex_features'] = tf.train.Feature(float_list=tf.train.FloatList(value=vertex_features.astype(np.float32).flatten()))
        feature['adjacency_matrix_cells'] = tf.train.Feature(int64_list=tf.train.Int64List(value=cellmatrix.astype(np.int64).flatten()))
        feature['adjacency_matrix_cols'] = tf.train.Feature(int64_list=tf.train.Int64List(value=colmatrix.astype(np.int64).flatten()))
        feature['adjacency_matrix_rows'] = tf.train.Feature(int64_list=tf.train.Int64List(value=rowmatrix.astype(np.int64).flatten()))
        feature['vertex_text'] = tf.train.Feature(int64_list=tf.train.Int64List(value=vertex_text.astype(np.int64).flatten()))

        all_features = tf.train.Features(feature=feature)
        seq_ex = tf.train.Example(features=all_features)
        return seq_ex
generate_tables(self,driver,N_imgs,output_file_name): row_col_min=[self.row_min,self.col_min] #to randomly select number of rows row_col_max=[self.row_max,self.col_max] #to randomly select number of columns rc_arr = np.random.uniform(low=row_col_min, high=row_col_max, size=(N_imgs, 2)) #random row and col selection for N images all_table_categories=[0,0,0,0] #These 4 values will count the number of images for each of the category rc_arr[:,0]=rc_arr[:,0]+2 #increasing the number of rows by a fix 2. (We can comment out this line. Does not affect much) data_arr=[] exceptioncount=0 rc_count=0 #for iterating through row and col array for assigned_category,cat_count in enumerate(self.tables_cat_dist): for _ in range(cat_count): rows = int(round(rc_arr[rc_count][0])) cols = int(round(rc_arr[rc_count][1])) exceptcount=0 while(True): #This loop is to repeat and retry generating image if some an exception is encountered. try: #initialize table class table = Table(rows,cols,self.unlvimagespath,self.unlvocrpath,self.unlvtablepath,assigned_category+1,self.distributionfile) #get table of rows and cols based on unlv distribution and get features of this table #(same row, col and cell matrices, total unique ids, html conversion of table and its category) same_cell_matrix,same_col_matrix,same_row_matrix, id_count, html_content,tablecategory= table.create() #convert this html code to image using selenium webdriver. Get equivalent bounding boxes #for each word in the table. 
This will generate ground truth for our problem im,bboxes = html_to_img(driver, html_content, id_count) # apply_shear: bool - True: Apply Transformation, False: No Transformation | probability weight for shearing to be 25% #apply_shear = random.choices([True, False],weights=[0.25,0.75])[0] #if(apply_shear==True): if(assigned_category+1==4): #randomly select shear and rotation levels while(True): shearval = np.random.uniform(self.minshearval, self.maxshearval) rotval = np.random.uniform(self.minrotval, self.maxrotval) if(shearval!=0.0 or rotval!=0.0): break #If the image is transformed, then its categorycategory is 4 #transform image and bounding boxes of the words im, bboxes = Transform(im, bboxes, shearval, rotval, self.max_width, self.max_height) tablecategory=4 if(self.visualizeimgs): #if the image and equivalent html is need to be stored dirname=os.path.join('visualizeimgs/','category'+str(tablecategory)) f=open(os.path.join(dirname,'html',str(rc_count)+output_file_name.replace('.tfrecord','.html')),'w') f.write(html_content) f.close() im.save(os.path.join(dirname,'img',str(rc_count)+output_file_name.replace('.tfrecord','.png')), dpi=(600, 600)) # driver.quit() # 0/0 data_arr.append([[same_row_matrix, same_col_matrix, same_cell_matrix, bboxes,[tablecategory]],[im]]) all_table_categories[tablecategory-1]+=1 #print('Assigned category: ',assigned_category+1,', generated category: ',tablecategory) break except Exception as e: #traceback.print_exc() exceptcount+=1 if(exceptioncount>10): print('More than 10 exceptions occured for file: ',output_file_name) #if there are more than 10 exceptions, then return None return None #traceback.print_exc() #print('\nException No.', exceptioncount, ' File: ', str(output_file_name)) #logging.error("Exception Occured "+str(output_file_name),exc_info=True) rc_count+=1 if(len(data_arr)!=N_imgs): #If total number of images are not generated, then return None. 
print('Images not equal to the required size.') return None return data_arr,all_table_categories def draw_matrices(self,img,arr,matrices,imgindex,output_file_name): '''Call this fucntion to draw visualizations of a matrix on image''' no_of_words=len(arr) colors = np.random.randint(0, 255, (no_of_words, 3)) arr = arr[:, 2:] img=img.astype(np.uint8) img=np.dstack((img,img,img)) mat_names=['row','col','cell'] output_file_name=output_file_name.replace('.tfrecord','') for matname,matrix in zip(mat_names,matrices): im=img.copy() x=1 indices = np.argwhere(matrix[x] == 1) for index in indices: cv2.rectangle(im, (int(arr[index, 0])-3, int(arr[index, 1])-3), (int(arr[index, 2])+3, int(arr[index, 3])+3), (0,255,0), 1) x = 4 indices = np.argwhere(matrix[x] == 1) for index in indices: cv2.rectangle(im, (int(arr[index, 0])-3, int(arr[index, 1])-3), (int(arr[index, 2])+3, int(arr[index, 3])+3), (0, 0, 255), 1) img_name=os.path.join('bboxes/',output_file_name+'_'+str(imgindex)+'_'+matname+'.jpg') cv2.imwrite(img_name,im) def write_tf(self,filesize,threadnum): '''This function writes tfrecords. Input parameters are: filesize (number of images in one tfrecord), threadnum(thread id)''' options = tf.compat.v1.io.TFRecordOptions(tf.compat.v1.io.TFRecordCompressionType.GZIP) opts = Options() opts.set_headless() assert opts.headless #driver=PhantomJS() driver = Firefox(firefox_binary="/Applications/Firefox.app/Contents/MacOS/firefox-bin", firefox_profile="/Users/dmitry/Library/Application Support/Firefox/Profiles/d2nfi77v.default", executable_path="/Users/dmitry/Initflow/TIES/TIES_DataGeneration/geckodriver", options=opts) while(True): starttime = time.time() #randomly select a name of length=20 for tfrecords file. 
output_file_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=20)) + '.tfrecord' print('\nThread: ',threadnum,' Started:', output_file_name) #data_arr contains the images of generated tables and all_table_categories contains the table category of each of the table data_arr,all_table_categories = self.generate_tables(driver, filesize, output_file_name) if(data_arr is not None): if(len(data_arr)==filesize): with tf.io.TFRecordWriter(os.path.join(self.outtfpath,output_file_name+".gz"),options=options) as writer: try: for imgindex,subarr in enumerate(data_arr): arr=subarr[0] img=np.asarray(subarr[1][0],np.int64)[:,:,0] colmatrix = np.array(arr[1],dtype=np.int64) cellmatrix = np.array(arr[2],dtype=np.int64) rowmatrix = np.array(arr[0],dtype=np.int64) bboxes = np.array(arr[3]) tablecategory=arr[4][0] seq_ex = self.generate_tf_record(img, cellmatrix, rowmatrix, colmatrix, bboxes,tablecategory,imgindex,output_file_name) writer.write(seq_ex.SerializeToString()) print('\nThread :',threadnum,' Completed in ',time.time()-starttime,' ' ,output_file_name,'with len:',(len(data_arr))) print('category 1: ',all_table_categories[0],', category 2: ',all_table_categories[1],', category 3: ',all_table_categories[2],', category 4: ',all_table_categories[3]) except Exception as e: print('Exception occurred in write_tf function for file: ',output_file_name) traceback.print_exc() self.logger.write(traceback.format_exc()) # print('Thread :',threadnum,' Removing',output_file_name) # os.remove(os.path.join(self.outtfpath,output_file_name)) driver.stop_client() driver.quit() def write_to_tf(self,max_threads): '''This function starts tfrecords generation with number of threads = max_threads with each thread working on a single tfrecord''' if(not os.path.exists(self.distributionfile)): if((not os.path.exists(self.unlvtablepath)) or (not os.path.exists(self.unlvimagespath)) or (not os.path.exists(self.unlvocrpath))): print('UNLV dataset folders do not exist.') return #create 
all directories here if(self.visualizeimgs): self.create_dir('visualizeimgs') for tablecategory in range(1,5): dirname=os.path.join('visualizeimgs/','category'+str(tablecategory)) self.create_dir(dirname) self.create_dir(os.path.join(dirname,'html')) self.create_dir(os.path.join(dirname, 'img')) if(self.visualizebboxes): self.create_dir('bboxes') self.create_dir(self.outtfpath) #create output directory if it does not exist starttime=time.time() threads=[] for threadnum in range(max_threads): proc = Process(target=self.write_tf, args=(self.filesize, threadnum,)) proc.start() threads.append(proc) for proc in threads: proc.join() print(time.time()-starttime)
managers.py
# # Module providing the `SyncManager` class for dealing # with shared objects # # multiprocessing/managers.py # # Copyright (c) 2006-2008, R Oudkerk # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of author nor the names of any contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. 
# __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ] # # Imports # import os import sys import weakref import threading import array import Queue from traceback import format_exc from multiprocessing import Process, current_process, active_children, Pool, util, connection from multiprocessing.process import AuthenticationString from multiprocessing.forking import exit, Popen, assert_spawning, ForkingPickler from multiprocessing.util import Finalize, info try: from cPickle import PicklingError except ImportError: from pickle import PicklingError # # Register some things for pickling # def reduce_array(a): return array.array, (a.typecode, a.tostring()) ForkingPickler.register(array.array, reduce_array) view_types = [type(getattr({}, name)()) for name in ('items','keys','values')] # # Type for identifying shared objects # class Token(object): ''' Type to uniquely indentify a shared object ''' __slots__ = ('typeid', 'address', 'id') def __init__(self, typeid, address, id): (self.typeid, self.address, self.id) = (typeid, address, id) def __getstate__(self): return (self.typeid, self.address, self.id) def __setstate__(self, state): (self.typeid, self.address, self.id) = state def __repr__(self): return 'Token(typeid=%r, address=%r, id=%r)' % \ (self.typeid, self.address, self.id) # # Function for communication with a manager's server process # def dispatch(c, id, methodname, args=(), kwds={}): ''' Send a message to manager using connection `c` and return response ''' c.send((id, methodname, args, kwds)) kind, result = c.recv() if kind == '#RETURN': return result raise convert_to_error(kind, result) def convert_to_error(kind, result): if kind == '#ERROR': return result elif kind == '#TRACEBACK': assert type(result) is str return RemoteError(result) elif kind == '#UNSERIALIZABLE': assert type(result) is str return RemoteError('Unserializable message: %s\n' % result) else: return ValueError('Unrecognized message type') class RemoteError(Exception): def 
__str__(self): return ('\n' + '-'*75 + '\n' + str(self.args[0]) + '-'*75) # # Functions for finding the method names of an object # def all_methods(obj): ''' Return a list of names of methods of `obj` ''' temp = [] for name in dir(obj): func = getattr(obj, name) if hasattr(func, '__call__'): temp.append(name) return temp def public_methods(obj): ''' Return a list of names of methods of `obj` which do not start with '_' ''' return [name for name in all_methods(obj) if name[0] != '_'] # # Server which is run in a process controlled by a manager # class Server(object): ''' Server class which runs in a process controlled by a manager object ''' public = ['shutdown', 'create', 'accept_connection', 'get_methods', 'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref'] def __init__(self, registry, address, authkey, serializer): assert isinstance(authkey, bytes) self.registry = registry self.authkey = AuthenticationString(authkey) Listener, Client = listener_client[serializer] # do authentication later self.listener = Listener(address=address, backlog=16) self.address = self.listener.address self.id_to_obj = {'0': (None, ())} self.id_to_refcount = {} self.mutex = threading.RLock() self.stop = 0 def serve_forever(self): ''' Run the server forever ''' current_process()._manager_server = self try: try: while 1: try: c = self.listener.accept() except (OSError, IOError): continue t = threading.Thread(target=self.handle_request, args=(c,)) t.daemon = True t.start() except (KeyboardInterrupt, SystemExit): pass finally: self.stop = 999 self.listener.close() def handle_request(self, c): ''' Handle a new connection ''' funcname = result = request = None try: connection.deliver_challenge(c, self.authkey) connection.answer_challenge(c, self.authkey) request = c.recv() ignore, funcname, args, kwds = request assert funcname in self.public, '%r unrecognized' % funcname func = getattr(self, funcname) except Exception: msg = ('#TRACEBACK', format_exc()) else: try: result = func(c, 
*args, **kwds) except Exception: msg = ('#TRACEBACK', format_exc()) else: msg = ('#RETURN', result) try: c.send(msg) except Exception, e: try: c.send(('#TRACEBACK', format_exc())) except Exception: pass util.info('Failure to send message: %r', msg) util.info(' ... request was %r', request) util.info(' ... exception was %r', e) c.close() def serve_client(self, conn): ''' Handle requests from the proxies in a particular process/thread ''' util.debug('starting server thread to service %r', threading.current_thread().name) recv = conn.recv send = conn.send id_to_obj = self.id_to_obj while not self.stop: try: methodname = obj = None request = recv() ident, methodname, args, kwds = request obj, exposed, gettypeid = id_to_obj[ident] if methodname not in exposed: raise AttributeError( 'method %r of %r object is not in exposed=%r' % (methodname, type(obj), exposed) ) function = getattr(obj, methodname) try: res = function(*args, **kwds) except Exception, e: msg = ('#ERROR', e) else: typeid = gettypeid and gettypeid.get(methodname, None) if typeid: rident, rexposed = self.create(conn, typeid, res) token = Token(typeid, self.address, rident) msg = ('#PROXY', (rexposed, token)) else: msg = ('#RETURN', res) except AttributeError: if methodname is None: msg = ('#TRACEBACK', format_exc()) else: try: fallback_func = self.fallback_mapping[methodname] result = fallback_func( self, conn, ident, obj, *args, **kwds ) msg = ('#RETURN', result) except Exception: msg = ('#TRACEBACK', format_exc()) except EOFError: util.debug('got EOF -- exiting thread serving %r', threading.current_thread().name) sys.exit(0) except Exception: msg = ('#TRACEBACK', format_exc()) try: try: send(msg) except Exception, e: send(('#UNSERIALIZABLE', repr(msg))) except Exception, e: util.info('exception in thread serving %r', threading.current_thread().name) util.info(' ... message was %r', msg) util.info(' ... 
exception was %r', e) conn.close() sys.exit(1) def fallback_getvalue(self, conn, ident, obj): return obj def fallback_str(self, conn, ident, obj): return str(obj) def fallback_repr(self, conn, ident, obj): return repr(obj) fallback_mapping = { '__str__':fallback_str, '__repr__':fallback_repr, '#GETVALUE':fallback_getvalue } def dummy(self, c): pass def debug_info(self, c): ''' Return some info --- useful to spot problems with refcounting ''' self.mutex.acquire() try: result = [] keys = self.id_to_obj.keys() keys.sort() for ident in keys: if ident != '0': result.append(' %s: refcount=%s\n %s' % (ident, self.id_to_refcount[ident], str(self.id_to_obj[ident][0])[:75])) return '\n'.join(result) finally: self.mutex.release() def number_of_objects(self, c): ''' Number of shared objects ''' return len(self.id_to_obj) - 1 # don't count ident='0' def shutdown(self, c): ''' Shutdown this process ''' try: try: util.debug('manager received shutdown message') c.send(('#RETURN', None)) if sys.stdout != sys.__stdout__: util.debug('resetting stdout, stderr') sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ util._run_finalizers(0) for p in active_children(): util.debug('terminating a child process of manager') p.terminate() for p in active_children(): util.debug('terminating a child process of manager') p.join() util._run_finalizers() util.info('manager exiting with exitcode 0') except: import traceback traceback.print_exc() finally: exit(0) def create(self, c, typeid, *args, **kwds): ''' Create a new shared object and return its id ''' self.mutex.acquire() try: callable, exposed, method_to_typeid, proxytype = \ self.registry[typeid] if callable is None: assert len(args) == 1 and not kwds obj = args[0] else: obj = callable(*args, **kwds) if exposed is None: exposed = public_methods(obj) if method_to_typeid is not None: assert type(method_to_typeid) is dict exposed = list(exposed) + list(method_to_typeid) ident = '%x' % id(obj) # convert to string because xmlrpclib # only has 
32 bit signed integers util.debug('%r callable returned object with id %r', typeid, ident) self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid) if ident not in self.id_to_refcount: self.id_to_refcount[ident] = 0 # increment the reference count immediately, to avoid # this object being garbage collected before a Proxy # object for it can be created. The caller of create() # is responsible for doing a decref once the Proxy object # has been created. self.incref(c, ident) return ident, tuple(exposed) finally: self.mutex.release() def get_methods(self, c, token): ''' Return the methods of the shared object indicated by token ''' return tuple(self.id_to_obj[token.id][1]) def accept_connection(self, c, name): ''' Spawn a new thread to serve this connection ''' threading.current_thread().name = name c.send(('#RETURN', None)) self.serve_client(c) def incref(self, c, ident): self.mutex.acquire() try: self.id_to_refcount[ident] += 1 finally: self.mutex.release() def decref(self, c, ident): self.mutex.acquire() try: assert self.id_to_refcount[ident] >= 1 self.id_to_refcount[ident] -= 1 if self.id_to_refcount[ident] == 0: del self.id_to_obj[ident], self.id_to_refcount[ident] util.debug('disposing of obj with id %r', ident) finally: self.mutex.release() # # Class to represent state of a manager # class State(object): __slots__ = ['value'] INITIAL = 0 STARTED = 1 SHUTDOWN = 2 # # Mapping from serializer name to Listener and Client types # listener_client = { 'pickle' : (connection.Listener, connection.Client), 'xmlrpclib' : (connection.XmlListener, connection.XmlClient) } # # Definition of BaseManager # class BaseManager(object): ''' Base class for managers ''' _registry = {} _Server = Server def __init__(self, address=None, authkey=None, serializer='pickle'): if authkey is None: authkey = current_process().authkey self._address = address # XXX not final address if eg ('', 0) self._authkey = AuthenticationString(authkey) self._state = State() self._state.value = 
State.INITIAL self._serializer = serializer self._Listener, self._Client = listener_client[serializer] def __reduce__(self): return type(self).from_address, \ (self._address, self._authkey, self._serializer) def get_server(self): ''' Return server object with serve_forever() method and address attribute ''' assert self._state.value == State.INITIAL return Server(self._registry, self._address, self._authkey, self._serializer) def connect(self): ''' Connect manager object to the server process ''' Listener, Client = listener_client[self._serializer] conn = Client(self._address, authkey=self._authkey) dispatch(conn, None, 'dummy') self._state.value = State.STARTED def start(self, initializer=None, initargs=()): ''' Spawn a server process for this manager object ''' assert self._state.value == State.INITIAL if initializer is not None and not hasattr(initializer, '__call__'): raise TypeError('initializer must be a callable') # pipe over which we will retrieve address of server reader, writer = connection.Pipe(duplex=False) # spawn process which runs a server self._process = Process( target=type(self)._run_server, args=(self._registry, self._address, self._authkey, self._serializer, writer, initializer, initargs), ) ident = ':'.join(str(i) for i in self._process._identity) self._process.name = type(self).__name__ + '-' + ident self._process.start() # get address of server writer.close() self._address = reader.recv() reader.close() # register a finalizer self._state.value = State.STARTED self.shutdown = util.Finalize( self, type(self)._finalize_manager, args=(self._process, self._address, self._authkey, self._state, self._Client), exitpriority=0 ) @classmethod def _run_server(cls, registry, address, authkey, serializer, writer, initializer=None, initargs=()): ''' Create a server, report its address and run it ''' if initializer is not None: initializer(*initargs) # create server server = cls._Server(registry, address, authkey, serializer) # inform parent process of the 
server's address writer.send(server.address) writer.close() # run the manager util.info('manager serving at %r', server.address) server.serve_forever() def _create(self, typeid, *args, **kwds): ''' Create a new shared object; return the token and exposed tuple ''' assert self._state.value == State.STARTED, 'server not yet started' conn = self._Client(self._address, authkey=self._authkey) try: id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds) finally: conn.close() return Token(typeid, self._address, id), exposed def join(self, timeout=None): ''' Join the manager process (if it has been spawned) ''' self._process.join(timeout) def _debug_info(self): ''' Return some info about the servers shared objects and connections ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'debug_info') finally: conn.close() def _number_of_objects(self): ''' Return the number of shared objects ''' conn = self._Client(self._address, authkey=self._authkey) try: return dispatch(conn, None, 'number_of_objects') finally: conn.close() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.shutdown() @staticmethod def _finalize_manager(process, address, authkey, state, _Client): ''' Shutdown the manager process; will be registered as a finalizer ''' if process.is_alive(): util.info('sending shutdown message to manager') try: conn = _Client(address, authkey=authkey) try: dispatch(conn, None, 'shutdown') finally: conn.close() except Exception: pass process.join(timeout=0.2) if process.is_alive(): util.info('manager still alive') if hasattr(process, 'terminate'): util.info('trying to `terminate()` manager process') process.terminate() process.join(timeout=0.1) if process.is_alive(): util.info('manager still alive after terminate') state.value = State.SHUTDOWN try: del BaseProxy._address_to_local[address] except KeyError: pass address = property(lambda self: self._address) @classmethod def register(cls, 
typeid, callable=None, proxytype=None, exposed=None, method_to_typeid=None, create_method=True): ''' Register a typeid with the manager type ''' if '_registry' not in cls.__dict__: cls._registry = cls._registry.copy() if proxytype is None: proxytype = AutoProxy exposed = exposed or getattr(proxytype, '_exposed_', None) method_to_typeid = method_to_typeid or \ getattr(proxytype, '_method_to_typeid_', None) if method_to_typeid: for key, value in method_to_typeid.items(): assert type(key) is str, '%r is not a string' % key assert type(value) is str, '%r is not a string' % value cls._registry[typeid] = ( callable, exposed, method_to_typeid, proxytype ) if create_method: def temp(self, *args, **kwds): util.debug('requesting creation of a shared %r object', typeid) token, exp = self._create(typeid, *args, **kwds) proxy = proxytype( token, self._serializer, manager=self, authkey=self._authkey, exposed=exp ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy temp.__name__ = typeid setattr(cls, typeid, temp) # # Subclass of set which get cleared after a fork # class ProcessLocalSet(set): def __init__(self): util.register_after_fork(self, lambda obj: obj.clear()) def __reduce__(self): return type(self), () # # Definition of BaseProxy # class BaseProxy(object): ''' A base for proxies of shared objects ''' _address_to_local = {} _mutex = util.ForkAwareThreadLock() def __init__(self, token, serializer, manager=None, authkey=None, exposed=None, incref=True): BaseProxy._mutex.acquire() try: tls_idset = BaseProxy._address_to_local.get(token.address, None) if tls_idset is None: tls_idset = util.ForkAwareLocal(), ProcessLocalSet() BaseProxy._address_to_local[token.address] = tls_idset finally: BaseProxy._mutex.release() # self._tls is used to record the connection used by this # thread to communicate with the manager at token.address self._tls = tls_idset[0] # self._idset is used to record the identities of all shared # 
objects for which the current process owns references and # which are in the manager at token.address self._idset = tls_idset[1] self._token = token self._id = self._token.id self._manager = manager self._serializer = serializer self._Client = listener_client[serializer][1] if authkey is not None: self._authkey = AuthenticationString(authkey) elif self._manager is not None: self._authkey = self._manager._authkey else: self._authkey = current_process().authkey if incref: self._incref() util.register_after_fork(self, BaseProxy._after_fork) def _connect(self): util.debug('making connection to manager') name = current_process().name if threading.current_thread().name != 'MainThread': name += '|' + threading.current_thread().name conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'accept_connection', (name,)) self._tls.connection = conn def _callmethod(self, methodname, args=(), kwds={}): ''' Try to call a method of the referrent and return a copy of the result ''' try: conn = self._tls.connection except AttributeError: util.debug('thread %r does not own a connection', threading.current_thread().name) self._connect() conn = self._tls.connection conn.send((self._id, methodname, args, kwds)) kind, result = conn.recv() if kind == '#RETURN': return result elif kind == '#PROXY': exposed, token = result proxytype = self._manager._registry[token.typeid][-1] token.address = self._token.address proxy = proxytype( token, self._serializer, manager=self._manager, authkey=self._authkey, exposed=exposed ) conn = self._Client(token.address, authkey=self._authkey) dispatch(conn, None, 'decref', (token.id,)) return proxy raise convert_to_error(kind, result) def _getvalue(self): ''' Get a copy of the value of the referent ''' return self._callmethod('#GETVALUE') def _incref(self): conn = self._Client(self._token.address, authkey=self._authkey) dispatch(conn, None, 'incref', (self._id,)) util.debug('INCREF %r', self._token.id) self._idset.add(self._id) 
state = self._manager and self._manager._state self._close = util.Finalize( self, BaseProxy._decref, args=(self._token, self._authkey, state, self._tls, self._idset, self._Client), exitpriority=10 ) @staticmethod def _decref(token, authkey, state, tls, idset, _Client): idset.discard(token.id) # check whether manager is still alive if state is None or state.value == State.STARTED: # tell manager this process no longer cares about referent try: util.debug('DECREF %r', token.id) conn = _Client(token.address, authkey=authkey) dispatch(conn, None, 'decref', (token.id,)) except Exception, e: util.debug('... decref failed %s', e) else: util.debug('DECREF %r -- manager already shutdown', token.id) # check whether we can close this thread's connection because # the process owns no more references to objects for this manager if not idset and hasattr(tls, 'connection'): util.debug('thread %r has no more proxies so closing conn', threading.current_thread().name) tls.connection.close() del tls.connection def _after_fork(self): self._manager = None try: self._incref() except Exception, e: # the proxy may just be for a manager which has shutdown util.info('incref failed: %s' % e) def __reduce__(self): kwds = {} if Popen.thread_is_spawning(): kwds['authkey'] = self._authkey if getattr(self, '_isauto', False): kwds['exposed'] = self._exposed_ return (RebuildProxy, (AutoProxy, self._token, self._serializer, kwds)) else: return (RebuildProxy, (type(self), self._token, self._serializer, kwds)) def __deepcopy__(self, memo): return self._getvalue() def __repr__(self): return '<%s object, typeid %r at %s>' % \ (type(self).__name__, self._token.typeid, '0x%x' % id(self)) def __str__(self): ''' Return representation of the referent (or a fall-back if that fails) ''' try: return self._callmethod('__repr__') except Exception: return repr(self)[:-1] + "; '__str__()' failed>" # # Function used for unpickling # def RebuildProxy(func, token, serializer, kwds): ''' Function used for unpickling 
proxy objects. If possible the shared object is returned, or otherwise a proxy for it. ''' server = getattr(current_process(), '_manager_server', None) if server and server.address == token.address: return server.id_to_obj[token.id][0] else: incref = ( kwds.pop('incref', True) and not getattr(current_process(), '_inheriting', False) ) return func(token, serializer, incref=incref, **kwds) # # Functions to create proxies and proxy types # def MakeProxyType(name, exposed, _cache={}): ''' Return an proxy type whose methods are given by `exposed` ''' exposed = tuple(exposed) try: return _cache[(name, exposed)] except KeyError: pass dic = {} for meth in exposed: exec '''def %s(self, *args, **kwds): return self._callmethod(%r, args, kwds)''' % (meth, meth) in dic ProxyType = type(name, (BaseProxy,), dic) ProxyType._exposed_ = exposed _cache[(name, exposed)] = ProxyType return ProxyType def AutoProxy(token, serializer, manager=None, authkey=None, exposed=None, incref=True): ''' Return an auto-proxy for `token` ''' _Client = listener_client[serializer][1] if exposed is None: conn = _Client(token.address, authkey=authkey) try: exposed = dispatch(conn, None, 'get_methods', (token,)) finally: conn.close() if authkey is None and manager is not None: authkey = manager._authkey if authkey is None: authkey = current_process().authkey ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed) proxy = ProxyType(token, serializer, manager=manager, authkey=authkey, incref=incref) proxy._isauto = True return proxy # # Types/callables which we will register with SyncManager # class Namespace(object): def __init__(self, **kwds): self.__dict__.update(kwds) def __repr__(self): items = self.__dict__.items() temp = [] for name, value in items: if not name.startswith('_'): temp.append('%s=%r' % (name, value)) temp.sort() return 'Namespace(%s)' % str.join(', ', temp) class Value(object): def __init__(self, typecode, value, lock=True): self._typecode = typecode self._value = value def 
get(self): return self._value def set(self, value): self._value = value def __repr__(self): return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) value = property(get, set) def Array(typecode, sequence, lock=True): return array.array(typecode, sequence) # # Proxy types used by SyncManager # class IteratorProxy(BaseProxy): # XXX remove methods for Py3.0 and Py2.6 _exposed_ = ('__next__', 'next', 'send', 'throw', 'close') def __iter__(self): return self def __next__(self, *args): return self._callmethod('__next__', args) def next(self, *args): return self._callmethod('next', args) def send(self, *args): return self._callmethod('send', args) def throw(self, *args): return self._callmethod('throw', args) def close(self, *args): return self._callmethod('close', args) class AcquirerProxy(BaseProxy): _exposed_ = ('acquire', 'release') def acquire(self, blocking=True): return self._callmethod('acquire', (blocking,)) def release(self): return self._callmethod('release') def __enter__(self): return self._callmethod('acquire') def __exit__(self, exc_type, exc_val, exc_tb): return self._callmethod('release') class ConditionProxy(AcquirerProxy): # XXX will Condition.notfyAll() name be available in Py3.0? 
_exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) def notify(self): return self._callmethod('notify') def notify_all(self): return self._callmethod('notify_all') class EventProxy(BaseProxy): _exposed_ = ('is_set', 'set', 'clear', 'wait') def is_set(self): return self._callmethod('is_set') def set(self): return self._callmethod('set') def clear(self): return self._callmethod('clear') def wait(self, timeout=None): return self._callmethod('wait', (timeout,)) class NamespaceProxy(BaseProxy): _exposed_ = ('__getattribute__', '__setattr__', '__delattr__') def __getattr__(self, key): if key[0] == '_': return object.__getattribute__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__getattribute__', (key,)) def __setattr__(self, key, value): if key[0] == '_': return object.__setattr__(self, key, value) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__setattr__', (key, value)) def __delattr__(self, key): if key[0] == '_': return object.__delattr__(self, key) callmethod = object.__getattribute__(self, '_callmethod') return callmethod('__delattr__', (key,)) class ValueProxy(BaseProxy): _exposed_ = ('get', 'set') def get(self): return self._callmethod('get') def set(self, value): return self._callmethod('set', (value,)) value = property(get, set) BaseListProxy = MakeProxyType('BaseListProxy', ( '__add__', '__contains__', '__delitem__', '__delslice__', '__getitem__', '__getslice__', '__len__', '__mul__', '__reversed__', '__rmul__', '__setitem__', '__setslice__', 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove', 'reverse', 'sort', '__imul__' )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 class ListProxy(BaseListProxy): def __iadd__(self, value): self._callmethod('extend', (value,)) return self def __imul__(self, value): self._callmethod('__imul__', (value,)) return self DictProxy = 
MakeProxyType('DictProxy', ( '__contains__', '__delitem__', '__getitem__', '__len__', '__setitem__', 'clear', 'copy', 'get', 'has_key', 'items', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values' )) ArrayProxy = MakeProxyType('ArrayProxy', ( '__len__', '__getitem__', '__setitem__', '__getslice__', '__setslice__' )) # XXX __getslice__ and __setslice__ unneeded in Py3.0 PoolProxy = MakeProxyType('PoolProxy', ( 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join', 'map', 'map_async', 'terminate' )) PoolProxy._method_to_typeid_ = { 'apply_async': 'AsyncResult', 'map_async': 'AsyncResult', 'imap': 'Iterator', 'imap_unordered': 'Iterator' } # # Definition of SyncManager # class SyncManager(BaseManager): ''' Subclass of `BaseManager` which supports a number of shared object types. The types registered are those intended for the synchronization of threads, plus `dict`, `list` and `Namespace`. The `multiprocessing.Manager()` function creates started instances of this class. ''' SyncManager.register('Queue', Queue.Queue) SyncManager.register('JoinableQueue', Queue.Queue) SyncManager.register('Event', threading.Event, EventProxy) SyncManager.register('Lock', threading.Lock, AcquirerProxy) SyncManager.register('RLock', threading.RLock, AcquirerProxy) SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy) SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore, AcquirerProxy) SyncManager.register('Condition', threading.Condition, ConditionProxy) SyncManager.register('Pool', Pool, PoolProxy) SyncManager.register('list', list, ListProxy) SyncManager.register('dict', dict, DictProxy) SyncManager.register('Value', Value, ValueProxy) SyncManager.register('Array', Array, ArrayProxy) SyncManager.register('Namespace', Namespace, NamespaceProxy) # types returned by methods of PoolProxy SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False) SyncManager.register('AsyncResult', create_method=False)
data_loader_base.py
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

from queue import Queue, Empty
from threading import Thread, Event


class BaseDataLoader(object):
    def __len__(self):
        """Return the number of batches this loader yields per epoch."""
        raise NotImplementedError()

    def _iterate(self):
        """Yield raw batches; concrete loaders implement this."""
        raise NotImplementedError()

    def __iter__(self):
        """Iterate one epoch, applying the post-processing hook to each batch."""
        for batch in self._iterate():
            yield self._process_batch(batch)

    def _process_batch(self, batch):
        """
        Hook to modify a batch before it is yielded.
        Will be overridden by the trainer to reshape the data as needed.
        Please do not override it.
        """
        return batch


class AsyncDataLoaderMixin(object):
    """
    Async mixin on top of an implementation of BaseDataLoader. It runs a
    separate thread which reads batches from self._iterate() and pushes them
    into a bounded queue; self.__iter__() pops batches from that queue.
    If async_loader_queue_size is set to 0, the data loader works
    synchronously (no background thread).

    For example:
    class PytorchAsyncDataLoader(AsyncDataLoaderMixin, PytorchDataLoader):
    """

    def __init__(self, async_loader_queue_size=5, *args, **kwargs):
        """
        Initialize the async data loader. Call this from the __init__() of
        the concrete implementation.

        :param async_loader_queue_size: max number of batches buffered by the
            background thread; 0 disables async loading entirely.
        """
        self.async_loader_queue_size = async_loader_queue_size
        super().__init__(*args, **kwargs)
        print(f"Apply the AsyncDataLoaderMixin on top of the data loader, async_loader_queue_size={async_loader_queue_size}. ")

        if self.async_loader_queue_size > 0:
            self.finished_event = Event()
            self.queue = Queue(self.async_loader_queue_size)
            self.thread = Thread(target=self._async_worker)
            self.thread.daemon = True
            self.started = False

    def close_async_loader(self):
        """
        Close the async data loader.

        Keeps draining the queue while the worker thread is still alive so
        that a producer blocked on a full queue can wake up, observe
        ``finished_event`` and exit; draining only once before ``join()``
        could deadlock if the worker refilled the queue in the meantime.
        """
        print("Closing the AsyncDataLoaderMixin.")
        if self.async_loader_queue_size > 0 and self.started:
            self.finished_event.set()
            # Unblock the producer until it has actually terminated.
            while self.thread.is_alive():
                try:
                    self.queue.get(timeout=0.05)
                except Empty:
                    pass
            # Drain any leftover items (including trailing sentinels).
            while True:
                try:
                    self.queue.get_nowait()
                except Empty:
                    break
            self.thread.join()

    def _async_worker(self):
        """
        Producer loop run on the background thread.

        Repeatedly drains self._iterate(), pushing batches into the queue and
        marking the end of each epoch with a ``None`` sentinel, until
        ``finished_event`` is set. On error the exception object itself is
        queued so the consumer thread can re-raise it.
        """
        try:
            while not self.finished_event.is_set():
                for batch in self._iterate():
                    if self.finished_event.is_set():
                        break
                    self.queue.put(batch)
                # End-of-epoch marker so __iter__ stops after one pass.
                self.queue.put(None)
        except Exception as ex:
            # Forward the error to the consumer; the sentinel that unblocks
            # the consumer is emitted exactly once by the finally block below
            # (the original code queued a duplicate sentinel here, which left
            # a stray None that made the next epoch appear empty).
            self.queue.put(ex)
        finally:
            # Guarantee a final sentinel so a blocked consumer always wakes up.
            self.queue.put(None)

    def __iter__(self):
        """
        Override __iter__() to produce batches asynchronously from the queue
        filled by self._iterate(); falls back to synchronous iteration when
        async_loader_queue_size is 0.
        """
        print("Start generating batches from async data loader.")
        if self.async_loader_queue_size > 0:
            if not self.started:
                self.started = True
                self.thread.start()
            while True:
                batch = self.queue.get()
                if batch is None:
                    # Epoch boundary sentinel from the worker.
                    break
                if isinstance(batch, Exception):
                    # Re-raise errors captured on the worker thread.
                    raise batch
                yield self._process_batch(batch)
        else:
            for batch in self._iterate():
                yield self._process_batch(batch)
kube_env.py
#!/usr/bin/env python3
"""Deployment driver for the tracing experiments.

Provisions a Kubernetes cluster (GCP or minikube), installs Istio, deploys
one of the benchmark applications, and manages the WASM tracing filter
(build / deploy / refresh / undeploy) plus the Istio addons.
"""
import argparse
import csv
import time
import logging
import sys
import os
from multiprocessing import Process
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

import requests

import kube_util as util

log = logging.getLogger(__name__)

FILE_DIR = Path(__file__).parent.resolve()
ROOT_DIR = FILE_DIR.parent
ISTIO_DIR = FILE_DIR.joinpath("istio-1.9.3")
ISTIO_BIN = ISTIO_DIR.joinpath("bin/istioctl")
YAML_DIR = FILE_DIR.joinpath("yaml_crds")
TOOLS_DIR = FILE_DIR.joinpath("tools")
ONLINE_BOUTIQUE_DIR = FILE_DIR.joinpath("microservices-demo")
HOTEL_RESERVATION_DIR = FILE_DIR.joinpath("DeathStarBench/hotelReservation")
TRAIN_TICKET_DIR = FILE_DIR.joinpath(
    "train-ticket/deployment/kubernetes-manifests/k8s-with-istio")
PROJECT_ID = "dynamic-tracing"

APPLY_CMD = "kubectl apply -f "
DELETE_CMD = "kubectl delete -f "

# Per-application startup/deploy/undeploy command matrix.
# BK = bookinfo, OB = online boutique, HR = hotel reservation, TT = train ticket.
# A minikube_startup_command of None means the app is GCP-only.
CONFIG_MATRIX = {
    'BK': {
        'minikube_startup_command': "minikube start --cpus=2 --memory 4096 --disk-size 32g",
        'gcloud_startup_command': "gcloud container clusters create demo --enable-autoupgrade \
            --num-nodes=5 ",
        'deploy_cmd': f"{APPLY_CMD} {YAML_DIR}/bookinfo-services.yaml && \
            {APPLY_CMD} {YAML_DIR}/bookinfo-apps.yaml && \
            {APPLY_CMD} {ISTIO_DIR}/samples/bookinfo/networking/bookinfo-gateway.yaml && \
            {APPLY_CMD} {ISTIO_DIR}/samples/bookinfo/networking/destination-rule-reviews.yaml ",
        'undeploy_cmd': f"{ISTIO_DIR}/samples/bookinfo/platform/kube/cleanup.sh"
    },
    'OB': {
        'minikube_startup_command': "minikube start --cpus=4 --memory 4096 --disk-size 32g",
        'gcloud_startup_command': "gcloud container clusters create demo --enable-autoupgrade \
            --num-nodes=7 ",
        'deploy_cmd': f"{APPLY_CMD} {ONLINE_BOUTIQUE_DIR}/release ",
        'undeploy_cmd': f"{DELETE_CMD} {ONLINE_BOUTIQUE_DIR}/release "
    },
    'HR': {
        'minikube_startup_command': None,
        'gcloud_startup_command': "gcloud container clusters create demo --enable-autoupgrade \
            --num-nodes=7 ",
        'deploy_cmd': f"{APPLY_CMD} {HOTEL_RESERVATION_DIR}/kubernetes ",
        'undeploy_cmd': f"{DELETE_CMD} {HOTEL_RESERVATION_DIR}/kubernetes ",
    },
    'TT': {
        'minikube_startup_command': None,
        'gcloud_startup_command': "gcloud container clusters create demo --enable-autoupgrade \
            --num-nodes=8 ",
        # Train-ticket manifests need explicit sidecar injection before apply.
        'deploy_cmd':
            f"{ISTIO_BIN} kube-inject -f {TRAIN_TICKET_DIR}/ts-deployment-part1.yml > dpl1.yml && " +
            f"{APPLY_CMD} dpl1.yml && " +
            f"{ISTIO_BIN} kube-inject -f {TRAIN_TICKET_DIR}/ts-deployment-part2.yml > dpl2.yml && " +
            f"{APPLY_CMD} dpl2.yml && " +
            f"{ISTIO_BIN} kube-inject -f {TRAIN_TICKET_DIR}/ts-deployment-part3.yml > dpl3.yml && " +
            f"{APPLY_CMD} dpl3.yml && " +
            f"{APPLY_CMD} {TRAIN_TICKET_DIR}/trainticket-gateway.yaml && " +
            " rm dpl1.yml dpl2.yml dpl3.yml ",
        'undeploy_cmd':
            f"{DELETE_CMD} {TRAIN_TICKET_DIR}/ts-deployment-part1.yml && " +
            f"{DELETE_CMD} {TRAIN_TICKET_DIR}/ts-deployment-part2.yml && " +
            f"{DELETE_CMD} {TRAIN_TICKET_DIR}/ts-deployment-part3.yml "
    },
}

FILTER_DIR = FILE_DIR.joinpath("../tracing_compiler/filter_envoy")
DISTRIBUTED_FILTER_DIR = FILE_DIR.joinpath(
    "../tracing_compiler/distributed_filter_envoy")
CM_FILTER_NAME = "rs-filter"

# the kubernetes python API sucks, but keep this for later
# from kubernetes import client
# from kubernetes.client.configuration import Configuration
# from kubernetes.utils import create_from_yaml
# from kubernetes.config import kube_config
# def get_e2e_configuration():
#     config = Configuration()
#     config.host = None
#     kube_config.load_kube_config(client_configuration=config)
#     log.info('Running test against : %s' % config.host)
#     return config
# conf = get_e2e_configuration()
# k8s_client = client.api_client.ApiClient(configuration=conf)
# create_from_yaml(k8s_client, f"{bookinfo_dir}/platform/kube/bookinfo.yaml")

############## PLATFORM RELATED FUNCTIONS ###############################


def inject_istio():
    """Install the Istio demo profile (tracing enabled) and turn on sidecar
    injection for the default and storage namespaces.

    Returns util.EXIT_SUCCESS, or the first failing command's result code.
    """
    cmd = f"{ISTIO_BIN} install --set profile=demo "
    cmd += "--set meshConfig.enableTracing=true --skip-confirmation "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    cmd = "kubectl label namespace default istio-injection=enabled --overwrite"
    result = util.exec_process(cmd)
    # FIX: this result was previously ignored and silently overwritten.
    if result != util.EXIT_SUCCESS:
        return result
    cmd = f"{ISTIO_BIN} install --set profile=demo -n storage "
    cmd += "--set meshConfig.enableTracing=true --skip-confirmation "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    cmd = "kubectl label namespace storage istio-injection=enabled --overwrite"
    result = util.exec_process(cmd)
    return result


def deploy_addons(addons):
    """Apply the given Istio addon manifests and wait for their rollout.

    addons: iterable of addon names; "prometheus-mod" is served from the
    local YAML_DIR, everything else from the istio release-1.9 samples.
    """
    apply_cmd = "kubectl apply -f "
    url = "https://raw.githubusercontent.com/istio/istio/release-1.9"
    # FIX: operate on a copy — the original mutated the caller's list.
    addons = list(addons)
    if "kiali" in addons:
        # kiali is intentionally applied twice: the first apply can fail
        # while its CRDs register, so the repeat acts as a retry.
        addons.append("kiali")
    cmd_parts = []
    for addon in addons:
        if addon == "prometheus-mod":
            cmd_parts.append(f"{apply_cmd} {YAML_DIR}/prometheus-mod.yaml")
        else:
            cmd_parts.append(f"{apply_cmd} {url}/samples/addons/{addon}.yaml")
    result = util.exec_process(" && ".join(cmd_parts))
    if result != util.EXIT_SUCCESS:
        return result
    # Wait for every istio-system deployment to finish rolling out.
    cmd = "kubectl get deploy -n istio-system -o name --all-namespaces "
    deployments = util.get_output_from_proc(cmd).decode("utf-8").strip()
    deployments = deployments.split("\n")
    for depl in deployments:
        wait_cmd = "kubectl rollout status -n istio-system "
        wait_cmd += f"{depl} -w --timeout=180s"
        _ = util.exec_process(wait_cmd)
    log.info("Addons are ready.")
    return util.EXIT_SUCCESS


def remove_addons(addons):
    """Delete the given addon manifests (missing resources are ignored)."""
    remove_cmd = "kubectl delete -f"
    url = "https://raw.githubusercontent.com/istio/istio/release-1.9"
    cmd_parts = []
    for addon in addons:
        if addon == "prometheus-mod":
            cmd_parts.append(
                f"{remove_cmd} {YAML_DIR}/prometheus-mod.yaml --ignore-not-found=true")
        else:
            cmd_parts.append(
                f"{remove_cmd} {url}/samples/addons/{addon}.yaml --ignore-not-found=true")
    return util.exec_process(" && ".join(cmd_parts))


def application_wait():
    """Block until every deployment in the default namespace is rolled out."""
    cmd = "kubectl get deploy -o name"
    deployments = util.get_output_from_proc(cmd).decode("utf-8").strip()
    deployments = deployments.split("\n")
    for depl in deployments:
        wait_cmd = f"kubectl rollout status {depl} -w --timeout=180s"
        _ = util.exec_process(wait_cmd)
    log.info("Application is ready.")
    return util.EXIT_SUCCESS


def inject_failure():
    """Apply the fault-injection manifest."""
    cmd = f"kubectl apply -f {YAML_DIR}/fault-injection.yaml "
    return util.exec_process(cmd)


def remove_failure():
    """Remove the fault-injection manifest."""
    cmd = f"kubectl delete -f {YAML_DIR}/fault-injection.yaml "
    return util.exec_process(cmd)


def check_kubernetes_status():
    """Return EXIT_SUCCESS if a cluster is reachable (kubectl cluster-info)."""
    cmd = "kubectl cluster-info"
    result = util.exec_process(cmd,
                               stdout=util.subprocess.PIPE,
                               stderr=util.subprocess.PIPE)
    return result


def start_kubernetes(platform, multizonal, application):
    """Provision a cluster sized for `application`.

    platform: "GCP" creates a GKE cluster and enables the tracing/monitoring
    APIs; anything else starts minikube. multizonal only applies to GCP.
    Returns util.EXIT_SUCCESS or an error code.
    """
    if platform == "GCP":
        cmd = CONFIG_MATRIX[application]['gcloud_startup_command']
        if multizonal:
            cmd += "--region us-central1-a --node-locations us-central1-b "
            cmd += "us-central1-c us-central1-a "
        else:
            cmd += "--zone=us-central1-a "
        result = util.exec_process(cmd)
        # FIX: this result was previously overwritten without being checked.
        if result != util.EXIT_SUCCESS:
            return result
        cmd = f"gcloud services enable container.googleapis.com --project {PROJECT_ID} &&"
        cmd += "gcloud services enable monitoring.googleapis.com cloudtrace.googleapis.com "
        cmd += f"clouddebugger.googleapis.com cloudprofiler.googleapis.com --project {PROJECT_ID}"
        result = util.exec_process(cmd)
        if result != util.EXIT_SUCCESS:
            return result
    else:
        startup_cmd = CONFIG_MATRIX[application]['minikube_startup_command']
        if startup_cmd is None:
            # FIX: previously returned a bare string instead of an error code.
            log.error("Application %s is not supported on minikube.", application)
            return util.EXIT_FAILURE
        result = util.exec_process(startup_cmd)
        if result != util.EXIT_SUCCESS:
            return result
    # Create the storage namespace used by the aggregation filter.
    cmd = "kubectl create namespace storage"
    return util.exec_process(cmd)


def stop_kubernetes(platform):
    """Tear down the cluster created by start_kubernetes."""
    if platform == "GCP":
        cmd = "gcloud container clusters delete "
        cmd += "demo --zone us-central1-a --quiet "
    else:
        # delete minikube
        cmd = "minikube delete"
    return util.exec_process(cmd)


def get_gateway_info(platform):
    """Resolve the Istio ingress gateway address.

    Returns (host, port, "host:port"). On GCP the LoadBalancer IP and
    service port are used; on minikube the node IP and http2 nodePort.
    """
    ingress_host = ""
    ingress_port = ""
    if platform == "GCP":
        cmd = "kubectl -n istio-system get service istio-ingressgateway "
        cmd += "-o jsonpath={.status.loadBalancer.ingress[0].ip} "
        ingress_host = util.get_output_from_proc(cmd).decode("utf-8").replace(
            "'", "")
        cmd = "kubectl -n istio-system get service istio-ingressgateway "
        cmd += " -o jsonpath={.spec.ports[?(@.name==\"http2\")].port}"
        ingress_port = util.get_output_from_proc(cmd).decode("utf-8").replace(
            "'", "")
    else:
        cmd = "minikube ip"
        ingress_host = util.get_output_from_proc(cmd).decode("utf-8").rstrip()
        cmd = "kubectl -n istio-system get service istio-ingressgateway"
        cmd += " -o jsonpath={.spec.ports[?(@.name==\"http2\")].nodePort}"
        ingress_port = util.get_output_from_proc(cmd).decode("utf-8")
    log.debug("Ingress Host: %s", ingress_host)
    log.debug("Ingress Port: %s", ingress_port)
    gateway_url = f"{ingress_host}:{ingress_port}"
    log.debug("Gateway: %s", gateway_url)
    return ingress_host, ingress_port, gateway_url


def burst_loop(url):
    """Fire a burst of ~500 fire-and-forget GET requests at `url`.

    The tiny timeout makes each request effectively asynchronous; the
    ReadTimeout it raises is expected and swallowed.
    """
    NUM_REQUESTS = 500
    MAX_THREADS = 32

    def timeout_request(_):
        try:
            # the timeout effectively makes this request async
            requests.get(url, timeout=0.001)
        except requests.exceptions.ReadTimeout:
            pass

    log.info("Starting burst...")
    # quick hack until I found a better way
    with ThreadPoolExecutor(max_workers=MAX_THREADS) as p:
        for _ in p.map(timeout_request, range(NUM_REQUESTS)):
            pass
    log.info("Done with burst...")


def do_burst(platform):
    """Launch burst_loop against the product page in a detached process."""
    _, _, gateway_url = get_gateway_info(platform)
    url = f"http://{gateway_url}/productpage"
    p = Process(target=burst_loop, args=(url, ))
    p.start()
    # do not care about killing that process


def start_fortio(gateway_url):
    """Start a background fortio load generator; returns its process handle."""
    cmd = f"{FILE_DIR}/bin/fortio "
    cmd += "load -c 50 -qps 300 -jitter -t 0 -loglevel Warning "
    cmd += f"http://{gateway_url}/productpage"
    fortio_proc = util.start_process(cmd, preexec_fn=os.setsid)
    return fortio_proc


############### FILTER RELATED FUNCTIONS ######################################


def build_filter(filter_dir):
    """Compile the WASM filter (and its aggregation companion) with cargo.

    The aggregation filter is skipped for the empty/loop benchmark filters,
    which have no agg/ crate.
    """
    # TODO: Move this into a script in the filter dir
    log.info("Building filter...")
    cmd = "cargo +nightly build -Z unstable-options "
    cmd += "--target=wasm32-unknown-unknown --release "
    cmd += f"--out-dir {filter_dir}/wasm_bins "
    cmd += f"--target-dir {filter_dir}/target "
    cmd += f"--manifest-path {filter_dir}/Cargo.toml "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    # Also build the aggregation filter if it's not an empty or loop filter
    if ('rs-empty-filter' not in str(filter_dir)
            and 'rs-loop-filter' not in str(filter_dir)):
        cmd = "cargo +nightly build -Z unstable-options "
        cmd += "--target=wasm32-unknown-unknown --release "
        cmd += f"--out-dir {filter_dir}/wasm_bins "
        cmd += f"--target-dir {filter_dir}/target "
        cmd += f"--manifest-path {filter_dir}/agg/Cargo.toml "
        result = util.exec_process(cmd)
        if result != util.EXIT_SUCCESS:
            return result
    log.info("Build successful!")
    return result


def undeploy_filter(platform=None, multizonal=None, application="BK"):
    """Remove the WASM filter and restore the unmodified application.

    platform and multizonal are accepted for backward compatibility but
    unused. FIX: previously this called deploy_application() without its
    required `application` argument, which raised a TypeError.
    """
    # delete the config map
    delete_config_map()
    cmd = f"kubectl delete -f {YAML_DIR}/filter.yaml "
    result = util.exec_process(cmd, allow_failures=True)
    if result != util.EXIT_SUCCESS:
        log.warning("Failed to delete the filter.")
    # restore the original application deployment
    return deploy_application(application)


def patch_application():
    """Patch every deployment (and the storage upstream) to mount the
    filter config map."""
    cmd = "kubectl get deploy -o name"
    deployments = util.get_output_from_proc(cmd).decode("utf-8").strip()
    deployments = deployments.split("\n")
    for depl in deployments:
        patch_cmd = f"kubectl patch {depl} "
        patch_cmd += f"--patch-file {YAML_DIR}/cm_patch.yaml "
        result = util.exec_process(patch_cmd)
        if result != util.EXIT_SUCCESS:
            log.error("Failed to patch %s.", depl)
    # we also patch storage
    patch_cmd = "kubectl patch -n storage deployment.apps/storage-upstream "
    patch_cmd += f"--patch-file {YAML_DIR}/cm_patch.yaml "
    result = util.exec_process(patch_cmd)
    if result != util.EXIT_SUCCESS:
        log.error("Failed to patch storage.")
    return result


def create_conf_map(filter_dir):
    """Create the filter config maps in default (filter.wasm) and storage
    (agg_filter.wasm)."""
    cmd = f"kubectl create configmap {CM_FILTER_NAME} "
    cmd += f"--from-file {filter_dir}/wasm_bins/filter.wasm "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        log.error("Failed to create config map.")
        return result
    # also refresh the aggregation filter
    cmd = f"kubectl -n storage create configmap {CM_FILTER_NAME} "
    cmd += f"--from-file {filter_dir}/wasm_bins/agg_filter.wasm "
    return util.exec_process(cmd)


def delete_config_map():
    """Delete the filter config maps in both namespaces (best effort)."""
    cmd = f"kubectl delete configmap {CM_FILTER_NAME} "
    result = util.exec_process(cmd, allow_failures=True)
    if result != util.EXIT_SUCCESS:
        log.warning("Failed to delete the config map, it does not exist.")
    # repeat this process for the storage namespace
    cmd = f"kubectl delete -n storage configmap {CM_FILTER_NAME} "
    return util.exec_process(cmd, allow_failures=True)


def update_conf_map(filter_dir):
    """Recreate the filter config maps and re-patch the deployments.

    FIX: the original had two unreachable statements after the final
    return; they have been removed.
    """
    # delete the config map
    result = delete_config_map()
    if result != util.EXIT_SUCCESS:
        log.warning("Assuming a patch is required.")
    # "refresh" the filter by recreating the config map
    result = create_conf_map(filter_dir)
    if result != util.EXIT_SUCCESS:
        return result
    # update the containers with the config map
    return patch_application()


def deploy_filter(filter_dir):
    """Install the WASM filter: config map, deployment patch, EnvoyFilter.

    If the config map already exists the deployment is assumed to be
    patched and only the config map is refreshed.
    """
    # check if the config map already exists
    # we assume that if the config map does not exist in default
    # it also does not exist in storage
    cmd = f"kubectl get configmaps {CM_FILTER_NAME} "
    result = util.exec_process(cmd, allow_failures=True)
    if result == util.EXIT_SUCCESS:
        # Config map exists, assume that the deployment is already modded
        log.warning("Config map %s already exists!", CM_FILTER_NAME)
        # delete and recreate the config map
        return update_conf_map(filter_dir)
    # create the config map with the filter
    result = create_conf_map(filter_dir)
    if result != util.EXIT_SUCCESS:
        return result
    # update the containers with the config map
    result = patch_application()
    if result != util.EXIT_SUCCESS:
        return result
    # now activate the filter
    cmd = f"kubectl apply -f {YAML_DIR}/filter.yaml"
    return util.exec_process(cmd)


def refresh_filter(filter_dir):
    """Swap in a rebuilt filter: refresh config maps, restart deployments,
    wait for the rollout, and append the measured update time to
    update_times.csv."""
    start_time = time.time()
    # delete and recreate the config map
    update_conf_map(filter_dir)
    # activate the filter
    cmd = f"kubectl apply -f {YAML_DIR}/filter.yaml"
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    # this is equivalent to a deployment restart right now
    cmd = "kubectl rollout restart deployments --namespace=default"
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    # also reset storage since we are working with a different filter now
    cmd = "kubectl rollout restart deployment storage-upstream -n=storage "
    result = util.exec_process(cmd)
    if result != util.EXIT_SUCCESS:
        return result
    result = application_wait()
    if result != util.EXIT_SUCCESS:
        return result
    end_time = time.time()
    log.info("To update filter, took %d", end_time - start_time)
    with open("update_times.csv", 'a+') as csv_file:
        w = csv.writer(csv_file)
        w.writerow([end_time - start_time])
    # FIX: the application is already rolled out at this point; the original
    # waited a second, redundant time here.
    return util.EXIT_SUCCESS


def handle_filter(args):
    """Dispatch the filter-related command line flags."""
    if args.build_filter:
        return build_filter(args.filter_dir)
    if args.deploy_filter:
        return deploy_filter(args.filter_dir)
    if args.undeploy_filter:
        # FIX: undeploy_filter was called with no arguments although its
        # signature required (platform, multizonal) — a guaranteed TypeError.
        return undeploy_filter(application=args.application)
    if args.refresh_filter:
        return refresh_filter(args.filter_dir)
    log.warning("No command line input provided. Doing nothing.")
    return util.EXIT_SUCCESS


################### APPLICATION SPECIFIC FUNCTIONS ###########################


def deploy_application(application):
    """Deploy the chosen benchmark application plus the storage/istio-config/
    root-cluster manifests, then wait for the rollout."""
    if check_kubernetes_status() != util.EXIT_SUCCESS:
        log.error("Kubernetes is not set up."
                  " Did you run the deployment script?")
        sys.exit(util.EXIT_FAILURE)
    cmd = CONFIG_MATRIX[application]['deploy_cmd']
    cmd += f" && {APPLY_CMD} {YAML_DIR}/storage.yaml && "
    cmd += f"{APPLY_CMD} {YAML_DIR}/istio-config.yaml && "
    cmd += f"{APPLY_CMD} {YAML_DIR}/root-cluster.yaml "
    result = util.exec_process(cmd)
    application_wait()
    return result


def remove_application(application):
    """Undeploy the application together with the storage and root-cluster
    manifests."""
    cmd = CONFIG_MATRIX[application]['undeploy_cmd']
    cmd += f" && {DELETE_CMD} {YAML_DIR}/storage.yaml && "
    cmd += f"{DELETE_CMD} {YAML_DIR}/root-cluster.yaml "
    return util.exec_process(cmd)


def setup_application_deployment(platform, multizonal, application):
    """Full bring-up: cluster, Istio, then the application."""
    result = start_kubernetes(platform, multizonal, application)
    if result != util.EXIT_SUCCESS:
        return result
    result = inject_istio()
    if result != util.EXIT_SUCCESS:
        return result
    result = deploy_application(application)
    return result


def main(args):
    """Dispatch the top-level command line flags; returns an exit code."""
    # single commands to execute
    if args.setup:
        return setup_application_deployment(args.platform, args.multizonal,
                                            args.application)
    if args.deploy_application:
        return deploy_application(args.application)
    if args.remove_application:
        return remove_application(args.application)
    if args.deploy_addons:
        return deploy_addons(args.deploy_addons)
    if args.remove_addons:
        return remove_addons(args.remove_addons)
    if args.clean:
        return stop_kubernetes(args.platform)
    if args.burst:
        return do_burst(args.platform)
    return handle_filter(args)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-l", "--log-file", dest="log_file",
                        default="model.log",
                        help="Specifies name of the log file.")
    parser.add_argument(
        "-ll", "--log-level", dest="log_level", default="INFO",
        choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"],
        help="The log level to choose.")
    # FIX: the default was "KB", which is not one of the declared choices.
    # argparse does not validate defaults, so "KB" silently fell through to
    # the minikube branch; "MK" makes that behavior explicit.
    parser.add_argument("-p", "--platform", dest="platform", default="MK",
                        choices=["MK", "GCP"],
                        help="Which platform to run the scripts on."
                        "MK is minikube, GCP is Google Cloud Compute")
    parser.add_argument("-a", "--application", dest="application",
                        default="BK", choices=["BK", "HR", "OB", "TT"],
                        help="Which application to deploy."
                        "BK is bookinfo, HR is hotel reservation, and OB is online boutique")
    parser.add_argument("-m", "--multi-zonal", dest="multizonal",
                        action="store_true",
                        help="If you are running on GCP,"
                        " do you want a multi-zone cluster?")
    parser.add_argument("-s", "--setup", dest="setup", action="store_true",
                        help="Just do a deployment. "
                        "This means installing the application and Kubernetes."
                        " Do not run any experiments.")
    parser.add_argument("-c", "--clean", dest="clean", action="store_true",
                        help="Clean up an existing deployment. ")
    parser.add_argument("-fd", "--filter-dir", dest="filter_dir",
                        default=FILTER_DIR,
                        help="The directory of the filter")
    parser.add_argument("-db", "--deploy-application",
                        dest="deploy_application", action="store_true",
                        help="Deploy the app. ")
    parser.add_argument("-rb", "--remove-application",
                        dest="remove_application", action="store_true",
                        help="remove the app. ")
    parser.add_argument("-bf", "--build-filter", dest="build_filter",
                        action="store_true",
                        help="Build the WASM filter. ")
    parser.add_argument("-df", "--deploy-filter", dest="deploy_filter",
                        action="store_true",
                        help="Deploy the WASM filter. ")
    parser.add_argument("-uf", "--undeploy-filter", dest="undeploy_filter",
                        action="store_true",
                        help="Remove the WASM filter. ")
    parser.add_argument("-rf", "--refresh-filter", dest="refresh_filter",
                        action="store_true",
                        help="Refresh the WASM filter. ")
    parser.add_argument("-b", "--burst", dest="burst", action="store_true",
                        help="Burst with HTTP requests to cause"
                        " congestion and queue buildup.")
    parser.add_argument("-da", "--deploy-addons", dest="deploy_addons",
                        nargs="+", type=str, default=[],
                        help="Deploy addons. ")
    parser.add_argument("-ra", "--remove-addons", dest="remove_addons",
                        nargs="+", type=str, default=[],
                        help="Remove addons. ")
    # Parse options and process argv
    arguments = parser.parse_args()
    # configure logging
    logging.basicConfig(filename=arguments.log_file,
                        format="%(levelname)s:%(message)s",
                        level=getattr(logging, arguments.log_level),
                        filemode="w")
    stderr_log = logging.StreamHandler()
    stderr_log.setFormatter(logging.Formatter("%(levelname)s:%(message)s"))
    logging.getLogger().addHandler(stderr_log)
    sys.exit(main(arguments))
texteditorbase.py
from PIL import Image
from PIL import ImageTk
from tkinter import colorchooser, filedialog, messagebox, font
import tkinter as tk
from os.path import exists, split, splitext
import platform

# Printing backends are platform specific and optional: pycups on Linux,
# pywin32 on Windows. If the import fails, printing is simply unavailable.
if platform.system() == 'Linux':
    try:
        import cups
    except:
        pass
elif platform.system() == 'Windows':
    try:
        from win32print import GetDefaultPrinter
        from win32api import ShellExecute
    except:
        pass
from pickle import dump, load
from time import sleep
import webbrowser
from threading import Thread
from idlelib.colorizer import ColorDelegator, make_pat
from idlelib.percolator import Percolator
import re

app_name = "SafGin Text"


class SafGinText:
    """Application launcher: creates the Tk root and starts the editor."""

    def start(self):
        # Build the root window, attach the editor UI, and enter the
        # Tk event loop (blocks until the window closes).
        window = tk.Tk()
        base = TextEditorBase(window)
        base.texteditorbase()
        window.mainloop()

    # def _nw(self):
    #     win = Tk()
    #     base = TextEditorBase(win)
    #     base.texteditorbase(False)
    #     win.mainloop()


# Base Class
class TextEditorBase(SafGinText):
    """Main editor widget tree, menus, keybinds, and persistence."""

    def __init__(self, window):
        # window: the Tk root this editor renders into.
        self.window = window
        # True once the IDLE colorizer has been attached to the Text widget.
        self.__syntaxhighlight = False

    def texteditorbase(self):
        """Build the whole editor UI: icon, geometry, text area, status bar,
        menu bar, keybinds, background save-state watcher, and theme.

        NOTE: statement order matters throughout — menu entry indices
        (e.g. entryconfig(5) in __es_window) depend on the exact order of
        add_command/add_separator calls below.
        """
        # Window icon (18x18). NOTE(review): Image.ANTIALIAS was removed in
        # Pillow 10; newer Pillow needs Image.LANCZOS — confirm pinned version.
        img = Image.open('media_file/sgtexteditor_iconphoto.png')
        img = img.resize((18, 18), Image.ANTIALIAS)
        self.img = ImageTk.PhotoImage(img)
        # Restore persisted session (path/theme/font/geometry) before layout.
        self.__startup_loader()
        self.__window_geometry()
        self.window.iconphoto(True, self.img)
        self.window.grid_rowconfigure(0, weight=1)
        self.window.grid_columnconfigure(0, weight=1)
        # string vars
        self.font_style = tk.StringVar()
        self.font_style.set(self.style)
        self.font_size = tk.StringVar()
        self.font_size.set(self.size)
        self.tripemp = tk.StringVar()  # triple emphasis (bold/italic/underline)
        self.tripemp.set("None")
        self.statusL_text = tk.StringVar()
        self.statusL_text.set(f"{self.path}")
        # Text Frame
        self.bodyframe = tk.Frame(self.window)
        self.text = tk.Text(self.bodyframe, font=(self.style, self.size))
        # Reload the last opened file into the fresh Text widget.
        self.__startupopen()
        self.scrollbary = tk.Scrollbar(self.bodyframe, command=self.text.yview)
        # self.scrollbarx = tk.Scrollbar(self.bodyframe, command=self.text.xview, orient="horizontal")
        # self.scrollbarx.pack(side="bottom", fill="x")
        self.scrollbary.pack(side="right", fill="y")
        self.text.pack(expand=True, fill="both")
        self.text.config(yscrollcommand=self.scrollbary.set, undo=True,
                         wrap="word", tabs=40)  # xscrollcommand=self.scrollbarx.set
        self.bodyframe.grid(row=0, column=0, sticky="n"+"w"+"e"+"s")
        # bottom frame
        self.bottomframe = tk.Frame(self.window)
        self.status_label = tk.Label(self.bottomframe,
                                     textvariable=self.statusL_text)
        # creating main menu bar and menus
        self.menubar = tk.Menu(self.window, background="blue", fg="white")
        self.window.config(menu=self.menubar)
        # NOTE(review): duplicate config(menu=...) call, harmless but redundant.
        self.window.config(menu=self.menubar)
        self.filemenu = tk.Menu(self.menubar, tearoff=0)
        self.editmenu = tk.Menu(self.menubar, tearoff=0)
        self.thememenu = tk.Menu(self.menubar, tearoff=0)
        self.helpmenu = tk.Menu(self.menubar, tearoff=0)
        # configs
        self.bottomframe.grid(row=1, column=0, sticky="n"+"w"+"e"+"s")
        self.status_label.pack(anchor="w", side="left")
        # File Menu
        self.menubar.add_cascade(label='File', menu=self.filemenu)
        self.filemenu.add_command(label="New", command=self.__new,
                                  accelerator="Ctrl+N")
        # self.filemenu.add_command(label="New Window", command=self._nw)
        self.filemenu.add_command(label='Open...', command=self.__fopen,
                                  accelerator="Ctrl+O")
        self.filemenu.add_command(label='Save', command=self.__fsave,
                                  accelerator="Ctrl+S")
        self.filemenu.add_command(label='Save As...', command=self.__fsave_as,
                                  accelerator="Ctrl+Shift+S")
        self.filemenu.add_separator()
        # Entry index 5 — __es_window disables/enables it by this index.
        self.filemenu.add_command(label="Editor Settings",
                                  command=self.__es_window)
        self.filemenu.add_separator()
        self.filemenu.add_command(label='Print 🖶', command=self.__print_file,
                                  accelerator="Ctrl+P")
        self.filemenu.add_separator()
        self.filemenu.add_command(label="Exit", command=self.__on_closing,
                                  accelerator="Ctrl+Q")
        # Edit Menu
        self.menubar.add_cascade(label="Edit", menu=self.editmenu)
        self.editmenu.add_command(label="↶ Undo", command=self.__undo,
                                  accelerator="Ctrl+Z")
        self.editmenu.add_command(label="↷ Redo", command=self.__redo,
                                  accelerator="Ctrl+Y")
        self.editmenu.add_separator()
        self.editmenu.add_command(label="Cut", command=self.__cut,
                                  accelerator="Ctrl+X")
        self.editmenu.add_command(label="Copy", command=self.__copy,
                                  accelerator="Ctrl+C")
        self.editmenu.add_command(label="Paste", command=self.__paste,
                                  accelerator="Ctrl+V")
        self.editmenu.add_separator()
        self.editmenu.add_command(label="Select All", command=self.__selectall,
                                  accelerator="Ctrl+A")
        self.editmenu.add_command(label="Delete All",
                                  command=self.__delete_all,
                                  accelerator="Shift+Del")
        # Theme Menu — indices 0/1/2 map to the states used by __set_theme.
        self.menubar.add_cascade(label="Themes", menu=self.thememenu)
        self.thememenu.add_command(label="Light",
                                   command=lambda: self.__set_theme(0))
        self.thememenu.add_command(label="Dark",
                                   command=lambda: self.__set_theme(1))
        self.thememenu.add_command(label="Terminal",
                                   command=lambda: self.__set_theme(2))
        # Help Menu
        self.menubar.add_cascade(label="Help", menu=self.helpmenu)
        self.helpmenu.add_command(label=f"About {app_name}",
                                  command=lambda: self.__about())
        self.helpmenu.add_command(label="Version Info",
                                  command=self.__version_info)
        self.helpmenu.add_separator()
        self.helpmenu.add_command(
            label="Repository",
            command=lambda: webbrowser.open("https://github.com/SatzGOD/safgin-text"))
        self.helpmenu.add_separator()
        self.helpmenu.add_command(
            label="Report a problem ⚠",
            command=lambda: webbrowser.open("https://github.com/SatzGOD/safgin-text/issues/new"))
        self.__window_keybinds()
        # To update the state of text in text box(if it saved or not)
        # Daemon thread so it dies with the main window.
        Thread(target=self.__textfileactivity, daemon=True).start()
        # to set theme on startup
        self.__themeSwitcher()
        self.window.update()  # to update idle tasks if any...
        # custom quit protocol
        self.window.protocol("WM_DELETE_WINDOW", self.__on_closing)

    # Helper Functions ................................................................................................
def __startup_loader(self): self.window_cords = {'w': None, 'h': None, 'x': None, 'y': None} try: with open('data', 'rb') as f: loadeddata = load(f) self.path = loadeddata['path'] self.theme = loadeddata['theme'] self.style = loadeddata['fontstyle'] self.size = loadeddata['fontsize'] self.window_cords['w'] = loadeddata['w'] self.window_cords['h'] = loadeddata['h'] self.window_cords['x'] = loadeddata['x'] self.window_cords['y'] = loadeddata['y'] except: # File Path self.path = "" # for themes self.theme = 0 self.size = "15" self.style = "Consolas" def __dumpjson_and_destroy(self): data = {'x': self.window.winfo_x(), 'y': self.window.winfo_y(), 'w': self.window.winfo_width(), 'h': self.window.winfo_height(), 'path': self.path, 'theme': self.theme, 'fontstyle': self.font_style.get(), 'fontsize': self.font_size.get()} with open('data', 'wb') as f: dump(data, f) self.window.destroy() def __syntax_highlighter(self): if self.path == "" or splitext(self.path)[-1] == '.txt': try: self.prec.close() self.__syntaxhighlight = False except: pass else: if not self.__syntaxhighlight: self.cdg = ColorDelegator() self.cdg.prog = re.compile(r'\b(?P<MYGROUP>tkinter)\b|' + make_pat(), re.S) self.cdg.idprog = re.compile(r'\s+(\w+)', re.S) self.cdg.tagdefs['MYGROUP'] = {'foreground': ''} self.cdg.tagdefs['COMMENT'] = {'foreground': 'grey'} self.cdg.tagdefs['KEYWORD'] = {'foreground': '#FD9622'} self.cdg.tagdefs['BUILTIN'] = {'foreground': '#A47EEA'} self.cdg.tagdefs['STRING'] = {'foreground': '#8DD12A'} self.cdg.tagdefs['DEFINITION'] = {'foreground': '#51CBEE'} self.prec = Percolator(self.text) self.prec.insertfilter(self.cdg) self.__syntaxhighlight = True else: pass def __on_closing(self): if exists(self.path): with open(self.path, 'r') as f: if f.read() != self.text.get(1.0, "end"): ask = messagebox.askyesnocancel(title="Quit", message=f"Do you want to save changes to this \n{self.path} File?") if ask == True: self.__fsave() self.__dumpjson_and_destroy() elif ask == False: 
self.__dumpjson_and_destroy() else: pass else: self.__dumpjson_and_destroy() elif self.text.get(1.0, "end") > " ": ask = messagebox.askyesnocancel(title="Quit", message=f"Do you want to save changes to this Untitled File?") if ask == True: self.__fsave() try: with open(self.path, 'r') as f: if f.read() == self.text.get(1.0, "end")[:-1]: self.__dumpjson_and_destroy() except: pass elif ask == False: self.__dumpjson_and_destroy() else: pass else: self.__dumpjson_and_destroy() # text file activity detector def __textfileactivity(self): while True: if exists(self.path): with open(self.path, 'rt') as f: if f.read() == self.text.get(1.0, "end"): self.window.title(f"{(split(self.path)[1])} - {app_name}") else: self.window.title(f"*{(split(self.path)[1])} - {app_name}") self.statusL_text.set(f"{self.path}") else: if self.text.get(1.0, "end") > " ": self.window.title(f"*Untitled - {app_name}") else: self.window.title(f"Untitled - {app_name}") sleep(0.1) # for smooth experience # window geometry setter def __window_geometry(self): window_width = self.window_cords['w'] if self.window_cords['w'] != None else 720 window_height = self.window_cords['h'] if self.window_cords['h'] != None else 480 x = self.window_cords['x'] if self.window_cords['w'] != None else int( (self.window.winfo_screenwidth() / 2) - (window_width / 2)) y = self.window_cords['y'] if self.window_cords['w'] != None else int( (self.window.winfo_screenheight() / 2) - (window_height / 2)) self.window.geometry("{}x{}+{}+{}".format(window_width, window_height, x, y)) def __window_keybinds(self): # key binds # To Zoom in and Zoom out the text self.window.bind("<Control-plus>", lambda _: self.__font_changer(self.font_size.set(str(int(self.font_size.get()) + 5))) if ( int(self.font_size.get()) < 120) else self.font_size.set(120)) # ctr + plus self.window.bind("<Control-minus>", lambda _: self.__font_changer(self.font_size.set(str(int(self.font_size.get()) - 5))) if ( int(self.font_size.get()) > 5) else 
self.font_size.set(5)) # ctr + minus self.window.bind("<Control-KP_Add>", lambda _: self.__font_changer(self.font_size.set(str(int(self.font_size.get()) + 5))) if ( int(self.font_size.get()) < 120) else self.font_size.set(120)) # ctr + plus self.window.bind("<Control-KP_Subtract>", lambda _: self.__font_changer(self.font_size.set(str(int(self.font_size.get()) - 5))) if ( int(self.font_size.get()) > 5) else self.font_size.set(5)) # ctr + minus # To Save self.window.bind("<Control-S>", lambda _: self.__fsave()) # ctr + S self.window.bind("<Control-s>", lambda _: self.__fsave()) # ctr + s # To Save as self.window.bind("<Control-Shift-S>", lambda _: self.__fsave_as()) # ctr + shift + S self.window.bind("<Control-Shift-s>", lambda _: self.__fsave_as()) # ctr + shift + s # To Open self.window.bind("<Control-O>", lambda _: self.__fopen()) # ctr + O self.window.bind("<Control-o>", lambda _: self.__fopen()) # ctr + o # To New self.window.bind("<Control-N>", lambda _: self.__new()) # ctr + N self.window.bind("<Control-n>", lambda _: self.__new()) # ctr + n # To Delete All self.window.bind("<Shift-Delete>", lambda _: self.__delete_all()) # shift + del # To Print self.window.bind("<Control-P>", lambda _: self.__print_file()) # ctr + P self.window.bind("<Control-p>", lambda _: self.__print_file()) # ctr + p # to quit self.window.bind("<Control-Q>", lambda _: self.__on_closing()) self.window.bind("<Control-q>", lambda _: self.__on_closing()) self.bottomframe.bind("<ButtonPress-1>",lambda _: self.window.attributes("-alpha", 0.4)) self.bottomframe.bind("<ButtonRelease-1>", lambda _: self.window.attributes("-alpha", 1.0)) self.__fullscreen = False def fullscreen(_): if not self.__fullscreen: self.window.attributes('-fullscreen', True) self.__fullscreen = True else: self.window.attributes('-fullscreen', False) self.__fullscreen = False self.window.bind("<F11>", fullscreen) def __new(self): self.text.config(undo=False) self.window.title(f"Untitled - {app_name}") self.__delete_all() 
self.path = "" self.text.config(undo=True) self.statusL_text.set("") self.__syntax_highlighter() def __fopen(self): self.text.config(undo=False) opath = filedialog.askopenfilename(title='Open File', filetypes=( ("text file", "*.txt"), ("all files", "*.*"), ("Python File", "*.py"), ("HTML File", "*.html"))) if exists(opath): with open(opath, 'r', encoding="utf8",errors="ignore") as f: self.__delete_all() self.text.insert(1.0, f.read()[:-1]) self.window.title(f"{(split(opath)[1])} - {app_name}") self.path = opath self.statusL_text.set(opath) self.text.config(undo=True) self.__syntax_highlighter() else: pass def __startupopen(self): if exists(self.path): with open(self.path, 'rt', encoding="utf8") as f: self.__delete_all() self.text.insert(1.0, f.read()[:-1]) self.window.title(f"{(split(self.path)[1])} - {app_name}") self.__syntax_highlighter() else: self.window.title("Untitled - TextEditor") self.__syntax_highlighter() # to save as a new file or save within an existing file def __fsave_as(self): spath = filedialog.asksaveasfile(title="Save as File",defaultextension=".txt", filetypes=( ("text File", "*.txt"), ("HTML File", "*.html"), ("Python File", "*.py"), ("all File", "*.*"))) if spath != None and exists(spath.name): filetext = self.text.get(1.0, "end") spath.write(filetext) spath.close() self.window.title(f"{(split(spath.name)[1])} - {app_name}") self.path = spath.name self.statusL_text.set(f"{self.path} (Saved)") self.__syntax_highlighter() else: pass def __fsave(self): if exists(self.path): with open(self.path, 'w') as f: filetext = self.text.get(1.0, "end") f.write(filetext) self.window.title(f"{(split(self.path)[1])} - {app_name}") self.statusL_text.set(f"{self.path} (Saved)") else: self.__fsave_as() def __print_file(self): if platform.system() == 'Windows': printer = GetDefaultPrinter() if printer: self.statusL_text.set(printer) ask = messagebox.askokcancel(title="Print", message=f"Click ok to print this file \n{self.path} ") if ask and exists(self.path): 
ShellExecute(0, "print", self.path, None, ".", 0) else: self.statusL_text.set("No Printer Available") messagebox.showwarning(title=f"{app_name}", message="Cannot Detect a printer:" "\nBe sure that your printer is connected properly and use " "Control Panel to verify that the printer is configured properly.") self.statusL_text.set(f"{self.path}") elif platform.system() == 'Linux': try: conn = cups.Connection() printer = conn.getPrinters() printer = printer.keys()[0] if printer: self.statusL_text.set(printer) ask = messagebox.askokcancel(title="Print", message=f"Click ok to print this file \n{self.path} ") if ask and exists(self.path): conn.printFile(printer, self.path, "print", options={'media': '216x280mm'}) except: messagebox.showerror(title="Print Error",message="failed to connect to server, Please make sure your device is connected to the printer!") def __cut(self): self.text.event_generate("<<Cut>>") # copy the selected text def __copy(self): self.text.event_generate("<<Copy>>") # paste the text from the clipboard def __paste(self): self.text.event_generate("<<Paste>>") # select all the text from the text box def __selectall(self): self.text.tag_add('sel', 1.0, "end") # delete all text from the text box def __delete_all(self): self.text.delete(1.0, "end") def __undo(self): try: self.text.edit_undo() except: pass def __redo(self): try: self.text.edit_redo() except: pass def __color_fchanger(self): fcolor = colorchooser.askcolor(title="Choose a color for font")[1] self.fcolorbutton.config(bg=fcolor) self.text.config(fg=fcolor) def __color_bchanger(self): bcolor = colorchooser.askcolor(title="Choose a color for paper")[1] self.bcolorbutton.config(bg=bcolor) self.text.config(bg=bcolor) def __font_changer(self, *args): self.text.config(font=(self.font_style.get(), self.font_size.get())) def __tripemp_func(self, *args): def __helper(style): if style in current_tag: self.text.tag_remove(style, "sel.first", "sel.last") self.tripemp.set("None") else: 
self.text.tag_add(style, "sel.first", "sel.last") try: if self.tripemp.get() == self.tripemp_list[0]: self.__selectall() bold_font = font.Font(self.text, self.text.cget("font")) bold_font.configure(weight="bold") self.text.tag_configure("bold", font=bold_font) current_tag = self.text.tag_names("sel.first") __helper("bold") elif self.tripemp.get() == self.tripemp_list[1]: self.__selectall() italic_font = font.Font(self.text, self.text.cget("font")) italic_font.configure(slant="italic") self.text.tag_configure("italic", font=italic_font) current_tag = self.text.tag_names("sel.first") __helper("italic") elif self.tripemp.get() == self.tripemp_list[2]: self.__selectall() underline_font = font.Font(self.text, self.text.cget("font")) underline_font.configure(underline=True) self.text.tag_configure("underline", font=underline_font) current_tag = self.text.tag_names("sel.first") __helper("underline") else: self.tripemp.set("None") except: self.tripemp.set("None") def __about(self): messagebox.showinfo(f"About {app_name}", "A python text editor application by Satz!\nSource Code at SatzGOD github or Click `Repository` in the Help Menu.\n" "\ninstagram: @satz_._") def __version_info(self): messagebox.showinfo(title="Version Info", message=f"\nAbout This Version:-" f"\n{app_name} v2.3.6 " f"\nWhat's New?\n" f"Click StatusBar to Transparent background and Added new F11 to full screen feature." f"\nMinor Changes:\n" f"Upgraded Editor Settings Window and Fixed Some bugs and glitches." 
                            )

    def __es_window(self):
        """Open the floating "Editor Settings" window (font family/style/size, colours)."""
        self.tripemp_list = ["Bold", "Italics", "Underline"]
        # Disable the menu entry that opens this window while it is open.
        self.filemenu.entryconfig(5, state="disabled")
        self.fw = tk.Toplevel()
        self.fw.overrideredirect(1)          # borderless: the custom Titlebar is used instead
        self.fw.attributes("-alpha", 0.75)   # semi-transparent
        self.fw.grid_rowconfigure(0,weight=1)
        self.fw.grid_columnconfigure(0, weight=1)
        self.eswtitle = Titlebar(self.fw,self.img,maximize=False,minimze=False,onhold=False,closef=self.__fwonclosing)
        self.eswtitle.grid(row=0,column=0,sticky="we")
        self.eswtitle.set_title("Editor Settings")
        self.fw.attributes('-topmost', True)
        self.fw.resizable(False, False)
        self.fw.title("Editor Settings")
        # Center the window on the screen.
        width, height = 300, 160
        x = int((self.window.winfo_screenwidth() / 2) - (width / 2))
        y = int((self.window.winfo_screenheight() / 2) - (height / 2))
        # NOTE(review): "-{x}" anchors from the RIGHT screen edge; "+{x}+{y}"
        # was probably intended -- confirm the window actually appears centred.
        self.fw.geometry(f"{width}x{height}-{x}+{y}")
        self.frame = tk.Frame(self.fw)
        self.l1 = tk.Label(self.frame, text="Font Family:")
        self.l1.grid(row=0, column=0, sticky="w", pady=3)
        self.stylebox = tk.OptionMenu(self.frame, self.font_style, *font.families(), command=self.__font_changer)
        self.stylebox.grid(row=0, column=1, sticky="e", pady=3)
        self.l2 = tk.Label(self.frame, text="Font Style:")
        self.l2.grid(row=1, column=0, sticky="w", pady=3)
        self.tripempbox = tk.OptionMenu(self.frame, self.tripemp, *self.tripemp_list, command=self.__tripemp_func)
        self.tripempbox.grid(row=1, column=1, sticky="w", pady=3)
        self.l3 = tk.Label(self.frame, text="Font Size:")
        self.l3.grid(row=2, column=0, sticky="w", pady=3)
        self.sizebox = tk.Spinbox(self.frame, from_=1, to_=120, textvariable=self.font_size, width=4, command=self.__font_changer)
        self.sizebox.grid(row=2, column=1, sticky="w", pady=3)
        self.fcolorbutton = tk.Button(self.frame, text="Font color", command=self.__color_fchanger)
        self.fcolorbutton.grid(row=3, column=0, sticky="w", pady=3, padx=2)
        self.bcolorbutton = tk.Button(self.frame, text="Paper color", command=self.__color_bchanger)
        self.bcolorbutton.grid(row=3, column=1, sticky="w", pady=3)
        self.frame.grid(row=1, column=0)
        # Apply the current theme to the freshly created widgets.
        self.__ts_esw()
        self.fw.mainloop()

    def __fwonclosing(self):
        """Close handler for the Editor Settings window: re-enable its menu entry."""
        self.filemenu.entryconfig(5, state="normal")
        self.fw.destroy()

    def __set_theme(self, newstate):
        """Record the new theme index and restyle the whole UI."""
        self.theme = newstate
        self.__themeSwitcher()
        try:
            # Restyle the Editor Settings window as well, if it exists.
            self.__ts_esw()
        except:
            # NOTE(review): bare except; presumably the settings widgets just
            # don't exist yet -- consider narrowing to AttributeError.
            pass

    def __themeSwitcher(self):
        """Recolour the main window, text widget and menus for the current theme."""
        if self.theme == 0:
            # Theme 0: light grey/white colours.
            white = "#FCFDFD"
            defsyswhite = "#C7CCD1"
            black = "#000001"
            relief = "flat"
            highlightgrey = "#D9DDE0"
            # NOTE(review): `font` here shadows the tkinter.font module locally.
            font, size = "Consolas", "10"
            self.window.config(bg=defsyswhite)
            self.bottomframe.config(bg=defsyswhite)
            self.text.config(fg=black, bg=white, insertbackground=black)
            self.status_label.config(fg=black, bg=defsyswhite)
            self.menubar.config(bg=defsyswhite, fg=black, relief=relief,
                                activebackground=highlightgrey, selectcolor=highlightgrey,
                                font=(font, size), activeforeground=black)
            self.filemenu.config(bg=defsyswhite, fg=black, relief=relief,
                                 activebackground=highlightgrey, selectcolor=highlightgrey,
                                 font=(font, size), activeforeground=black)
            self.editmenu.config(bg=defsyswhite, fg=black, relief=relief,
                                 activebackground=highlightgrey, selectcolor=highlightgrey,
                                 font=(font, size), activeforeground=black)
            self.thememenu.config(bg=defsyswhite, fg=black, relief=relief,
                                  activebackground=highlightgrey, selectcolor=highlightgrey,
                                  font=(font, size), activeforeground=black)
            self.helpmenu.config(bg=defsyswhite, fg=black, relief=relief,
                                 activebackground=highlightgrey, selectcolor=highlightgrey,
                                 font=(font, size), activeforeground=black)
        elif self.theme == 1:
            # Theme 1: dark grey colours.
            white = "white"
            textwhite = '#ebebeb'
            sysdark = "#2E3238"
            darkgrey = "#2B2B2B"
            textbg = "#282923"
            lightgrey = "#414141"
            relief = "flat"
            font, size = "Consolas", "10"
            self.window.config(bg=sysdark)
            self.bottomframe.config(bg=sysdark)
            self.text.config(fg=textwhite, bg=textbg, insertbackground=white)
            self.status_label.config(bg=sysdark, fg=white)
            self.menubar.config(bg=darkgrey, fg=white, relief=relief,
                                activebackground=lightgrey, activeforeground=white,
                                selectcolor=lightgrey, font=(font, size))
            self.filemenu.config(bg=darkgrey, fg=white, relief=relief,
                                 activebackground=lightgrey, activeforeground=white,
                                 selectcolor=lightgrey, font=(font, size))
            self.editmenu.config(bg=darkgrey, fg=white, relief=relief,
                                 activebackground=lightgrey, activeforeground=white,
                                 selectcolor=lightgrey, font=(font, size))
            self.thememenu.config(bg=darkgrey, fg=white, relief=relief,
                                  activebackground=lightgrey, activeforeground=white,
                                  selectcolor=lightgrey, font=(font, size))
            self.helpmenu.config(bg=darkgrey, fg=white, relief=relief,
                                 activebackground=lightgrey, activeforeground=white,
                                 selectcolor=lightgrey, font=(font, size))
        elif self.theme == 2:
            # Theme 2: green-on-black colours.
            white = "#FFFFFF"
            yelow = "#BDBE00"
            black = "#000000"
            green = "#00BF00"
            relief = "flat"
            font, size = "Consolas", "10"
            self.window.config(bg=black)
            self.bottomframe.config(bg=black)
            self.text.config(fg=white, bg=black, insertbackground=white)
            self.status_label.config(bg=black, fg=yelow)
            self.menubar.config(bg=black, fg=white, relief=relief,
                                activebackground=green, activeforeground=white,
                                selectcolor=green, font=(font, size))
            self.filemenu.config(bg=black, fg=white, relief=relief,
                                 activebackground=green, activeforeground=white,
                                 selectcolor=green, font=(font, size))
            self.editmenu.config(bg=black, fg=white, relief=relief,
                                 activebackground=green, activeforeground=white,
                                 selectcolor=green, font=(font, size))
            self.thememenu.config(bg=black, fg=white, relief=relief,
                                  activebackground=green, activeforeground=white,
                                  selectcolor=green, font=(font, size))
            self.helpmenu.config(bg=black, fg=white, relief=relief,
                                 activebackground=green, activeforeground=white,
                                 selectcolor=green, font=(font, size))

    def __ts_esw(self):
        """Apply the current theme to the Editor Settings window's widgets."""
        if self.theme == 0:
            # Light theme.
            defsyswhite = "#C7CCD1"
            black = "#000001"
            relief = "groove"
            highlightgrey = "#D9DDE0"
            self.stylebox.config(fg=black, bg=defsyswhite, activebackground=highlightgrey,
                                 activeforeground=black, relief=relief, highlightthickness=False)
            self.sizebox.config(fg=black, bg=defsyswhite, relief=relief,
                                highlightthickness=3, highlightbackground=defsyswhite)
            self.fcolorbutton.config(fg=black,
                                     bg=defsyswhite, activebackground=highlightgrey,
                                     activeforeground=black, relief=relief)
            self.bcolorbutton.config(fg=black, bg=defsyswhite, activebackground=highlightgrey,
                                     activeforeground=black, relief=relief)
            self.tripempbox.config(fg=black, bg=defsyswhite, activebackground=highlightgrey,
                                   activeforeground=black, relief=relief, highlightthickness=False)
            self.fw.config(bg=defsyswhite)
            self.frame.config(bg=defsyswhite)
            self.l1.config(bg=defsyswhite, fg=black)
            self.l2.config(bg=defsyswhite, fg=black)
            self.l3.config(bg=defsyswhite, fg=black)
            self.eswtitle.config(bg=defsyswhite, fg=black, abg=highlightgrey, afg=black)
        elif self.theme == 1:
            # Dark theme.
            white = "white"
            darkgrey = "#2B2B2B"
            lightdarkgrey = "#2E3238"
            lightgrey = "#414141"
            relief = "groove"
            self.stylebox.config(bg=lightdarkgrey, fg=white, activebackground=lightgrey,
                                 activeforeground=white, relief=relief, highlightthickness=False)
            self.sizebox.config(bg=lightdarkgrey, fg=white, relief=relief,
                                highlightthickness=3, highlightbackground=lightdarkgrey)
            self.fcolorbutton.config(bg=lightdarkgrey, fg=white, activebackground=lightgrey,
                                     activeforeground=white, relief=relief)
            self.bcolorbutton.config(bg=lightdarkgrey, fg=white, activebackground=lightgrey,
                                     activeforeground=white, relief=relief)
            self.tripempbox.config(bg=lightdarkgrey, fg=white, activebackground=lightgrey,
                                   activeforeground=white, relief=relief, highlightthickness=False)
            self.fw.config(bg=darkgrey)
            self.frame.config(bg=darkgrey)
            self.l1.config(bg=darkgrey, fg=white)
            self.l2.config(bg=darkgrey, fg=white)
            self.l3.config(bg=darkgrey, fg=white)
            self.eswtitle.config(bg=darkgrey, fg=white, abg=lightgrey, afg=white)
        elif self.theme == 2:
            # Green-on-black theme.
            white = "#FFFFFF"
            black = "#000000"
            green = "#00BF00"
            relief = "groove"
            self.stylebox.config(bg=black, fg=white, activebackground=green,
                                 activeforeground=white, relief=relief, highlightthickness=False)
            self.sizebox.config(bg=black, fg=white, relief=relief,
                                highlightthickness=3, highlightbackground=black)
            self.fcolorbutton.config(bg=black,
                                     fg=white, activebackground=green,
                                     activeforeground=white, relief=relief)
            self.bcolorbutton.config(bg=black, fg=white, activebackground=green,
                                     activeforeground=white, relief=relief)
            self.tripempbox.config(bg=black, fg=white, activebackground=green,
                                   activeforeground=white, relief=relief, highlightthickness=False)
            self.fw.config(bg=black)
            self.frame.config(bg=black)
            self.l1.config(bg=black, fg=white)
            self.l2.config(bg=black, fg=white)
            self.l3.config(bg=black, fg=white)
            self.eswtitle.config(bg=black, fg=white, abg=green, afg=white)


# Custom Title Bar
class Titlebar:
    """Replacement decoration for borderless (overrideredirect) toplevels.

    Shows an icon, a title label and close / maximize / minimize buttons,
    and supports dragging the owning window by its title bar.
    """

    def __init__(self, master, icon, *, maximize=True, minimze=True, onhold=True, closef=None):
        # NOTE(review): the `minimze` spelling is part of the keyword-only
        # public interface (callers use it), so it is kept as-is.
        self.master = master
        self.max = maximize
        self.mini = minimze
        self.onhold = onhold          # dim the window while it is being dragged
        self.closef = closef          # optional custom close callback
        self.__ovri = False           # tracks the overrideredirect toggle state
        self.title_bar = tk.Frame(master, bd=0, relief="flat")
        # self.title_bar.grid(row=0, column=0, sticky="nsew")
        self.title_bar.bind("<ButtonPress-1>", self.__start_move)
        self.title_bar.bind("<ButtonRelease-1>", self.__stop_move)
        self.title_bar.bind("<B1-Motion>", self.__move_window)
        self.title_bar.bind("<Map>", self.__screen_appear)
        self.master.bind("<Button-2>", self.__show_overrides)
        self.appicon = tk.Label(self.title_bar, image=icon)
        self.appicon.pack(side="left", anchor="w")
        self.close = tk.Button(self.title_bar, text="✕", relief="flat", height=1, width=4,
                               font="consolas", bd=1,
                               command=self.closef if self.closef != None else self.master.destroy)
        self.close.pack(anchor="e", side="right")
        if self.max:
            self.maxd = tk.Button(self.title_bar, text="🗖", relief="flat", height=1, width=4,
                                  font="consolas", bd=1, command=self.__maxd, activebackground="grey")
            self.maxd.pack(anchor="e", side="right")
            self.__maxdstate = None
        if self.mini:
            # NOTE(review): `comman=` relies on Tk's option-name abbreviation;
            # it reaches `command`, but spelling it out would be clearer.
            self.min = tk.Button(self.title_bar, text="―", relief="flat", height=1, width=4,
                                 font="consolas", comman=self.__min, bd=1, activebackground="grey")
            self.min.pack(anchor="e", side="right")
        self.title_label = tk.Label(self.title_bar, text=app_name)
        self.title_label.pack(side="left", anchor="w")

    def set_title(self, text):
        """Change the text shown in the title bar."""
        self.title_label.config(text=text)

    # Geometry-manager pass-throughs so a Titlebar can be laid out like any
    # ordinary widget.
    def grid(self, *args, **kwargs):
        self.title_bar.grid(*args, **kwargs)

    def pack(self, *args, **kwargs):
        self.title_bar.pack(*args, **kwargs)

    def place(self, *args, **kwargs):
        self.title_bar.place(*args, **kwargs)

    def __start_move(self, event):
        # Remember the grab point for the drag; optionally dim the window.
        self.x = event.x
        self.y = event.y
        if self.onhold:
            self.master.attributes("-alpha", 0.75)

    def __stop_move(self, event):
        self.x = None
        self.y = None
        if self.onhold:
            self.master.attributes("-alpha", 1.0)

    def __show_overrides(self, _):
        # Middle-click toggles between the native decoration and this bar.
        if not self.__ovri:
            self.master.overrideredirect(0)
            self.master.deiconify()
            self.title_bar.grid_forget()
            self.__ovri = True
        else:
            self.master.overrideredirect(1)
            self.title_bar.grid(row=0, column=0, sticky="nsew")
            self.__ovri = False

    def __move_window(self, event):
        # Move the toplevel by the mouse delta since __start_move.
        deltax = event.x - self.x
        deltay = event.y - self.y
        x = self.master.winfo_x() + deltax
        y = self.master.winfo_y() + deltay
        self.master.geometry(f"+{x}+{y}")

    def __maxd(self):
        # Toggle maximised state, remembering the previous geometry so it can
        # be restored on the second click.
        if self.__maxdstate == None or not self.__maxdstate:
            self.px, self.py = self.master.winfo_x(), self.master.winfo_y()
            self.pw, self.ph = self.master.winfo_width(), self.master.winfo_height()
            w, h = self.master.winfo_screenwidth(), self.master.winfo_screenheight()
            self.master.geometry("%dx%d+0+0" % (w, h))
            self.maxd.config(text="🗗")
            self.__maxdstate = True
        else:
            self.master.geometry(f"{self.pw}x{self.ph}+{self.px}+{self.py}")
            self.maxd.config(text="🗖")
            self.__maxdstate = False

    def __min(self):
        # Re-enable the native decoration first; overrideredirect windows
        # cannot be iconified directly.
        self.master.overrideredirect(0)
        self.master.iconify()

    def __screen_appear(self, _):
        # self.master.overrideredirect(1)
        pass

    def config(self, bg, fg, abg, afg):
        """Recolour every title-bar widget (normal and active variants)."""
        self.title_bar.config(bg=bg)
        self.appicon.config(bg=bg, fg=fg, activebackground=abg, activeforeground=afg)
        self.close.config(bg=bg, fg=fg, activebackground="red", activeforeground="white")
        self.title_label.config(bg=bg, fg=fg, activebackground=abg, activeforeground=afg)
        self.close.bind("<Enter>", lambda _: self.close.config(bg="red",
fg="white")) self.close.bind("<Leave>", lambda _: self.close.config(bg=bg, fg=fg)) if self.max: self.maxd.config(bg=bg, fg=fg, activebackground=abg, activeforeground=afg) self.min.config(bg=bg, fg=fg, activebackground=abg, activeforeground=afg) self.maxd.bind("<Enter>", lambda _: self.maxd.config(bg="grey")) self.maxd.bind("<Leave>", lambda _: self.maxd.config(bg=bg)) if self.mini: self.min.bind("<Enter>", lambda _: self.min.config(bg="grey")) self.min.bind("<Leave>", lambda _: self.min.config(bg=bg))
# --------------------------------------------------------------------------
# test_io.py  (start of the io test-suite section)
# --------------------------------------------------------------------------
"""Unit tests for io.py."""
from __future__ import print_function
from __future__ import unicode_literals

import os
import sys
import time
import array
import threading
import random
import unittest
from itertools import cycle, count
from test import test_support

import codecs
import io  # The module under test


class MockRawIO(io.RawIOBase):
    """Raw-IO stub: replays `read_stack` chunks and records writes."""

    def __init__(self, read_stack=()):
        self._read_stack = list(read_stack)
        self._write_stack = []

    def read(self, n=None):
        # Each call hands out the next queued chunk; b"" signals EOF.
        try:
            return self._read_stack.pop(0)
        # BUGFIX: narrowed from a bare `except:` -- only an exhausted stack
        # (IndexError from list.pop(0)) means EOF; a bare except would also
        # have swallowed KeyboardInterrupt and real bugs.
        except IndexError:
            return b""

    def write(self, b):
        self._write_stack.append(b[:])
        return len(b)

    def writable(self):
        return True

    def fileno(self):
        return 42

    def readable(self):
        return True

    def seekable(self):
        return True

    def seek(self, pos, whence):
        pass

    def tell(self):
        return 42


class MockFileIO(io.BytesIO):
    """BytesIO that records the size of every read() result."""

    def __init__(self, data):
        self.read_history = []
        io.BytesIO.__init__(self, data)

    def read(self, n=None):
        res = io.BytesIO.read(self, n)
        self.read_history.append(None if res is None else len(res))
        return res


class MockNonBlockWriterIO(io.RawIOBase):
    """Raw writer that follows `blocking_script`: a negative entry raises
    BlockingIOError with that many bytes reported written."""

    def __init__(self, blocking_script):
        self._blocking_script = list(blocking_script)
        self._write_stack = []

    def write(self, b):
        self._write_stack.append(b[:])
        n = self._blocking_script.pop(0)
        if (n < 0):
            raise io.BlockingIOError(0, "test blocking", -n)
        else:
            return n

    def writable(self):
        return True


class IOTest(unittest.TestCase):
    """End-to-end tests of io.open on a real temp file."""

    def tearDown(self):
        test_support.unlink(test_support.TESTFN)

    def write_ops(self, f):
        """Shared write/seek/truncate exercise; used for raw and buffered files."""
        self.assertEqual(f.write(b"blah."), 5)
        f.truncate(0)
        # truncate() does not move the file position.
        self.assertEqual(f.tell(), 5)
        f.seek(0)
        self.assertEqual(f.write(b"blah."), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"Hello."), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(-1, 1), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"h"), 1)
        self.assertEqual(f.seek(-1, 2), 13)
        self.assertEqual(f.tell(), 13)
        self.assertEqual(f.truncate(12), 12)
        # Position is still past the new end after truncating.
        self.assertEqual(f.tell(), 13)
        self.assertEqual(f.write(b"hij"), 3)
        self.assertEqual(f.seek(0,1), 16)
        self.assertEqual(f.tell(), 16)
        self.assertEqual(f.truncate(12), 12)
        # seek() must reject non-integer offsets.
        self.assertRaises(TypeError, f.seek, 0.0)

    def read_ops(self, f, buffered=False):
        """Shared read/readinto/seek exercise over a file holding b"hello world\\n"."""
        data = f.read(5)
        self.assertEqual(data, b"hello")
        data = bytearray(data)
        self.assertEqual(f.readinto(data), 5)
        self.assertEqual(data, b" worl")
        # Partial readinto near EOF: only 2 bytes land, the rest is untouched.
        self.assertEqual(f.readinto(data), 2)
        self.assertEqual(len(data), 5)
        self.assertEqual(data[:2], b"d\n")
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.read(20), b"hello world\n")
        self.assertEqual(f.read(1), b"")
        self.assertEqual(f.readinto(bytearray(b"x")), 0)
        self.assertEqual(f.seek(-6, 2), 6)
        self.assertEqual(f.read(5), b"world")
        self.assertEqual(f.read(0), b"")
        self.assertEqual(f.readinto(bytearray()), 0)
        self.assertEqual(f.seek(-6, 1), 5)
        self.assertEqual(f.read(5), b" worl")
        self.assertEqual(f.tell(), 10)
        f.seek(0)
        f.read(2)
        f.seek(0, 1)
        self.assertEqual(f.tell(), 2)
        self.assertRaises(TypeError, f.seek, 0.0)
        if buffered:
            f.seek(0)
            self.assertEqual(f.read(), b"hello world\n")
            f.seek(6)
            self.assertEqual(f.read(), b"world\n")
            self.assertEqual(f.read(), b"")

    # 2 GiB boundary used by the large-file tests below.
    LARGE = 2**31

    def large_file_ops(self, f):
        """Seek/write/truncate around the 2**31 boundary (sparse file)."""
        assert f.readable()
        assert f.writable()
        self.assertEqual(f.seek(self.LARGE), self.LARGE)
        self.assertEqual(f.tell(), self.LARGE)
        self.assertEqual(f.write(b"xxx"), 3)
        self.assertEqual(f.tell(), self.LARGE + 3)
        self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
        self.assertEqual(f.truncate(), self.LARGE + 2)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 2)
        self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
        # Explicit truncate(n) leaves the position unchanged.
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 1)
        self.assertEqual(f.seek(-1, 2), self.LARGE)
        # Only one byte remains after the final truncate.
        self.assertEqual(f.read(2), b"x")

    def test_raw_file_io(self):
        # buffering=0 yields an unbuffered raw file object.
        f = io.open(test_support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.readable(), False)
        self.assertEqual(f.writable(), True)
        self.assertEqual(f.seekable(), True)
        self.write_ops(f)
        f.close()
        f = io.open(test_support.TESTFN, "rb", buffering=0)
        self.assertEqual(f.readable(), True)
        self.assertEqual(f.writable(), False)
        self.assertEqual(f.seekable(), True)
        self.read_ops(f)
        f.close()

    def test_buffered_file_io(self):
        # Default (buffered) open in wb / rb / r+b modes.
        f = io.open(test_support.TESTFN, "wb")
        self.assertEqual(f.readable(), False)
        self.assertEqual(f.writable(), True)
        self.assertEqual(f.seekable(), True)
        self.write_ops(f)
        f.close()
        f = io.open(test_support.TESTFN, "rb")
        self.assertEqual(f.readable(), True)
        self.assertEqual(f.writable(), False)
        self.assertEqual(f.seekable(), True)
        self.read_ops(f, True)
        # NOTE(review): the "rb" handle above is reassigned without being
        # closed first -- relies on GC to release it.
        f = io.open(test_support.TESTFN, "r+b")
        self.assertEqual(f.readable(), True)
        self.assertEqual(f.writable(), True)
        self.assertEqual(f.seekable(), True)
        self.write_ops(f)
        f.seek(0)
        self.read_ops(f, True)
        f.close()

    def test_readline(self):
        # readline() with and without a size limit, including mid-line stops.
        f = io.open(test_support.TESTFN, "wb")
        f.write(b"abc\ndef\nxyzzy\nfoo")
        f.close()
        f = io.open(test_support.TESTFN, "rb")
        self.assertEqual(f.readline(), b"abc\n")
        self.assertEqual(f.readline(10), b"def\n")
        self.assertEqual(f.readline(2), b"xy")
        self.assertEqual(f.readline(4), b"zzy\n")
        self.assertEqual(f.readline(), b"foo")
        f.close()

    def test_raw_bytes_io(self):
        # The same ops must hold for an in-memory BytesIO.
        f = io.BytesIO()
        self.write_ops(f)
        data = f.getvalue()
        self.assertEqual(data, b"hello world\n")
        f = io.BytesIO(data)
        self.read_ops(f, True)

    def test_large_file_ops(self):
        # On Windows and Mac OSX this test comsumes large resources; It takes
        # a long time to build the >2GB file and takes >2GB of disk space
        # therefore the resource must be enabled to run this test.
        if sys.platform[:3] in ('win', 'os2') or sys.platform == 'darwin':
            if not test_support.is_resource_enabled("largefile"):
                print("\nTesting large file ops skipped on %s." % sys.platform,
                      file=sys.stderr)
                print("It requires %d bytes and a long time."
                      % self.LARGE, file=sys.stderr)
                print("Use 'regrtest.py -u largefile test_io' to run it.",
                      file=sys.stderr)
                return
        f = io.open(test_support.TESTFN, "w+b", 0)
        self.large_file_ops(f)
        f.close()
        f = io.open(test_support.TESTFN, "w+b")
        self.large_file_ops(f)
        f.close()

    def test_with_open(self):
        # `with` must close the file on both normal and exceptional exit.
        for bufsize in (0, 1, 100):
            f = None
            with open(test_support.TESTFN, "wb", bufsize) as f:
                f.write(b"xxx")
            self.assertEqual(f.closed, True)
            f = None
            try:
                with open(test_support.TESTFN, "wb", bufsize) as f:
                    1 // 0
            except ZeroDivisionError:
                self.assertEqual(f.closed, True)
            else:
                self.fail("1 // 0 didn't raise an exception")

    # issue 5008
    def test_append_mode_tell(self):
        with io.open(test_support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with io.open(test_support.TESTFN, "ab", buffering=0) as f:
            self.assertEqual(f.tell(), 3)
        with io.open(test_support.TESTFN, "ab") as f:
            self.assertEqual(f.tell(), 3)
        with io.open(test_support.TESTFN, "a") as f:
            self.assert_(f.tell() > 0)

    def test_destructor(self):
        # __del__ must trigger close(), which must trigger flush().
        record = []
        class MyFileIO(io.FileIO):
            def __del__(self):
                record.append(1)
                io.FileIO.__del__(self)
            def close(self):
                record.append(2)
                io.FileIO.close(self)
            def flush(self):
                record.append(3)
                io.FileIO.flush(self)
        f = MyFileIO(test_support.TESTFN, "w")
        f.write("xxx")
        del f
        self.assertEqual(record, [1, 2, 3])

    def test_close_flushes(self):
        # close() must flush buffered data to disk.
        f = io.open(test_support.TESTFN, "wb")
        f.write(b"xxx")
        f.close()
        f = io.open(test_support.TESTFN, "rb")
        self.assertEqual(f.read(), b"xxx")
        f.close()

    def XXXtest_array_writes(self):
        # XXX memory view not available yet
        a = array.array('i', range(10))
        n = len(memoryview(a))
        f = io.open(test_support.TESTFN, "wb", 0)
        self.assertEqual(f.write(a), n)
        f.close()
        f = io.open(test_support.TESTFN, "wb")
        self.assertEqual(f.write(a), n)
        f.close()

    def test_closefd(self):
        # closefd=False only makes sense with a file descriptor, not a name.
        self.assertRaises(ValueError, io.open, test_support.TESTFN, 'w',
                          closefd=False)

    def testReadClosed(self):
        with io.open(test_support.TESTFN, "w") as f:
            f.write("egg\n")
        with io.open(test_support.TESTFN, "r") as f:
            # NOTE(review): `file` shadows the (Python 2) builtin.
            file = io.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.read(), "egg\n")
            file.seek(0)
            file.close()
            # Reading a closed file must raise ValueError.
            self.assertRaises(ValueError, file.read)

    def test_no_closefd_with_filename(self):
        # can't use closefd in combination with a file name
        self.assertRaises(ValueError, io.open, test_support.TESTFN, "r",
                          closefd=False)

    def test_closefd_attr(self):
        with io.open(test_support.TESTFN, "wb") as f:
            f.write(b"egg\n")
        with io.open(test_support.TESTFN, "r") as f:
            self.assertEqual(f.buffer.raw.closefd, True)
            file = io.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.buffer.raw.closefd, False)

    def test_flush_error_on_close(self):
        f = io.open(test_support.TESTFN, "wb", buffering=0)
        def bad_flush():
            raise IOError()
        f.flush = bad_flush
        self.assertRaises(IOError, f.close) # exception not swallowed

    def test_multi_close(self):
        # Repeated close() calls are no-ops; flush() after close raises.
        f = io.open(test_support.TESTFN, "wb", buffering=0)
        f.close()
        f.close()
        f.close()
        self.assertRaises(ValueError, f.flush)


class MemorySeekTestMixin:
    """Shared read/seek/tell tests; subclasses supply buftype/ioclass/EOF."""

    def testInit(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)

    def testRead(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEquals(buf[:1], bytesIo.read(1))
        self.assertEquals(buf[1:5], bytesIo.read(4))
        # Over-long read returns whatever remains.
        self.assertEquals(buf[5:], bytesIo.read(900))
        self.assertEquals(self.EOF, bytesIo.read())

    def testReadNoArgs(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEquals(buf, bytesIo.read())
        self.assertEquals(self.EOF, bytesIo.read())

    def testSeek(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        bytesIo.read(5)
        bytesIo.seek(0)
        self.assertEquals(buf, bytesIo.read())
        bytesIo.seek(3)
        self.assertEquals(buf[3:], bytesIo.read())
        self.assertRaises(TypeError, bytesIo.seek, 0.0)

    def testTell(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEquals(0, bytesIo.tell())
        bytesIo.seek(5)
        self.assertEquals(5, bytesIo.tell())
        # Seeking past the end is allowed for in-memory streams.
        bytesIo.seek(10000)
        self.assertEquals(10000, bytesIo.tell())


class BytesIOTest(MemorySeekTestMixin, unittest.TestCase):
    @staticmethod
    def buftype(s):
        return s.encode("utf-8")
    ioclass = io.BytesIO
    EOF = b""


class StringIOTest(MemorySeekTestMixin, unittest.TestCase):
    buftype = str
    ioclass = io.StringIO
    EOF = ""


class BufferedReaderTest(unittest.TestCase):

    def testRead(self):
        rawio = MockRawIO((b"abc", b"d", b"efg"))
        bufio = io.BufferedReader(rawio)
        self.assertEquals(b"abcdef", bufio.read(6))

    def testBuffering(self):
        # Each row: [buffer size, buffered read sizes, expected raw read sizes].
        data = b"abcdefghi"
        dlen = len(data)
        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3],     [ dlen ]    ],
            [   4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]
        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = MockFileIO(data)
            bufio = io.BufferedReader(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEquals(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            self.assertEquals(rawio.read_history, raw_read_sizes)

    def testReadNonBlocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = MockRawIO((b"abc", b"d", None, b"efg", None, None))
        bufio = io.BufferedReader(rawio)
        self.assertEquals(b"abcd", bufio.read(6))
        self.assertEquals(b"e", bufio.read(1))
        self.assertEquals(b"fg", bufio.read())
        self.assert_(None is bufio.read())
        self.assertEquals(b"", bufio.read())

    def testReadToEof(self):
        rawio = MockRawIO((b"abc", b"d", b"efg"))
        bufio = io.BufferedReader(rawio)
        self.assertEquals(b"abcdefg", bufio.read(9000))

    def testReadNoArgs(self):
        rawio = MockRawIO((b"abc", b"d", b"efg"))
        bufio = io.BufferedReader(rawio)
        self.assertEquals(b"abcdefg", bufio.read())

    def testFileno(self):
        rawio = MockRawIO((b"abc", b"d", b"efg"))
        bufio = io.BufferedReader(rawio)
        self.assertEquals(42, bufio.fileno())

    def testFilenoNoFileno(self):
        # XXX will we always have fileno() function? If so, kill
        # this test. Else, write it.
        pass

    def testThreads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's.
            # This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            # NOTE(review): Python 2 idiom -- range() must return a list for
            # `* N` and random.shuffle() to work.
            l = range(256) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with io.open(test_support.TESTFN, "wb") as f:
                f.write(s)
            with io.open(test_support.TESTFN, "rb", buffering=0) as raw:
                bufio = io.BufferedReader(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                for t in threads:
                    t.start()
                time.sleep(0.02) # yield
                for t in threads:
                    t.join()
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                # Every byte value must appear exactly N times overall --
                # no chunk lost or duplicated across threads.
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            test_support.unlink(test_support.TESTFN)


class BufferedWriterTest(unittest.TestCase):

    def testWrite(self):
        # Write to the buffered IO but don't overflow the buffer.
        writer = MockRawIO()
        bufio = io.BufferedWriter(writer, 8)
        bufio.write(b"abc")
        # Nothing must have reached the raw stream yet.
        self.assertFalse(writer._write_stack)

    def testWriteOverflow(self):
        # Exceeding the buffer size forces a flush to the raw stream.
        writer = MockRawIO()
        bufio = io.BufferedWriter(writer, 8)
        bufio.write(b"abc")
        bufio.write(b"defghijkl")
        self.assertEquals(b"abcdefghijkl", writer._write_stack[0])

    def testWriteNonBlocking(self):
        # The raw script alternates accepted byte counts with one blocking
        # error (-6) to exercise partial-write handling.
        raw = MockNonBlockWriterIO((9, 2, 10, -6, 10, 8, 12))
        bufio = io.BufferedWriter(raw, 8, 16)
        bufio.write(b"asdf")
        bufio.write(b"asdfa")
        self.assertEquals(b"asdfasdfa", raw._write_stack[0])
        bufio.write(b"asdfasdfasdf")
        self.assertEquals(b"asdfasdfasdf", raw._write_stack[1])
        bufio.write(b"asdfasdfasdf")
        self.assertEquals(b"dfasdfasdf", raw._write_stack[2])
        self.assertEquals(b"asdfasdfasdf", raw._write_stack[3])
        bufio.write(b"asdfasdfasdf")
        # XXX I don't like this test. It relies too heavily on how the
        # algorithm actually works, which we might change. Refactor
        # later.
def testFileno(self): rawio = MockRawIO((b"abc", b"d", b"efg")) bufio = io.BufferedWriter(rawio) self.assertEquals(42, bufio.fileno()) def testFlush(self): writer = MockRawIO() bufio = io.BufferedWriter(writer, 8) bufio.write(b"abc") bufio.flush() self.assertEquals(b"abc", writer._write_stack[0]) def testThreads(self): # BufferedWriter should not raise exceptions or crash # when called from multiple threads. try: # We use a real file object because it allows us to # exercise situations where the GIL is released before # writing the buffer to the raw streams. This is in addition # to concurrency issues due to switching threads in the middle # of Python code. with io.open(test_support.TESTFN, "wb", buffering=0) as raw: bufio = io.BufferedWriter(raw, 8) errors = [] def f(): try: # Write enough bytes to flush the buffer s = b"a" * 19 for i in range(50): bufio.write(s) except Exception as e: errors.append(e) raise threads = [threading.Thread(target=f) for x in range(20)] for t in threads: t.start() time.sleep(0.02) # yield for t in threads: t.join() self.assertFalse(errors, "the following exceptions were caught: %r" % errors) finally: test_support.unlink(test_support.TESTFN) def test_flush_error_on_close(self): raw = MockRawIO() def bad_flush(): raise IOError() raw.flush = bad_flush b = io.BufferedWriter(raw) self.assertRaises(IOError, b.close) # exception not swallowed def test_multi_close(self): raw = MockRawIO() b = io.BufferedWriter(raw) b.close() b.close() b.close() self.assertRaises(ValueError, b.flush) class BufferedRWPairTest(unittest.TestCase): def testRWPair(self): r = MockRawIO(()) w = MockRawIO() pair = io.BufferedRWPair(r, w) self.assertFalse(pair.closed) # XXX More Tests class BufferedRandomTest(unittest.TestCase): def testReadAndWrite(self): raw = MockRawIO((b"asdf", b"ghjk")) rw = io.BufferedRandom(raw, 8, 12) self.assertEqual(b"as", rw.read(2)) rw.write(b"ddd") rw.write(b"eee") self.assertFalse(raw._write_stack) # Buffer writes self.assertEqual(b"ghjk", 
rw.read()) # This read forces write flush self.assertEquals(b"dddeee", raw._write_stack[0]) def testSeekAndTell(self): raw = io.BytesIO(b"asdfghjkl") rw = io.BufferedRandom(raw) self.assertEquals(b"as", rw.read(2)) self.assertEquals(2, rw.tell()) rw.seek(0, 0) self.assertEquals(b"asdf", rw.read(4)) rw.write(b"asdf") rw.seek(0, 0) self.assertEquals(b"asdfasdfl", rw.read()) self.assertEquals(9, rw.tell()) rw.seek(-4, 2) self.assertEquals(5, rw.tell()) rw.seek(2, 1) self.assertEquals(7, rw.tell()) self.assertEquals(b"fl", rw.read(11)) self.assertRaises(TypeError, rw.seek, 0.0) # To fully exercise seek/tell, the StatefulIncrementalDecoder has these # properties: # - A single output character can correspond to many bytes of input. # - The number of input bytes to complete the character can be # undetermined until the last input byte is received. # - The number of input bytes can vary depending on previous input. # - A single input byte can correspond to many characters of output. # - The number of output characters can be undetermined until the # last input byte is received. # - The number of output characters can vary depending on previous input. class StatefulIncrementalDecoder(codecs.IncrementalDecoder): """ For testing seek/tell behavior with a stateful, buffering decoder. Input is a sequence of words. Words may be fixed-length (length set by input) or variable-length (period-terminated). In variable-length mode, extra periods are ignored. Possible words are: - 'i' followed by a number sets the input length, I (maximum 99). When I is set to 0, words are space-terminated. - 'o' followed by a number sets the output length, O (maximum 99). - Any other word is converted into a word followed by a period on the output. The output word consists of the input word truncated or padded out with hyphens to make its length equal to O. If O is 0, the word is output verbatim without truncating or padding. I and O are initially set to 1. 
When I changes, any buffered input is re-scanned according to the new I. EOF also terminates the last word. """ def __init__(self, errors='strict'): codecs.IncrementalDecoder.__init__(self, errors) self.reset() def __repr__(self): return '<SID %x>' % id(self) def reset(self): self.i = 1 self.o = 1 self.buffer = bytearray() def getstate(self): i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset() return bytes(self.buffer), i*100 + o def setstate(self, state): buffer, io = state self.buffer = bytearray(buffer) i, o = divmod(io, 100) self.i, self.o = i ^ 1, o ^ 1 def decode(self, input, final=False): output = '' for b in input: if self.i == 0: # variable-length, terminated with period if b == '.': if self.buffer: output += self.process_word() else: self.buffer.append(b) else: # fixed-length, terminate after self.i bytes self.buffer.append(b) if len(self.buffer) == self.i: output += self.process_word() if final and self.buffer: # EOF terminates the last word output += self.process_word() return output def process_word(self): output = '' if self.buffer[0] == ord('i'): self.i = min(99, int(self.buffer[1:] or 0)) # set input length elif self.buffer[0] == ord('o'): self.o = min(99, int(self.buffer[1:] or 0)) # set output length else: output = self.buffer.decode('ascii') if len(output) < self.o: output += '-'*self.o # pad out with hyphens if self.o: output = output[:self.o] # truncate to output length output += '.' self.buffer = bytearray() return output codecEnabled = False @classmethod def lookupTestDecoder(cls, name): if cls.codecEnabled and name == 'test_decoder': latin1 = codecs.lookup('latin-1') return codecs.CodecInfo( name='test_decoder', encode=latin1.encode, decode=None, incrementalencoder=None, streamreader=None, streamwriter=None, incrementaldecoder=cls) # Register the previous decoder for testing. # Disabled by default, tests will enable it. 
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder) class StatefulIncrementalDecoderTest(unittest.TestCase): """ Make sure the StatefulIncrementalDecoder actually works. """ test_cases = [ # I=1, O=1 (fixed-length input == fixed-length output) (b'abcd', False, 'a.b.c.d.'), # I=0, O=0 (variable-length input, variable-length output) (b'oiabcd', True, 'abcd.'), # I=0, O=0 (should ignore extra periods) (b'oi...abcd...', True, 'abcd.'), # I=0, O=6 (variable-length input, fixed-length output) (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'), # I=2, O=6 (fixed-length input < fixed-length output) (b'i.i2.o6xyz', True, 'xy----.z-----.'), # I=6, O=3 (fixed-length input > fixed-length output) (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'), # I=0, then 3; O=29, then 15 (with longer output) (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True, 'a----------------------------.' + 'b----------------------------.' + 'cde--------------------------.' + 'abcdefghijabcde.' + 'a.b------------.' + '.c.------------.' + 'd.e------------.' + 'k--------------.' + 'l--------------.' + 'm--------------.') ] def testDecoder(self): # Try a few one-shot test cases. for input, eof, output in self.test_cases: d = StatefulIncrementalDecoder() self.assertEquals(d.decode(input, eof), output) # Also test an unfinished decode, followed by forcing EOF. 
d = StatefulIncrementalDecoder() self.assertEquals(d.decode(b'oiabcd'), '') self.assertEquals(d.decode(b'', 1), 'abcd.') def test_append_bom(self): # The BOM is not written again when appending to a non-empty file filename = test_support.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with io.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with io.open(filename, 'rb') as f: self.assertEquals(f.read(), 'aaa'.encode(charset)) with io.open(filename, 'a', encoding=charset) as f: f.write('xxx') with io.open(filename, 'rb') as f: self.assertEquals(f.read(), 'aaaxxx'.encode(charset)) def test_seek_bom(self): # Same test, but when seeking manually filename = test_support.TESTFN for charset in ('utf-8-sig', 'utf-16', 'utf-32'): with io.open(filename, 'w', encoding=charset) as f: f.write('aaa') pos = f.tell() with io.open(filename, 'r+', encoding=charset) as f: f.seek(pos) f.write('zzz') f.seek(0) f.write('bbb') with io.open(filename, 'rb') as f: self.assertEquals(f.read(), 'bbbzzz'.encode(charset)) class TextIOWrapperTest(unittest.TestCase): def setUp(self): self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n" self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii") def tearDown(self): test_support.unlink(test_support.TESTFN) def testLineBuffering(self): r = io.BytesIO() b = io.BufferedWriter(r, 1000) t = io.TextIOWrapper(b, newline="\n", line_buffering=True) t.write(u"X") self.assertEquals(r.getvalue(), b"") # No flush happened t.write(u"Y\nZ") self.assertEquals(r.getvalue(), b"XY\nZ") # All got flushed t.write(u"A\rB") self.assertEquals(r.getvalue(), b"XY\nZA\rB") def testEncodingErrorsReading(self): # (1) default b = io.BytesIO(b"abc\n\xff\n") t = io.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.read) # (2) explicit strict b = io.BytesIO(b"abc\n\xff\n") t = io.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.read) # (3) ignore b = io.BytesIO(b"abc\n\xff\n") t = 
io.TextIOWrapper(b, encoding="ascii", errors="ignore") self.assertEquals(t.read(), "abc\n\n") # (4) replace b = io.BytesIO(b"abc\n\xff\n") t = io.TextIOWrapper(b, encoding="ascii", errors="replace") self.assertEquals(t.read(), u"abc\n\ufffd\n") def testEncodingErrorsWriting(self): # (1) default b = io.BytesIO() t = io.TextIOWrapper(b, encoding="ascii") self.assertRaises(UnicodeError, t.write, u"\xff") # (2) explicit strict b = io.BytesIO() t = io.TextIOWrapper(b, encoding="ascii", errors="strict") self.assertRaises(UnicodeError, t.write, u"\xff") # (3) ignore b = io.BytesIO() t = io.TextIOWrapper(b, encoding="ascii", errors="ignore", newline="\n") t.write(u"abc\xffdef\n") t.flush() self.assertEquals(b.getvalue(), b"abcdef\n") # (4) replace b = io.BytesIO() t = io.TextIOWrapper(b, encoding="ascii", errors="replace", newline="\n") t.write(u"abc\xffdef\n") t.flush() self.assertEquals(b.getvalue(), b"abc?def\n") def testNewlinesInput(self): testdata = b"AAA\nBBB\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n") for newline, expected in [ (None, normalized.decode("ascii").splitlines(True)), ("", testdata.decode("ascii").splitlines(True)), ("\n", ["AAA\n", "BBB\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r\n", ["AAA\nBBB\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r", ["AAA\nBBB\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]), ]: buf = io.BytesIO(testdata) txt = io.TextIOWrapper(buf, encoding="ascii", newline=newline) self.assertEquals(txt.readlines(), expected) txt.seek(0) self.assertEquals(txt.read(), "".join(expected)) def testNewlinesOutput(self): testdict = { "": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ", "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ", "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ", } tests = [(None, testdict[os.linesep])] + sorted(testdict.items()) for newline, expected in tests: buf = io.BytesIO() txt = io.TextIOWrapper(buf, encoding="ascii", newline=newline) txt.write("AAA\nB") 
txt.write("BB\nCCC\n") txt.write("X\rY\r\nZ") txt.flush() self.assertEquals(buf.closed, False) self.assertEquals(buf.getvalue(), expected) def testNewlines(self): input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ] tests = [ [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ], [ '', input_lines ], [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ], [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ], [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ], ] encodings = ( 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) # Try a range of buffer sizes to test the case where \r is the last # character in TextIOWrapper._pending_line. for encoding in encodings: # XXX: str.encode() should return bytes data = bytes(''.join(input_lines).encode(encoding)) for do_reads in (False, True): for bufsize in range(1, 10): for newline, exp_lines in tests: bufio = io.BufferedReader(io.BytesIO(data), bufsize) textio = io.TextIOWrapper(bufio, newline=newline, encoding=encoding) if do_reads: got_lines = [] while True: c2 = textio.read(2) if c2 == '': break self.assertEquals(len(c2), 2) got_lines.append(c2 + textio.readline()) else: got_lines = list(textio) for got_line, exp_line in zip(got_lines, exp_lines): self.assertEquals(got_line, exp_line) self.assertEquals(len(got_lines), len(exp_lines)) def testNewlinesInput(self): testdata = b"AAA\nBBB\nCCC\rDDD\rEEE\r\nFFF\r\nGGG" normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n") for newline, expected in [ (None, normalized.decode("ascii").splitlines(True)), ("", testdata.decode("ascii").splitlines(True)), ("\n", ["AAA\n", "BBB\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r\n", ["AAA\nBBB\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]), ("\r", ["AAA\nBBB\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]), ]: buf = io.BytesIO(testdata) txt = io.TextIOWrapper(buf, encoding="ascii", newline=newline) self.assertEquals(txt.readlines(), expected) 
txt.seek(0) self.assertEquals(txt.read(), "".join(expected)) def testNewlinesOutput(self): data = u"AAA\nBBB\rCCC\n" data_lf = b"AAA\nBBB\rCCC\n" data_cr = b"AAA\rBBB\rCCC\r" data_crlf = b"AAA\r\nBBB\rCCC\r\n" save_linesep = os.linesep try: for os.linesep, newline, expected in [ ("\n", None, data_lf), ("\r\n", None, data_crlf), ("\n", "", data_lf), ("\r\n", "", data_lf), ("\n", "\n", data_lf), ("\r\n", "\n", data_lf), ("\n", "\r", data_cr), ("\r\n", "\r", data_cr), ("\n", "\r\n", data_crlf), ("\r\n", "\r\n", data_crlf), ]: buf = io.BytesIO() txt = io.TextIOWrapper(buf, encoding="ascii", newline=newline) txt.write(data) txt.close() self.assertEquals(buf.closed, True) self.assertRaises(ValueError, buf.getvalue) finally: os.linesep = save_linesep # Systematic tests of the text I/O API def testBasicIO(self): for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65): for enc in "ascii", "latin1", "utf8" :# , "utf-16-be", "utf-16-le": f = io.open(test_support.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEquals(f.write(u"abc"), 3) f.close() f = io.open(test_support.TESTFN, "r+", encoding=enc) f._CHUNK_SIZE = chunksize self.assertEquals(f.tell(), 0) self.assertEquals(f.read(), u"abc") cookie = f.tell() self.assertEquals(f.seek(0), 0) self.assertEquals(f.read(2), u"ab") self.assertEquals(f.read(1), u"c") self.assertEquals(f.read(1), u"") self.assertEquals(f.read(), u"") self.assertEquals(f.tell(), cookie) self.assertEquals(f.seek(0), 0) self.assertEquals(f.seek(0, 2), cookie) self.assertEquals(f.write(u"def"), 3) self.assertEquals(f.seek(cookie), cookie) self.assertEquals(f.read(), u"def") if enc.startswith("utf"): self.multi_line_test(f, enc) f.close() def multi_line_test(self, f, enc): f.seek(0) f.truncate() sample = u"s\xff\u0fff\uffff" wlines = [] for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000): chars = [] for i in range(size): chars.append(sample[i % len(sample)]) line = u"".join(chars) + u"\n" 
wlines.append((f.tell(), line)) f.write(line) f.seek(0) rlines = [] while True: pos = f.tell() line = f.readline() if not line: break rlines.append((pos, line)) self.assertEquals(rlines, wlines) def testTelling(self): f = io.open(test_support.TESTFN, "w+", encoding="utf8") p0 = f.tell() f.write(u"\xff\n") p1 = f.tell() f.write(u"\xff\n") p2 = f.tell() f.seek(0) self.assertEquals(f.tell(), p0) self.assertEquals(f.readline(), u"\xff\n") self.assertEquals(f.tell(), p1) self.assertEquals(f.readline(), u"\xff\n") self.assertEquals(f.tell(), p2) f.seek(0) for line in f: self.assertEquals(line, u"\xff\n") self.assertRaises(IOError, f.tell) self.assertEquals(f.tell(), p2) f.close() def testSeeking(self): chunk_size = io.TextIOWrapper._CHUNK_SIZE prefix_size = chunk_size - 2 u_prefix = "a" * prefix_size prefix = bytes(u_prefix.encode("utf-8")) self.assertEquals(len(u_prefix), len(prefix)) u_suffix = "\u8888\n" suffix = bytes(u_suffix.encode("utf-8")) line = prefix + suffix f = io.open(test_support.TESTFN, "wb") f.write(line*2) f.close() f = io.open(test_support.TESTFN, "r", encoding="utf-8") s = f.read(prefix_size) self.assertEquals(s, unicode(prefix, "ascii")) self.assertEquals(f.tell(), prefix_size) self.assertEquals(f.readline(), u_suffix) def testSeekingToo(self): # Regression test for a specific bug data = b'\xe0\xbf\xbf\n' f = io.open(test_support.TESTFN, "wb") f.write(data) f.close() f = io.open(test_support.TESTFN, "r", encoding="utf-8") f._CHUNK_SIZE # Just test that it exists f._CHUNK_SIZE = 2 f.readline() f.tell() def testSeekAndTell(self): """Test seek/tell using the StatefulIncrementalDecoder.""" def testSeekAndTellWithData(data, min_pos=0): """Tell/seek to various points within a data stream and ensure that the decoded data returned by read() is consistent.""" f = io.open(test_support.TESTFN, 'wb') f.write(data) f.close() f = io.open(test_support.TESTFN, encoding='test_decoder') decoded = f.read() f.close() for i in range(min_pos, len(decoded) + 1): # seek 
positions for j in [1, 5, len(decoded) - i]: # read lengths f = io.open(test_support.TESTFN, encoding='test_decoder') self.assertEquals(f.read(i), decoded[:i]) cookie = f.tell() self.assertEquals(f.read(j), decoded[i:i + j]) f.seek(cookie) self.assertEquals(f.read(), decoded[i:]) f.close() # Enable the test decoder. StatefulIncrementalDecoder.codecEnabled = 1 # Run the tests. try: # Try each test case. for input, _, _ in StatefulIncrementalDecoderTest.test_cases: testSeekAndTellWithData(input) # Position each test case so that it crosses a chunk boundary. CHUNK_SIZE = io.TextIOWrapper._CHUNK_SIZE for input, _, _ in StatefulIncrementalDecoderTest.test_cases: offset = CHUNK_SIZE - len(input)//2 prefix = b'.'*offset # Don't bother seeking into the prefix (takes too long). min_pos = offset*2 testSeekAndTellWithData(prefix + input, min_pos) # Ensure our test decoder won't interfere with subsequent tests. finally: StatefulIncrementalDecoder.codecEnabled = 0 def testEncodedWrites(self): data = u"1234567890" tests = ("utf-16", "utf-16-le", "utf-16-be", "utf-32", "utf-32-le", "utf-32-be") for encoding in tests: buf = io.BytesIO() f = io.TextIOWrapper(buf, encoding=encoding) # Check if the BOM is written only once (see issue1753). 
f.write(data) f.write(data) f.seek(0) self.assertEquals(f.read(), data * 2) self.assertEquals(buf.getvalue(), (data * 2).encode(encoding)) def timingTest(self): timer = time.time enc = "utf8" line = "\0\x0f\xff\u0fff\uffff\U000fffff\U0010ffff"*3 + "\n" nlines = 10000 nchars = len(line) nbytes = len(line.encode(enc)) for chunk_size in (32, 64, 128, 256): f = io.open(test_support.TESTFN, "w+", encoding=enc) f._CHUNK_SIZE = chunk_size t0 = timer() for i in range(nlines): f.write(line) f.flush() t1 = timer() f.seek(0) for line in f: pass t2 = timer() f.seek(0) while f.readline(): pass t3 = timer() f.seek(0) while f.readline(): f.tell() t4 = timer() f.close() if test_support.verbose: print("\nTiming test: %d lines of %d characters (%d bytes)" % (nlines, nchars, nbytes)) print("File chunk size: %6s" % f._CHUNK_SIZE) print("Writing: %6.3f seconds" % (t1-t0)) print("Reading using iteration: %6.3f seconds" % (t2-t1)) print("Reading using readline(): %6.3f seconds" % (t3-t2)) print("Using readline()+tell(): %6.3f seconds" % (t4-t3)) def testReadOneByOne(self): txt = io.TextIOWrapper(io.BytesIO(b"AA\r\nBB")) reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEquals(reads, "AA\nBB") # read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128. def testReadByChunk(self): # make sure "\r\n" straddles 128 char boundary. 
txt = io.TextIOWrapper(io.BytesIO(b"A" * 127 + b"\r\nB")) reads = "" while True: c = txt.read(128) if not c: break reads += c self.assertEquals(reads, "A"*127+"\nB") def test_issue1395_1(self): txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii") # read one char at a time reads = "" while True: c = txt.read(1) if not c: break reads += c self.assertEquals(reads, self.normalized) def test_issue1395_2(self): txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = "" while True: c = txt.read(4) if not c: break reads += c self.assertEquals(reads, self.normalized) def test_issue1395_3(self): txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read(4) reads += txt.readline() reads += txt.readline() reads += txt.readline() self.assertEquals(reads, self.normalized) def test_issue1395_4(self): txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) reads += txt.read() self.assertEquals(reads, self.normalized) def test_issue1395_5(self): txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii") txt._CHUNK_SIZE = 4 reads = txt.read(4) pos = txt.tell() txt.seek(0) txt.seek(pos) self.assertEquals(txt.read(4), "BBB\n") def test_issue2282(self): buffer = io.BytesIO(self.testdata) txt = io.TextIOWrapper(buffer, encoding="ascii") self.assertEqual(buffer.seekable(), txt.seekable()) def check_newline_decoder_utf8(self, decoder): # UTF-8 specific tests for a newline decoder def _check_decode(b, s, **kwargs): # We exercise getstate() / setstate() as well as decode() state = decoder.getstate() self.assertEquals(decoder.decode(b, **kwargs), s) decoder.setstate(state) self.assertEquals(decoder.decode(b, **kwargs), s) _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") _check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") _check_decode(b'\xa2', "") 
_check_decode(b'\x88', "\u8888") _check_decode(b'\xe8', "") self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True) decoder.reset() _check_decode(b'\n', "\n") _check_decode(b'\r', "") _check_decode(b'', "\n", final=True) _check_decode(b'\r', "\n", final=True) _check_decode(b'\r', "") _check_decode(b'a', "\na") _check_decode(b'\r\r\n', "\n\n") _check_decode(b'\r', "") _check_decode(b'\r', "\n") _check_decode(b'\na', "\na") _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n") _check_decode(b'\xe8\xa2\x88', "\u8888") _check_decode(b'\n', "\n") _check_decode(b'\xe8\xa2\x88\r', "\u8888") _check_decode(b'\n', "\n") def check_newline_decoder(self, decoder, encoding): result = [] encoder = codecs.getincrementalencoder(encoding)() def _decode_bytewise(s): for b in encoder.encode(s): result.append(decoder.decode(b)) self.assertEquals(decoder.newlines, None) _decode_bytewise("abc\n\r") self.assertEquals(decoder.newlines, '\n') _decode_bytewise("\nabc") self.assertEquals(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc\r") self.assertEquals(decoder.newlines, ('\n', '\r\n')) _decode_bytewise("abc") self.assertEquals(decoder.newlines, ('\r', '\n', '\r\n')) _decode_bytewise("abc\r") self.assertEquals("".join(result), "abc\n\nabcabc\nabcabc") decoder.reset() self.assertEquals(decoder.decode("abc".encode(encoding)), "abc") self.assertEquals(decoder.newlines, None) def test_newline_decoder(self): encodings = ( 'utf-8', 'latin-1', 'utf-16', 'utf-16-le', 'utf-16-be', 'utf-32', 'utf-32-le', 'utf-32-be', ) for enc in encodings: decoder = codecs.getincrementaldecoder(enc)() decoder = io.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoder(decoder, enc) decoder = codecs.getincrementaldecoder("utf-8")() decoder = io.IncrementalNewlineDecoder(decoder, translate=True) self.check_newline_decoder_utf8(decoder) def test_flush_error_on_close(self): txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii") def bad_flush(): raise IOError() 
txt.flush = bad_flush self.assertRaises(IOError, txt.close) # exception not swallowed def test_multi_close(self): txt = io.TextIOWrapper(io.BytesIO(self.testdata), encoding="ascii") txt.close() txt.close() txt.close() self.assertRaises(ValueError, txt.flush) # XXX Tests for open() class MiscIOTest(unittest.TestCase): def tearDown(self): test_support.unlink(test_support.TESTFN) def testImport__all__(self): for name in io.__all__: obj = getattr(io, name, None) self.assert_(obj is not None, name) if name == "open": continue elif "error" in name.lower(): self.assert_(issubclass(obj, Exception), name) else: self.assert_(issubclass(obj, io.IOBase)) def test_attributes(self): f = io.open(test_support.TESTFN, "wb", buffering=0) self.assertEquals(f.mode, "wb") f.close() f = io.open(test_support.TESTFN, "U") self.assertEquals(f.name, test_support.TESTFN) self.assertEquals(f.buffer.name, test_support.TESTFN) self.assertEquals(f.buffer.raw.name, test_support.TESTFN) self.assertEquals(f.mode, "U") self.assertEquals(f.buffer.mode, "rb") self.assertEquals(f.buffer.raw.mode, "rb") f.close() f = io.open(test_support.TESTFN, "w+") self.assertEquals(f.mode, "w+") self.assertEquals(f.buffer.mode, "rb+") # Does it really matter? self.assertEquals(f.buffer.raw.mode, "rb+") g = io.open(f.fileno(), "wb", closefd=False) self.assertEquals(g.mode, "wb") self.assertEquals(g.raw.mode, "wb") self.assertEquals(g.name, f.fileno()) self.assertEquals(g.raw.name, f.fileno()) f.close() g.close() def test_main(): test_support.run_unittest(IOTest, BytesIOTest, StringIOTest, BufferedReaderTest, BufferedWriterTest, BufferedRWPairTest, BufferedRandomTest, StatefulIncrementalDecoderTest, TextIOWrapperTest, MiscIOTest) if __name__ == "__main__": unittest.main()
PipelinesRicartAgrawala.py
"""Ricart-Agrawala distributed mutual exclusion, simulated with multiprocessing.

Each process plays one "database entry" competing for the critical section
(printing the shared database). Processes are fully connected through a
matrix of multiprocessing Pipes and coordinate with Lamport-clock-stamped
REQUEST/REPLY messages.
"""
import multiprocessing
from ctypes import Structure, c_int, c_bool
from enum import Enum
from time import sleep
from heapq import heappush, heappop
from random import randint, uniform
from functools import reduce

# Seconds each poll() waits for a message on one pipe.
TIMEOUT = 1.0/3.0
# How many times each process must enter the critical section before it is done.
NUMBER_OF_ENTRIES = 5


class Message(Enum):
    """Message kinds exchanged between processes."""
    REQUEST = "REQUEST"
    REPLY = "REPLY"


class DBEntryStruct(Structure):
    """ctypes record stored in the shared multiprocessing.Array:
    process id, its Lamport clock, and its critical-section entry count."""
    _fields_ = [('id', c_int), ('clock', c_int), ('cs_entries', c_int)]


class DBEntry:
    """Per-process state for the Ricart-Agrawala protocol.

    pipes is this process's row of the pipe matrix: one Connection per peer,
    with False in the slot for the process itself (no self-pipe).
    """
    def __init__(self, ID, pipes):
        self.ID = ID
        self.pipes = pipes
        self.clock = 0              # Lamport logical clock
        self.cs_entries = 0         # completed critical-section entries
        self.request_queue = []     # heap of (clock, text, sender_ID) requests
        self.replies = []           # REPLY messages received for own request

    def receive_messages(self):
        """Poll every peer pipe once (with TIMEOUT) and process what arrived."""
        for pipe in self.pipes:
            if pipe != False and pipe.poll(TIMEOUT):
                message = pipe.recv()
                self.act_on_message(message)

    def act_on_message(self, message):
        """Apply one incoming message: advance the clock, then either queue or
        answer a REQUEST, or record a REPLY."""
        time_stamp, text, sender_ID = message
        self.say("Received " + text + " from " + str(sender_ID))
        # Lamport rule: clock = max(local, received) + 1.
        self.clock = time_stamp + 1 if time_stamp >= self.clock else self.clock + 1
        if text == Message.REQUEST.value:
            heappush(self.request_queue, message)
            # If our own request is not at the head of the (clock-ordered)
            # queue, answer immediately; otherwise the reply is deferred
            # until send_delayed_replies().
            # NOTE(review): this pops the queue HEAD and replies to the
            # current sender. That is only correct under the invariant that,
            # whenever we have no pending request of our own, the queue is
            # empty before each push (so head == just-received message) --
            # which appears to hold here because send_delayed_replies()
            # drains the queue, but verify before reusing this code.
            if self.request_queue[0][2] != self.ID:
                heappop(self.request_queue)
                reply = [self.clock, Message.REPLY.value, self.ID]
                self.say("Sending " + Message.REPLY.value + " to " + str(sender_ID))
                self.pipes[sender_ID].send(reply)
        elif text == Message.REPLY.value:
            self.replies.append(message)

    def say(self, text, database=False):
        """Print a trace line, indented into a per-process column (4 tabs per
        process id). database=True suppresses the 'id[clock]:' prefix."""
        if database:
            print(self.ID * 4 * "\t" + text)
        else:
            print(self.ID * 4 * "\t" + str(self.ID) + "[" + str(self.clock) + "]: " + text)

    def send_request(self):
        """Queue our own request locally and broadcast it to every peer."""
        request = (self.clock, Message.REQUEST.value, self.ID)
        heappush(self.request_queue, request)
        self.say("Sending " + Message.REQUEST.value + " to all")
        for pipe in self.pipes:
            if pipe != False:
                pipe.send(request)

    def wait_for_your_turn(self):
        """Block until every peer has replied (n-1 replies; our own slot in
        pipes is False, hence len(pipes) - 1)."""
        while not len(self.replies) == len(self.pipes) - 1:
            self.receive_messages()

    def delete_replies(self):
        """Reset the reply tally before/after a critical-section round."""
        self.replies = []

    def send_delayed_replies(self):
        """Leaving the critical section: drop our own request (queue head) and
        answer every request that was deferred while we held the CS."""
        heappop(self.request_queue)
        while(True):
            try:
                request_clock, text, sender_ID = heappop(self.request_queue)
                reply = [self.clock, Message.REPLY.value, self.ID]
                self.say("Sending " + Message.REPLY.value + " to " + str(sender_ID))
                self.pipes[sender_ID].send(reply)
            except IndexError:
                # Queue drained.
                break

    def update_structure(self):
        """Count this critical-section entry and return a fresh ctypes record
        to store into the shared database array."""
        self.cs_entries += 1
        return DBEntryStruct(self.ID, self.clock, self.cs_entries)


def work(ID, pipes, database, active_entries):
    """Process entry point: perform NUMBER_OF_ENTRIES critical-section rounds,
    then keep servicing peers' requests until everyone is finished."""
    db_entry = DBEntry(ID, pipes)
    for _ in range(NUMBER_OF_ENTRIES):
        db_entry.receive_messages()
        db_entry.send_request()
        db_entry.wait_for_your_turn()
        enter_database(db_entry, database, active_entries)
        db_entry.send_delayed_replies()
    # Done with our own entries, but peers may still need our replies.
    while(active_entries.value):
        db_entry.receive_messages()


def enter_database(db_entry, database, active_entries):
    """The critical section: update our record in the shared array, print the
    whole database, and recompute whether any process is still active."""
    db_entry.delete_replies()
    db_entry.say("--- DATABASE ---", database=True)
    database[db_entry.ID] = db_entry.update_structure()
    for entry in database:
        db_entry.say("[ID: " + str(entry.id) + " - C: " + str(entry.clock) + " - E: " + str(entry.cs_entries) + "]", database=True)
    # active_entries stays True until every record reaches NUMBER_OF_ENTRIES.
    active_entries.value = reduce(lambda num1, num2: num1 + num2, [1 if e.cs_entries == NUMBER_OF_ENTRIES else 0 for e in database]) != len(database)
    db_entry.say("................", database=True)
    # Simulate time spent working inside the critical section.
    sleep(uniform(0.1, 2))


def wait_until_they_all_finish():
    """Poll until every child process has exited."""
    active_process = multiprocessing.active_children()
    while(active_process):
        sleep(10)
        active_process = multiprocessing.active_children()


if __name__ == '__main__':
    #create database
    n = randint(3, 8)
    database = multiprocessing.Array(DBEntryStruct, [(i, 0, 0) for i in range(n)])
    active_entries = multiprocessing.Value(c_bool, True)
    #create pipe matrix
    # pipe_matrix[i][j] is i's end of the pipe to j; the diagonal stays False.
    pipe_matrix = [[False for _ in range(n)] for _ in range(n)]
    for row in range(n):
        for column in range(row+1, n):
            pipe_matrix[row][column], pipe_matrix[column][row] = multiprocessing.Pipe()
    for i in range(n):
        entry = multiprocessing.Process(target=work, args=(i, pipe_matrix[i], database, active_entries))
        entry.start()
    wait_until_they_all_finish()
commands.py
"""Telegram bot command handlers: config editing and restart (admin-only)."""
import threading

import telegram

from . import config
from .logger import Logger


def config_editor_command(bot, update, args):
    """Dispatcher entry point for /config."""
    command = ConfigEditorCommand(bot, update, args)
    command.run()


def restart_command(bot, update, args):
    """Dispatcher entry point for /restart."""
    command = RestartCommand(bot, update, args)
    command.run()


class Command:
    """A single bot command bound to one incoming update."""

    def __init__(self, bot: telegram.Bot, update: telegram.Update, args):
        source = update.message or update.edited_message
        self.bot = bot
        self.chat_id = source.chat_id
        self.args = args

    def run(self):
        """Execute the command; subclasses may gate this with preconditions."""
        self.handle()

    def handle(self):
        """Command-specific behavior; overridden by subclasses."""
        pass

    def _send_message(self, text):
        """Reply in the chat the command came from."""
        self.bot.sendMessage(chat_id=self.chat_id, text=text)


class AdminCommand(Command):
    """A command that only executes when issued from the admin chat."""

    def run(self):
        if not self.is_admin_chat():
            return
        self.handle()

    def is_admin_chat(self):
        return config.Key.ADMIN_CHAT_ID.read() == str(self.chat_id)


class ConfigEditorCommand(AdminCommand):
    """Admin command to show, set, or reset configuration values.

    /config KEY        -> show current value
    /config KEY -      -> restore default
    /config KEY VALUE  -> parse and store a new value
    """

    def handle(self):
        if not self.args:
            self._send_message("Usage: /config key [value]")
            return
        key = self.args[0].upper()
        config_object = config.Key.get_by_name(key)
        if config_object is None:
            self._send_message("Unknown config '{}'".format(key))
            return
        if len(self.args) == 1:
            self._show_value(key, config_object)
        elif len(self.args) == 2 and self.args[1] == "-":
            self._reset_value(key, config_object)
        else:
            self._set_value(key, config_object)

    def _show_value(self, key, config_object):
        # Report the currently stored (or default) value.
        config_value = config_object.read()
        self._send_message("Current value of '{}':\n\n{}".format(key, config_value))

    def _reset_value(self, key, config_object):
        # Drop the stored override so the default applies again.
        config_object.delete()
        self._send_message("Restored '{}' to default value.".format(key))

    def _set_value(self, key, config_object):
        # Everything after the key is the value (it may contain spaces).
        raw_value = " ".join(self.args[1:])
        try:
            parsed_value = config_object.parse(raw_value)
        except Exception as e:
            self._send_message("Error while trying to parse new value: " + str(e))
            return
        config_object.write(parsed_value)
        read_value = config_object.read()
        self._send_message("Config '{}' set to:\n\n{}".format(key, read_value))


class RestartCommand(AdminCommand):
    """Admin command that stops the updater so the bot can restart."""

    def handle(self):
        # Stop from a separate thread: stopping the updater from inside one
        # of its own handler threads would deadlock.
        stopper = threading.Thread(target=self.bot.updater.stop)
        stopper.start()
        Logger(self.bot).info("Restarting bot...")
_plugin.py
from . import _PluginInstance
from nanome._internal import _network as Network
from nanome._internal._process import _ProcessManager
from nanome._internal._network._serialization._serializer import Serializer
from nanome._internal._util._serializers import _TypeSerializer
from nanome.util.logs import Logs
from nanome.util import config
from multiprocessing import Process, Pipe, current_process
from timeit import default_timer as timer
import sys
import json
import cProfile
import time
import os
import fnmatch
import subprocess
import signal

# Seconds to wait between reconnection attempts to NTS.
try_reconnection_time = 20.0
# Seconds between keep-alive packets sent to NTS.
keep_alive_time_interval = 3600.0

__metaclass__ = type
class _Plugin(object):
    """Plugin host process.

    Maintains the connection to NTS, registers client sessions, and spawns
    one child process (running a _PluginInstance) per connected session.
    """
    __serializer = Serializer()
    _plugin_id = -1
    _custom_data = None

    def __parse_args(self):
        """Parse command-line options (-h/-a/-p/-k/-n/-v/-r/--ignore)."""
        Logs._set_verbose(False)
        for i in range(1, len(sys.argv)):
            if sys.argv[i] == "-h":
                Logs.message("Usage:", sys.argv[1],"[-h] [-a ADDRESS] [-p PORT]")
                Logs.message(" -h display this help")
                Logs.message(" -a connects to a NTS at the specified IP address")
                Logs.message(" -p connects to a NTS at the specified port")
                Logs.message(" -k specifies a key file to use to connect to NTS")
                Logs.message(" -n name to display for this plugin in Nanome")
                Logs.message(" -v enable verbose mode, to display Logs.debug")
                Logs.message(" -r, --auto-reload restart plugin automatically if a .py or .json file in current directory changes")
                Logs.message(" --ignore to use with auto-reload. All paths matching this pattern will be ignored, " \
                    "use commas to specify several. Supports */?/[seq]/[!seq]")
                sys.exit(0)
            elif sys.argv[i] == "-a":
                # NOTE(review): the bound check should probably be
                # `i + 1 >= len(sys.argv)` — as written, a trailing "-a" with
                # no argument raises IndexError instead of the error message.
                # Same pattern in -p/-k/-n/--ignore below.
                if i >= len(sys.argv):
                    Logs.error("Error: -a requires an argument")
                    sys.exit(1)
                self.__host = sys.argv[i + 1]
                # NOTE(review): `i += 1` has no effect inside `for i in range(...)`
                # (i is rebound each iteration), so option values are also
                # visited as if they were option names.
                i += 1
            elif sys.argv[i] == "-p":
                if i >= len(sys.argv):
                    Logs.error("Error: -p requires an argument")
                    sys.exit(1)
                try:
                    self.__port = int(sys.argv[i + 1])
                except ValueError:
                    Logs.error("Error: -p argument has to be an integer")
                    sys.exit(1)
                i += 1
            elif sys.argv[i] == "-k":
                if i >= len(sys.argv):
                    Logs.error("Error: -k requires an argument")
                    sys.exit(1)
                self.__key_file = sys.argv[i + 1]
                i += 1
            elif sys.argv[i] == "-n":
                if i >= len(sys.argv):
                    Logs.error("Error: -n requires an argument")
                    sys.exit(1)
                self._description['name'] = sys.argv[i + 1]
                i += 1
            elif sys.argv[i] == "-v":
                self.__has_verbose = True
                Logs._set_verbose(True)
            elif sys.argv[i] == "-r" or sys.argv[i] == "--auto-reload":
                self.__has_autoreload = True
            elif sys.argv[i] == "--ignore":
                if i >= len(sys.argv):
                    Logs.error("Error: --ignore requires an argument")
                    sys.exit(1)
                split = sys.argv[i + 1].split(",")
                self.__to_ignore.extend(split)

    def __read_key_file(self):
        """Return the NTS auth key from self.__key_file, or None on any failure."""
        try:
            f = open(self.__key_file, "r")
            key = f.read()
            return key
        except:
            return None

    def _on_packet_received(self, packet):
        """Dispatch a packet from NTS by its packet_type.

        Routes session messages to the matching session, registers new
        sessions, and handles plugin/client (dis)connection notifications.
        """
        if packet.packet_type == Network._Packet.packet_type_message_to_plugin:
            session_id = packet.session_id
            if session_id in self._sessions:
                # packet.decompress()
                self._sessions[session_id]._on_packet_received(packet.payload)
                return
            # If we don't know this session_id, try to register it first
            if _Plugin.__serializer.try_register_session(packet.payload) == True:
                received_version_table, _, _ = _Plugin.__serializer.deserialize_command(packet.payload, None)
                # Negotiate the highest serialization version both sides support.
                version_table = _TypeSerializer.get_best_version_table(received_version_table)
                self.__on_client_connection(session_id, version_table)
            # Doesn't register? It's an error
            else:
                Logs.warning("Received a command from an unregistered session", session_id)
        elif packet.packet_type == Network._Packet.packet_type_plugin_connection:
            # NTS acknowledged our registration and assigned us an ID.
            _Plugin._plugin_id = packet.plugin_id
            Logs.message("Registered with plugin ID", _Plugin._plugin_id, "\n=======================================\n")
        elif packet.packet_type == Network._Packet.packet_type_plugin_disconnection:
            if _Plugin._plugin_id == -1:
                # Disconnected before registration completed -> refused by NTS.
                if self._description['auth'] == None:
                    Logs.error("Connection refused by NTS. Are you missing a security key file?")
                else:
                    Logs.error("Connection refused by NTS. Your security key file might be invalid")
                sys.exit(1)
            else:
                Logs.debug("Connection ended by NTS")
                sys.exit(0)
        elif packet.packet_type == Network._Packet.packet_type_client_disconnection:
            try:
                id = packet.session_id
                self._sessions[id].signal_and_close_pipes()
                del self._sessions[id]
                Logs.debug("Session", id, "disconnected")
            except:
                # Session may already be gone; ignore.
                pass
        elif packet.packet_type == Network._Packet.packet_type_keep_alive:
            pass
        else:
            Logs.warning("Received a packet of unknown type", packet.packet_type, ". Ignoring")

    def __file_filter(self, name):
        """Auto-reload only watches Python sources and JSON files."""
        return name.endswith(".py") or name.endswith(".json")

    def __file_times(self, path):
        """Yield mtimes of all watched (non-ignored) files under path.

        Yields a single 0.0 if nothing matched, so max() never sees an
        empty sequence.
        """
        found_file = False
        for root, dirs, files in os.walk(path):
            for file in filter(self.__file_filter, files):
                file_path = os.path.join(root, file)
                matched = False
                for pattern in self.__to_ignore:
                    if fnmatch.fnmatch(file_path, pattern):
                        matched = True
                if matched == False:
                    found_file = True
                    yield os.stat(file_path).st_mtime
        if found_file == False:
            yield 0.0

    def __autoreload(self):
        """Run the plugin as a child process and restart it on file changes.

        Polls watched-file mtimes every `wait` seconds; on change, signals the
        child (CTRL_BREAK on Windows, SIGTERM elsewhere) and respawns it.
        """
        wait = 3
        if os.name == "nt":
            # New process group so CTRL_BREAK only reaches the child.
            sub_kwargs = { 'creationflags': subprocess.CREATE_NEW_PROCESS_GROUP }
            break_signal = signal.CTRL_BREAK_EVENT
        else:
            sub_kwargs = {}
            break_signal = signal.SIGTERM
        # Re-run ourselves without the auto-reload flags.
        sub_args = [x for x in sys.argv if x != '-r' and x != "--auto-reload"]
        try:
            sub_args = [sys.executable] + sub_args
            process = subprocess.Popen(sub_args, **sub_kwargs)
        except:
            Logs.error("Couldn't find a suitable python executable")
            sys.exit(1)
        last_mtime = max(self.__file_times("."))
        while True:
            try:
                max_mtime = max(self.__file_times("."))
                if max_mtime > last_mtime:
                    last_mtime = max_mtime
                    Logs.message("Restarting plugin")
                    process.send_signal(break_signal)
                    process = subprocess.Popen(sub_args, **sub_kwargs)
                time.sleep(wait)
            except KeyboardInterrupt:
                process.send_signal(break_signal)
                break

    def __run(self):
        """Install termination handlers, connect to NTS, and enter the main loop."""
        if os.name == "nt":
            signal.signal(signal.SIGBREAK, self.__on_termination_signal)
        else:
            signal.signal(signal.SIGTERM, self.__on_termination_signal)
        if self._pre_run != None:
            self._pre_run()
        _Plugin.instance = self
        self._description['auth'] = self.__read_key_file()
        self._process_manager = _ProcessManager()
        self.__connect()
        self.__loop()

    def __connect(self):
        """Open the NTS connection and send the registration packet.

        Returns True on success; on failure records the disconnection time
        used by __loop for reconnection back-off.
        """
        self._network = Network._NetInstance(self, _Plugin._on_packet_received)
        if self._network.connect(self.__host, self.__port):
            # Re-use our previous plugin ID when reconnecting, else request one (0).
            if _Plugin._plugin_id >= 0:
                plugin_id = _Plugin._plugin_id
            else:
                plugin_id = 0
            packet = Network._Packet()
            packet.set(0, Network._Packet.packet_type_plugin_connection, plugin_id)
            packet.write_string(json.dumps(self._description))
            self._network.send(packet)
            self.__connected = True
            self.__last_keep_alive = timer()
            return True
        else:
            self.__disconnection_time = timer()
            return False

    def __loop(self):
        """Main event loop: reconnect, receive, keep-alive, pump sessions."""
        to_remove = []
        try:
            while True:
                if self.__connected == False:
                    elapsed = timer() - self.__disconnection_time
                    if elapsed >= try_reconnection_time:
                        Logs.message("Trying to reconnect...")
                        if self.__connect() == False:
                            self.__disconnection_time = timer()
                            continue
                    else:
                        # Sleep only the remaining back-off time.
                        time.sleep(try_reconnection_time - elapsed)
                        continue
                if self._network.receive() == False:
                    self.__connected = False
                    self.__disconnect()
                    continue
                if timer() - self.__last_keep_alive >= keep_alive_time_interval:
                    self.__last_keep_alive = timer()
                    packet = Network._Packet()
                    packet.set(_Plugin._plugin_id, Network._Packet.packet_type_keep_alive, 0)
                    self._network.send(packet)
                # Pump each session; collect dead ones and drop them after
                # iteration (can't delete from a dict while iterating it).
                to_remove.clear()
                for id, session in self._sessions.items():
                    if session._read_from_plugin() == False:
                        session.close_pipes()
                        to_remove.append(id)
                for id in to_remove:
                    self._sessions[id]._send_disconnection_message(_Plugin._plugin_id)
                    del self._sessions[id]
                self._process_manager._update()
        except KeyboardInterrupt:
            self.__exit()

    def __disconnect(self):
        """Drop all sessions and start the reconnection back-off timer."""
        to_remove = []
        for id in self._sessions.keys():
            to_remove.append(id)
        for id in to_remove:
            del self._sessions[id]
        self.__disconnection_time = timer()

    def __on_termination_signal(self, signum, frame):
        """Signal handler (SIGTERM / SIGBREAK): shut down cleanly."""
        self.__exit()

    def __exit(self):
        """Close all session pipes, join child processes, and exit."""
        Logs.debug('Exiting')
        for session in _Plugin.instance._sessions.values():
            session.signal_and_close_pipes()
            session.plugin_process.join()
        if self._post_run != None:
            self._post_run()
        sys.exit(0)

    def __on_client_connection(self, session_id, version_table):
        """Create a session and spawn its plugin instance in a child process.

        Two pipe pairs are used: one for network traffic, one for process
        management commands.
        """
        main_conn_net, process_conn_net = Pipe()
        main_conn_proc, process_conn_proc = Pipe()
        session = Network._Session(session_id, self._network, self._process_manager, main_conn_net, main_conn_proc)
        process = Process(target=_Plugin._launch_plugin, args=(self._plugin_class, session_id, process_conn_net, process_conn_proc, _Plugin.__serializer, _Plugin._plugin_id, version_table, _TypeSerializer.get_version_table(), Logs._is_verbose(), _Plugin._custom_data))
        process.start()
        session.plugin_process = process
        self._sessions[session_id] = session
        Logs.debug("Registered new session:", session_id)

    @staticmethod
    def _is_process():
        """True when running inside a spawned session process."""
        return current_process().name != 'MainProcess'

    @classmethod
    def _launch_plugin_profile(cls, plugin_class, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data):
        """Like _launch_plugin, but wrapped in cProfile (writes profile.out)."""
        cProfile.runctx('_Plugin._launch_plugin(plugin_class, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data)', globals(), locals(), 'profile.out')

    @classmethod
    def _launch_plugin(cls, plugin_class, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data):
        """Child-process entry point: build and run one plugin instance.

        _PluginInstance.__init__ is called explicitly so user-defined
        plugin_class constructors don't have to chain to it.
        """
        plugin = plugin_class()
        _PluginInstance.__init__(plugin, session_id, pipe_net, pipe_proc, serializer, plugin_id, version_table, original_version_table, verbose, custom_data)
        Logs.debug("Starting plugin")
        plugin._run()

    def __init__(self, name, description, category = "", has_advanced = False):
        """Store the plugin description sent to NTS and init runtime state.

        Args:
            name: Display name of the plugin in Nanome.
            description: Human-readable description.
            category: Optional plugin category.
            has_advanced: Whether the plugin exposes advanced settings.
        """
        self._sessions = dict()
        self._description = {
            'name': name,
            'description': description,
            'category': category,
            'hasAdvanced': has_advanced,
            'auth': None
        }
        self._plugin_class = None
        self.__connected = False
        self.__has_autoreload = False
        self.__has_verbose = False
        self.__to_ignore = []
        self._pre_run = None
        self._post_run = None
GluttonousSnake.py
import time
import threading
import os
import random
import keyboard
import sys

# Global game configuration: board size, divider width, and tick rate.
config = {
    "divider": 50,
    "speed": 1,  # Ticks per second; the frame delay is 1 / speed
    "size-x": 20,
    "size-y": 48
}

# Global mutable game state, shared between the game thread and hotkey callbacks.
state = {
    "state": "running",  # Lifecycle: "running" / "pause" / "end" / "boundary" / "eat_self"
    "time": 0,
    "score": 0,
    "delta_speed": 1,  # Speed multiplier applied (then reset) on the next tick
    "food": [],  # A list of 2-element tuples storing the positions of the foods
    "snake": [],  # A list of all transition nodes of the snake. The head is at the end of the list!
    "last-direction": (0, 0),  # Direction applied on the previous tick
    "direction": (0, 1),  # A 2-element tuple for the snake's current direction
    "graphic": None,  # size-x * size-y grid of cell markers (None / "f" / "s")
}

# Unit direction vectors in (row, column) form.
up = (-1, 0)
down = (1, 0)
left = (0, -1)
right = (0, 1)

# How each cell marker is rendered on screen.
graphic_mapping = {
    None: " ",
    "f": "*",
    "s": "■"
}


def push_left(state = state):
    """Hotkey handler: steer left unless that would reverse current motion."""
    if neg(state["last-direction"]) != left:
        state["direction"] = left


def push_right(state = state):
    """Hotkey handler: steer right unless that would reverse current motion."""
    if neg(state["last-direction"]) != right:
        state["direction"] = right


def push_up(state = state):
    """Hotkey handler: steer up unless that would reverse current motion."""
    if neg(state["last-direction"]) != up:
        state["direction"] = up


def push_down(state = state):
    """Hotkey handler: steer down unless that would reverse current motion."""
    if neg(state["last-direction"]) != down:
        state["direction"] = down


def divider(n = 50):
    """Return a horizontal divider line of width n."""
    return "■" * n + "\n"


def add(li, tu):
    """In-place element-wise addition of tuple `tu` onto list `li`."""
    # print(li, tu)
    for i in range(min(len(li), len(tu))):
        li[i] += tu[i]


def check_boundary(pos, config = config, ):
    """Return True if `pos` lies outside the board."""
    if pos[0] >= 0 and pos[0] < config["size-x"] and pos[1] >= 0 and pos[1] < config["size-y"]:
        return False
    else:
        return True


def check_eat_self(pos, graphic):
    """Return True if `pos` collides with a snake body cell in `graphic`."""
    if graphic[pos[0]][pos[1]] == "s":
        return True
    else:
        return False


def neg(tu):
    """Return the opposite direction of `tu` (element-wise negation)."""
    li = list(tu)
    for i in range(len(li)):
        li[i] = -li[i]
    return tuple(li)


def get_direction(a, b):
    """Return the unit direction vector leading from node `a` towards node `b`."""
    delta_x, delta_y = b[0] - a[0], b[1] - a[1]
    if delta_x <= -1:
        return up
    elif delta_x >= 1:
        return down
    elif delta_y <= -1:
        return left
    elif delta_y >= 1:
        return right
    else:
        # Equal points have no direction — should never happen.
        print(a, b)
        assert (False)


def get_length(snake):
    """Return the number of stored transition nodes of the snake."""
    return len(snake)


def init_state(state = state, config = config):
    """Place the snake at a random interior cell and spawn the first food."""
    init_position = [random.randint(2, config["size-x"] - 2), random.randint(2, config["size-y"] - 2)]  # Generate the initial position
    state["snake"] = [init_position]
    graphic = update_graphic(state, config)
    new_food(graphic, state, config)


def new_food(graphic, state, config):
    """Randomly generate a new food on an empty cell; no-op if the board is full.

    NOTE(review): the fullness check counts snake *transition nodes*, not
    occupied body cells, so it underestimates how full the board is — confirm
    this is acceptable.
    """
    if len(state["snake"]) + len(state["food"]) >= config["size-x"] * config["size-y"]:
        # No place!
        return
    else:
        remaining_pos = [
            (i, j)
            for i in range(config["size-x"])
            for j in range(config["size-y"])
            if graphic[i][j] is None
        ]
        new_pos = random.choice(remaining_pos)
        state["food"].append(new_pos)
        graphic[new_pos[0]][new_pos[1]] = "f"
        return


def update_graphic(state, config):
    """Rebuild the full cell grid from the snake's transition nodes and foods."""
    new_graphic = [
        [None for j in range(config["size-y"])] for i in range(config["size-x"])
    ]
    s = state["snake"][0]
    new_graphic[s[0]][s[1]] = "s"
    # Rasterize each straight segment between consecutive transition nodes.
    for index in range(1, len(state["snake"])):
        prev, current = state["snake"][index - 1], state["snake"][index]
        direction = get_direction(prev, current)
        if direction[0] != 0:
            # Vertical segment: column fixed, walk rows inclusively.
            j = prev[1]
            # print(prev, current, direction)
            for i in range(prev[0], current[0] + direction[0], direction[0]):
                new_graphic[i][j] = "s"
        else:
            # Horizontal segment: row fixed, walk columns inclusively.
            i = prev[0]
            # print(prev, current, direction)
            for j in range(prev[1], current[1] + direction[1], direction[1]):
                new_graphic[i][j] = "s"
    for i, j in state["food"]:
        new_graphic[i][j] = "f"
    return new_graphic


def draw_graphic(graphic, config):
    """Render the cell grid into a printable string with side borders."""
    string = ""
    for i in range(config["size-x"]):
        string += "|"
        for j in range(config["size-y"]):
            string += graphic_mapping[graphic[i][j]]
        string += "|\n"
    return string


def update_state(state = state, config = config):
    """Advance the game by one tick: move the snake, handle food/collisions.

    Sets state["state"] to "boundary" or "eat_self" on game over, otherwise
    installs the freshly rendered grid into state["graphic"].
    """
    if state["state"] == "running":
        state["time"] += 1
        # Apply (and reset) any pending speed change from the hotkeys.
        config["speed"] *= state["delta_speed"]
        state["delta_speed"] = 1
        new_graphic = update_graphic(state, config)
        last_direction, direction, state["last-direction"] = state["last-direction"], state["direction"], state["direction"]
        # print("New Direction:", direction)
        if get_length(state["snake"]) == 1:
            # Single-node snake: move the lone node forward.
            current_pos = state["snake"][0][:]
            add(state["snake"][0], direction)
            if check_boundary(state["snake"][0]):
                state["state"] = "boundary"
                return
            new_graphic[state["snake"][0][0]][state["snake"][0][1]] = "s"
            if tuple(state["snake"][-1]) in state["food"]:
                # Add a tail node
                state["snake"] = [current_pos, state["snake"][0]]
                state["food"].remove(tuple(state["snake"][-1]))
                state["score"] += 1
                # Add new food
                new_food(new_graphic, state, config)
            else:
                # Clear the vacated cell.
                new_graphic[current_pos[0]][current_pos[1]] = None
        else:
            # The snake has at least length 2
            if last_direction == direction:
                # Direction unchanged: move the head node forward; all
                # intermediate transition points remain the same.
                add(state["snake"][-1], direction)
                if check_boundary(state["snake"][-1]):
                    state["state"] = "boundary"
                    return
                # Check if it eats a food. If it eats a food, then the tail should not go forward
                head_pos = tuple(state["snake"][-1])
            else:
                # Direction changed: add a new transition point at the head.
                head_pos = state["snake"][-1][:]
                add(head_pos, direction)
                state["snake"].append(head_pos)
                if check_boundary(state["snake"][-1]):
                    state["state"] = "boundary"
                    return
                head_pos = tuple(head_pos)
            # Eat food or not.
            if head_pos in state["food"]:
                # Do not move the tail => Length + 1
                state["score"] += 1
                # Eat the food
                state["food"].remove(head_pos)
                # Add new food
                new_food(new_graphic, state, config)
            else:
                # Forward the tail
                tail_direction = get_direction(state["snake"][0], state["snake"][1])
                # print("tail:", state["snake"][0], state["snake"][1], tail_direction )
                new_graphic[state["snake"][0][0]][state["snake"][0][1]] = None  # Remove the tail in graphic
                add(state["snake"][0], tail_direction)
            if check_eat_self(state["snake"][-1], new_graphic):
                state["state"] = "eat_self"
                return
            if state["snake"][0] == state["snake"][1]:
                # Tail caught up with the next transition point: drop it.
                state["snake"] = state["snake"][1:]
            new_graphic[head_pos[0]][head_pos[1]] = "s"
        del state["graphic"]
        state["graphic"] = new_graphic


def draw(state = state, config = config):
    """Compose the full frame (title, stats, board) and write it to stdout."""
    string = ""
    string += divider(config["divider"])
    string += "Gluttonous Snake - Python | Author: Guochao Xie \n"
    string += divider(config["divider"])
    string += "{} | Time: {} | Score: {} | Speed: {}\n".format(state["state"], state["time"], state["score"], config["speed"])
    string += divider(config["divider"])
    # print(state)
    string += draw_graphic(state["graphic"], config)
    string += divider(config["divider"])
    # os.system("cls")
    sys.stdout.write(string)
    # sys.stdout.flush()


def run(state = state, config = config):
    """Game-loop thread body: tick / draw / sleep until the game ends."""
    init_state(state, config)
    pausing = False
    while True:
        try:
            if state["state"] == "running":
                pausing = False
                # Update States
                update_state()
                # Draw
                draw()
                # input()
                time.sleep(1 / config["speed"])
            elif state["state"] == "pause":
                # Draw the paused frame once, then idle.
                if not pausing:
                    draw()
                    pausing = True
                time.sleep(1 / config["speed"])
                continue
            else:
                print("Main thread end")
                return
        except KeyboardInterrupt:
            print("keyboard")
            state["state"] = "end"
            # return


def pause_resume(state = state, config = config):
    """Hotkey handler: toggle between "running" and "pause"."""
    if state["state"] == "running":
        state["state"] = "pause"
    elif state["state"] == "pause":
        state["state"] = "running"


def end_handler():
    """Terminate the game loop (used on Ctrl+C in the main thread)."""
    print("Trigger Termination")
    state["state"] = "end"


def add_speed():
    """Hotkey handler: double the tick rate on the next update."""
    state["delta_speed"] = 2


def reduce_speed():
    """Hotkey handler: halve the tick rate on the next update."""
    state["delta_speed"] = 0.5


help_message = """
Space / Enter : Pause / Resume
[ : Double Speed
] : Half Speed
H : Help
Q : Quit
"""


def help_game():
    """Hotkey handler: pause the game (if running) and print the key bindings.

    NOTE(review): help_message says "[" doubles speed, but below "[" is bound
    to reduce_speed and "]" to add_speed — the text and bindings disagree.
    """
    if state["state"] == "running":
        state["state"] = "pause"
        time.sleep(1 / config["speed"] + 0.1)
    print(help_message)


def quit_game():
    """Hotkey handler: end the game."""
    state["state"] = "end"


if __name__ == "__main__":
    input("Ready?")
    # Run the game loop in a worker thread; hotkeys fire on keyboard's own thread.
    main_thread = threading.Thread(target = run)
    main_thread.start()
    keyboard.add_hotkey("up", push_up)
    keyboard.add_hotkey("down", push_down)
    keyboard.add_hotkey("left", push_left)
    keyboard.add_hotkey("right", push_right)
    keyboard.add_hotkey("space", pause_resume)
    keyboard.add_hotkey("enter", pause_resume)
    keyboard.add_hotkey("[", reduce_speed)
    keyboard.add_hotkey("]", add_speed)
    keyboard.add_hotkey("h", help_game)  # Help
    keyboard.add_hotkey("q", quit_game)  # Quit
    keyboard.add_hotkey("ctrl+c", quit_game)
    try:
        # Busy-wait until the game leaves the "running" state.
        while state["state"] == "running":
            pass
    except KeyboardInterrupt:
        end_handler()
    main_thread.join()
    print("Game Over!")
    print("Your result: {}".format(state["state"]))
    print("Your score: {}".format(state["score"]))
context.py
import threading
from contextlib import contextmanager


class Context(threading.local):
    """A thread-local stack of attribute scopes.

    Attribute lookup searches the innermost scope first, so values pushed
    by a nested ``with context(...)`` block shadow outer ones. Each thread
    gets its own independent stack (courtesy of threading.local).
    """

    def __init__(self):
        # The base scope is always present; innermost scope is last.
        self._ctx = [{}]

    def __getattr__(self, name):
        # Walk scopes innermost-first so nested values shadow outer ones.
        for scope in self._ctx[::-1]:
            try:
                return scope[name]
            except KeyError:
                continue
        raise AttributeError(name)

    def get(self, name, default=None):
        """Like attribute access, but return `default` when `name` is unbound."""
        try:
            return getattr(self, name)
        except AttributeError:
            return default

    @contextmanager
    def __call__(self, **attrs):
        """Push `attrs` as a new scope for the duration of the with-block."""
        self._ctx.append(attrs)
        try:
            yield
        finally:
            popped = self._ctx.pop()
            # Scopes must be popped in LIFO order — anything else is a bug.
            assert popped is attrs


def test_threading():
    import time, random

    context = Context()

    def worker(i):
        with context(i=i):
            check(i)

    def check(i):
        assert context.i == i
        time.sleep(random.random())
        # The value must survive other threads pushing their own scopes.
        assert context.i == i
        print(i, end=', ')

    for i in range(100):
        threading.Thread(target=worker, args=(i,)).start()

# test_threading()

context = Context()
redmatic_to_sqldb.py
import datetime
import json
import os
import queue
import threading
import time

import paho.mqtt.client as mqtt
import toml

from db_model_postgrest import Datum
from db_model_postgrest import sende_daten

# Directory of this script; the TOML config is expected next to it.
SKRIPTPFAD = os.path.abspath(os.path.dirname(__file__))
CONFIGDATEI = "redmatic_to_sqldb_cfg.toml"


def load_config():
    """Load and parse the TOML configuration file located next to this script."""
    configfile = os.path.join(SKRIPTPFAD, CONFIGDATEI)
    with open(configfile) as conffile:
        config = toml.loads(conffile.read())
    return config


CONFIG = load_config()

# HomeMatic datapoints worth persisting; everything else is dropped.
DATAPOINTS = ["STATE", "LOWBAT", "SET_TEMPERATURE", "VALVE_STATE", "ACTUAL_TEMPERATURE", "CONTROL_MODE",
              "FAULT_REPORTING", "BATTERY_STATE"]

# Hand-off buffer between the MQTT network thread and the DB worker thread.
queue_ = queue.Queue()


def on_message(_, __, msg):
    """MQTT callback: enqueue the raw payload for the worker thread."""
    queue_.put(msg.payload)


def main_mqtt():
    """Connect to the broker, subscribe to all HomeMatic topics, and block."""
    client = mqtt.Client()
    client.on_message = on_message
    client.connect("192.168.178.38", 1883, 60)
    client.subscribe("hm/#")
    client.loop_forever()


def convert_mqtt_daten(data):
    """Decode a raw MQTT payload (UTF-8 encoded JSON bytes) into a dict."""
    data_convert = json.loads(data.decode("utf-8"))
    return data_convert


def convert_wert(val):
    """Coerce a datapoint value to float."""
    return float(val)


def reduce_data(data):
    """Map a decoded MQTT message onto a database Datum record."""
    return Datum(
        datetime.datetime.fromtimestamp(data["ts"] / 1000),  # "ts" is in milliseconds
        data["hm"]["deviceName"],
        convert_wert(data["val"]),
        data["hm"]["datapoint"]
    )


def main_daten_verarbeiten():
    """Worker loop: drain the queue every 30s and push batches to the DB."""
    # FIX: the f-prefix was misplaced onto the literal header *key*
    # (f"Authorization" has no placeholders and was a no-op) while the value
    # used .format(). Build the value with a single f-string instead.
    headers = {"Authorization": f"{CONFIG['user']} {CONFIG['token']}"}
    while True:
        data = []
        while not queue_.empty():
            datum = convert_mqtt_daten(queue_.get())
            if datum["hm"]["datapoint"] in DATAPOINTS:
                data.append(reduce_data(datum))
        if data:
            sende_daten(CONFIG["url"], CONFIG["table"], headers, data)
        time.sleep(30)


def main():
    """Start the DB worker thread, then run the MQTT loop in this thread."""
    threading.Thread(target=main_daten_verarbeiten).start()
    main_mqtt()


if __name__ == "__main__":
    main()
paho_backend.py
import os
import logging

import paho.mqtt.client as mqtt
import threading

log = logging.getLogger("mqttwrapper.paho_backend")


def on_connect(client, userdata, flags, rc):
    """Paho on_connect hook: (re)subscribe to every configured topic."""
    log.debug("Connected")
    for topic in userdata['topics']:
        client.subscribe(topic)
        log.debug("Subscribed to %s", topic)


def on_message(client, userdata, msg):
    """Paho on_message hook: run the user callback and publish its replies.

    The callback may return an iterable of (topic, payload) or
    (topic, payload, retain) tuples; each is published back to the broker.
    """
    log.debug("Received message on %s", msg.topic)
    if msg.retain and userdata['ignore_retained']:
        log.debug("Ignoring retained message")
        return
    # BUG FIX: `replies` was previously unbound when the callback raised,
    # so the `if replies is not None` check below crashed with a NameError.
    replies = None
    try:
        replies = userdata['callback'](msg.topic, msg.payload, **userdata['kwargs'])
    except Exception:
        log.error("Callback caused exception", exc_info=True)
    log.debug("Callback completed.")
    if replies is not None:
        log.debug("Received %s replies", len(replies))
        for reply in replies:
            try:
                # Replies are (topic, payload) or (topic, payload, retain).
                topic, payload, retain = reply
            except ValueError:
                topic, payload = reply
                retain = False
            log.debug("Publishing '%s' to %s (retain: %s)", payload, topic, retain)
            client.publish(topic, payload, retain=retain)
            log.debug("Published '%s' to %s (retain: %s)", payload, topic, retain)


def mqtt_thread(client):
    """Thread body for non-blocking mode: run the paho network loop forever."""
    client.loop_forever()


def run_script(callback, broker=None, topics=None, ignore_retained=False, blocking=True, **kwargs):
    """Wire up a paho client around `callback` and start the network loop.

    Args:
        callback: Called as callback(topic, payload, **kwargs) per message.
        broker: Broker URL; falls back to the MQTT_BROKER env variable.
        topics: Topics to subscribe; falls back to comma-separated MQTT_TOPICS.
        ignore_retained: Skip retained messages delivered on (re)connect.
        blocking: Run the loop in this thread, or in a daemon thread if False.
    """
    if not broker:
        broker = os.environ['MQTT_BROKER']
    if not topics:
        topics = os.environ['MQTT_TOPICS'].split(",")
    userdata = {
        'topics': topics,
        'callback': callback,
        'ignore_retained': ignore_retained,
        'kwargs': kwargs
    }
    client = mqtt.Client(userdata=userdata)
    client.on_connect = on_connect
    client.on_message = on_message
    # Strip the scheme prefix ("tcp://host" -> "host") before connecting.
    client.connect(broker.split("//")[1])
    if blocking:
        client.loop_forever()
    else:
        t = threading.Thread(target=mqtt_thread, args=(client,), daemon=True)
        t.start()
hpsearch.py
#!/usr/bin/env python3
# Copyright 2019 Christian Henning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title           :hpsearch/hpsearch.py
# @author          :ch
# @contact         :henningc@ethz.ch
# @created         :05/05/2019
# @version         :1.0
# @python_version  :3.6.8
"""
A very simple hyperparameter search. The results will be gathered as a CSV
file.

Here is an example on how to start a hyperparameter search on a cluster using
:code:`bsub`:

.. code-block:: console

   $ bsub -n 1 -W 48:00 -e hpsearch.err -o hpsearch.out \\
     -R "rusage[mem=8000]" \\
     python3 hpsearch.py --run_cluster --num_jobs=20

For more demanding jobs (e.g., ImageNet), one may request more resources:

.. code-block:: console

   $ bsub -n 1 -W 96:00 -e hpsearch.err -o hpsearch.out \\
     -R "rusage[mem=16000]" \\
     python3 hpsearch.py --run_cluster --num_jobs=20 --num_hours=48 \\
     --resources="\"rusage[mem=8000, ngpus_excl_p=1]\""

Please fill in the grid parameters in the corresponding config file (see
command line argument `grid_module`).
"""
# Do not delete the following import for all executable scripts!
import __init__ # pylint: disable=unused-import import argparse from collections import defaultdict from datetime import datetime import getpass import glob import importlib import numpy as np import os import pandas import pickle from queue import Queue, Empty import random import re import shutil import subprocess import sys import time from threading import Thread import traceback import warnings try: import GPUtil except ModuleNotFoundError: GPUtil = None warnings.warn('Package "GPUtil" could not be imported, but might be ' + 'needed for some functionalities of this script.') try: from bsub import bsub except ModuleNotFoundError: bsub = None warnings.warn('Package "bsub" could not be imported, but might be ' + 'needed for some functionalities of this script.') from utils import misc # From which module to read the default grid. _DEFAULT_GRID = 'classifier.imagenet.hpsearch_config_ilsvrc_cub' ### The following variables will be otherwritten in the main ### ################################################################ ### See method `_read_config`. # Name of the script that should be executed by the hyperparameter search. # Note, the working directory is set seperately by the hyperparameter search # script. _SCRIPT_NAME = None # Has to be specified in helper module! # This file is expected to reside in the output folder of the simulation. _SUMMARY_FILENAME = None # Has to be specified in helper module! # These are the keywords that are supposed to be in the summary file. # A summary file always has to include the keyword "finished"!. _SUMMARY_KEYWORDS = None # Has to be specified in helper module! # The name of the command-line argument that determines the output folder # of the simulation. _OUT_ARG = 'out_dir' # Default value if attribute `_OUT_ARG` does not exist. # Function handle to parser of performance summary file. _SUMMARY_PARSER_HANDLE = None # Default parser `_get_performance_summary` used. 
# A function handle, that is used to evaluate whether an output folder should # be kept. _PERFORMANCE_EVAL_HANDLE = None # Has to be set in config file. # According to which keyword will the CSV be sorted. _PERFORMANCE_KEY = None # First key in `_SUMMARY_KEYWORDS` will be used. # Sort order. _PERFORMANCE_SORT_ASC = False # FIXME should be deleted soon. _ARGPARSE_HANDLE = None ################################################################ # This will be a list of booleans, each representing whether a specific cmd has # been executed. _CMD_FINISHED = None def _grid_to_commands(grid_dict): """Translate a dictionary of parameter values into a list of commands. Args: grid_dict: A dictionary of argument names to lists, where each list contains possible values for this argument. Returns: A list of dictionaries. Each key is an argument name that maps onto a single value. """ # We build a list of dictionaries with key value pairs. commands = [] # We need track of the index within each value array. gkeys = list(grid_dict.keys()) indices = [0] * len(gkeys) stopping_criteria = False while not stopping_criteria: cmd = dict() for i, k in enumerate(gkeys): v = grid_dict[k][indices[i]] cmd[k] = v commands.append(cmd) for i in range(len(indices)-1,-1,-1): indices[i] = (indices[i] + 1) % len(grid_dict[gkeys[i]]) if indices[i] == 0 and i == 0: stopping_criteria = True elif indices[i] != 0: break return commands def _args_to_cmd_str(cmd_dict, out_dir=None): """Translate a dictionary of argument names to values into a string that can be typed into a console. Args: cmd_dict: Dictionary with argument names as keys, that map to a value. out_dir (optional): The output directory that should be passed to the command. No output directory will be passed if not specified. Returns: A string of the form: python3 train.py --out_dir=OUT_DIR --ARG1=VAL1 ... 
""" cmd_str = 'python3 %s' % _SCRIPT_NAME if out_dir is not None: cmd_str += ' --%s=%s' % (_OUT_ARG, out_dir) for k, v in cmd_dict.items(): if type(v) == bool: cmd_str += ' --%s' % k if v else '' else: cmd_str += ' --%s=%s' % (k, str(v)) return cmd_str def _get_performance_summary(out_dir, cmd_ident): """Parse the performance summary file of a simulation. This is a very primitive parser, that expects that each line of the result file :code:`os.path.join(out_dir, _SUMMARY_FILENAME)` is a keyword-value pair. The keyword is taken from the :code:`_SUMMARY_KEYWORDS` list. **They must appear in the correct order.** The value can either be a single number or a list of numbers. A list of numbers will be converted into a string, such that it appears in a single cell under the given keyword when opening the result CSV file with a spreadsheet. Args: out_dir: The output directory of the simulation. cmd_ident (int): Identifier of this command (needed for informative error messages). Raises: IOError: If performance summary file does not exist. ValueError: If a summary key is not at the expected position in the result file. Returns: A dictionary containing strings as keywords. Note, the values may not be lists, and strings need to be wrapped into an extra layer of double quotes such that the spreadsheet interprets them as a single entity. """ # Get training results. result_summary_fn = os.path.join(out_dir, _SUMMARY_FILENAME) if not os.path.exists(result_summary_fn): raise IOError('Training run %d did not finish. No results!' \ % (cmd_ident+1)) with open(result_summary_fn, 'r') as f: result_summary = f.readlines() # Ensure downwards compatibility! summary_keys = _SUMMARY_KEYWORDS performance_dict = dict() for line, key in zip(result_summary, summary_keys): if not line.startswith(key): raise ValueError('Key %s does not appear in performance ' % (key) + 'summary where it is expected.') # Parse the lines of the result file. # Crop keyword to retrieve only the value. 
_, line = line.split(' ', maxsplit=1) # https://stackoverflow.com/questions/4703390/how-to-extract-a-floating-number-from-a-string line_nums = re.findall(r"[-+]?\d*\.\d+|\d+", line) if len(line_nums) == 1: # Single number performance_dict[key] = [line_nums[0]] else: # List of numbers # Convert list to a string for the resulting CSV file. Note, the # quotes are needed that the list will be written into a single cell # when opening the csv file (note, every key can have exactly one # value). performance_dict[key] = \ ['"' + misc.list_to_str(line_nums, delim=',') + '"'] return performance_dict def _write_slurm_script(args, cmd_str, cmd_folder_name): """Write a slurm job script for the given command string. The bash script will be dumped in the current folder. Args: args (argparse.Namespace): Command-line arguments. cmd_str (str): The actual command that should be executed by the job job scheduler (independent of slurm). cmd_folder_name (str): The folder name of the command ``cmd_str`` within the hpsearch output folder. This is used to determine a filename. Returns: (str): Bash script filename. 
""" script_fn = '%s_script.sh' % cmd_folder_name with open(script_fn, 'w') as f: f.write('#!/bin/bash\n') f.write('#SBATCH --job-name %s\n' % cmd_folder_name) f.write('#SBATCH --output %s_' % cmd_folder_name + '%j.out\n') f.write('#SBATCH --error %s_' % cmd_folder_name + '%j.err\n') f.write('#SBATCH --time %d:00:00\n' % args.num_hours) if len(args.slurm_mem) > 0: f.write('#SBATCH --mem %s\n' % args.slurm_mem) if len(args.slurm_gres) > 0: f.write('#SBATCH --gres %s\n' % args.slurm_gres) if len(args.slurm_partition) > 0: f.write('#SBATCH --partition %s\n' % args.slurm_partition) if len(args.slurm_qos) > 0: f.write('#SBATCH --qos %s\n' % args.slurm_qos) if len(args.slurm_constraint) > 0: f.write('#SBATCH --constraint %s\n' % args.slurm_constraint) f.write(cmd_str) return script_fn def _slurm_check_running(job_ids): """Check whether jobs are still in the job queue (either pending or running). Args: job_ids (list): List of job IDs. Returns: (list): List of bool values, denoting whether the corresponding job in ``job_ids`` is still listed via ``squeue``. Returns ``None`` if the jobs status couldn't be checked. """ # FIXME hacky way of getting the username. Are we sure, that the Slurm # username always is the same as the linux username? p = subprocess.Popen('squeue -u %s' % getpass.getuser(), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p_out, p_err = p.communicate() if len(p_out) == 0: warnings.warn('Couldn\'t check whether jobs are running. "squeue" ' + 'returned error: < %s >.' % (p_err.decode('utf-8'))) return None is_running = [False] * len(job_ids) try: qjobs = p_out.decode('utf-8').split('\n') assert len(qjobs) > 0 # First line is table header. qjobs = qjobs[1:] for qjob in qjobs: qentries = qjob.split() if len(qentries) > 0: # FIXME Don't know why some job IDs have underscores (job # arrays?). 
qid = int(qentries[0].strip('_')) if qid in job_ids: is_running[job_ids.index(qid)] = True except: traceback.print_exc(file=sys.stdout) warnings.warn('Could not assess whether jobs are still in the job ' + 'queue. Command "squeue" ended with error message: %s' \ % (p_err.decode('utf-8'))) return None return is_running def _get_available_gpus(args): """Get IDs of available GPUs. Args: (....): See function :func:`_run_cmds`. Returns: (list): List of GPU IDs. Contained are only IDs of visible GPUs that have enough resources and where we didn't start a job recently (according to the user defined warmup time). Returns ``None`` if no GPUs are visible to the program. """ if args.visible_gpus == '-1': return None if '_VISIBLE_GPUS' in globals(): visible_gpus = globals()['_VISIBLE_GPUS'] else: visible_gpus = misc.str_to_ints(args.visible_gpus) if len(visible_gpus) == 0: # Use all GPUs visible_gpus = [gpu.id for gpu in GPUtil.getGPUs()] globals()['_VISIBLE_GPUS'] = visible_gpus print('The following GPUs are used:') for gpu in GPUtil.getGPUs(): if gpu.id in visible_gpus: print('GPU %d: %s' % (gpu.id, gpu.name)) # When was the last job scheduled on each GPU. Note, we are only allowed to # schedule new jobs after a certain warmup period has passed. if '_LAST_JOBS_TS' not in globals(): globals()['_LAST_JOBS_TS'] = [None] * len(visible_gpus) if len(visible_gpus) == 0: return None visible_gpus_tmp = visible_gpus visible_gpus = [] for i, last_ts in enumerate(globals()['_LAST_JOBS_TS']): if last_ts is None or (time.time()-last_ts) > args.sim_startup_time: visible_gpus.append(visible_gpus_tmp[i]) gpus_to_exclude = [] for gpu in GPUtil.getGPUs(): if gpu.id not in visible_gpus: gpus_to_exclude.append(gpu.id) # FIXME We should ensure that the ressources are free over a sustained # period of time, as there are sometime sudden drops in GPU usage based on # the instantaneous computation done by the scripts running on them. 
return GPUtil.getAvailable(order='random', limit=len(visible_gpus), maxLoad=args.allowed_load, maxMemory=args.allowed_memory, excludeID=gpus_to_exclude) def _check_running(args, out_dir, results_file, jobs): """Check whether jobs are still running. This function checks whether the jobs in ``jobs`` are still running. If they have finished, they will be removed from the list ``jobs`` and the results of the jobs will be collected and saved. Args: (....): See function :func:`_run_cmds`. jobs (list): List of tuples, containing all running jobs. Returns: (list): Updated list ``jobs``, where finished jobs have been deleted. """ def _clear_pipe(out_file, out_queue): try: with open(out_file,'a') as f: while True: line = out_queue.get_nowait() f.write(line) except Empty: pass # Pipes are cleared again, no more lines in there if args.run_cluster and args.scheduler == 'lsf': try: rjobs = bsub.running_jobs() except: traceback.print_exc(file=sys.stdout) warnings.warn('Could not assess whether jobs are still in the ' + 'job queue. Assuming all jobs are still running.') rjobs = None elif args.run_cluster: assert args.scheduler == 'slurm' rjobs = _slurm_check_running([t[0] for t in jobs]) tmp_jobs = jobs jobs = [] for ii, job_tup in enumerate(tmp_jobs): job, cmd_dict, folder_name, ind, gpu_id, job_io = job_tup cmd_out_dir = cmd_dict[_OUT_ARG] if args.run_cluster and args.scheduler == 'lsf': if rjobs is None or job.job_id in rjobs: jobs.append((job, cmd_dict, folder_name, ind, gpu_id, job_io)) continue elif args.run_cluster: assert args.scheduler == 'slurm' # FIXME If we couldn't check the queue, then we assume all jobs are # still running. Otherwise we might run in danger that we just # consider all jobs as finished while always scheduling more. 
if rjobs is None or rjobs[ii]: jobs.append((job, cmd_dict, folder_name, ind, gpu_id, job_io)) continue else: job_name = 'job_%s' % folder_name job_out_file = os.path.join(cmd_out_dir, job_name + '.out') job_err_file = os.path.join(cmd_out_dir, job_name + '.err') if job.poll() == None: # Clear pipes, such that they don't fill up. if os.path.exists(cmd_out_dir) and job_io is not None: _, q_out, _, q_err = job_io _clear_pipe(job_out_file, q_out) _clear_pipe(job_err_file, q_err) jobs.append((job, cmd_dict, folder_name, ind, gpu_id, job_io)) continue print('Job %d finished.' % ind) try: # If the output folder doesn't exist yet, we still create it to # write the log files. An example scenario could be, that the user # provided invalid CLI arguments. He can figure this out, if we # write the error log into the corresponding result folder. # FIXME Could be, that just our check whether a job still exists # failed. Note, that simulations might not start if the output # folder already exists. if not os.path.exists(cmd_out_dir): # FIXME I deactivated the creation, as it causes sometimes # trouble. #warnings.warn('Output directory of run %d does not exist ' \ # % (ind+1) + 'and will be created to save log files.') #os.makedirs(cmd_out_dir) warnings.warn('Output directory of run %d does not exist.' % \ (ind+1)) # We store the command used for execution. This might be helpful # for the user in case he wants to manually continue the # simulation. with open(os.path.join(cmd_out_dir, 'hpsearch_command.sh'), 'w') as f: f.write('#!/bin/sh\n') f.write('%s' % (_args_to_cmd_str(cmd_dict))) ### Save logs from run. if args.run_cluster and args.scheduler == 'lsf': # Move the output files written by LSF on the cluster in the # simulation output folder. 
                # LSF writes the log files into the current directory; move
                # them into the run's output folder.
                job_out_file = glob.glob('job_%s*.out' % folder_name)
                job_err_file = glob.glob('job_%s*.err' % folder_name)
                assert len(job_out_file + job_err_file) <= 2
                for job_f in job_out_file + job_err_file:
                    os.rename(job_f, os.path.join(cmd_out_dir, job_f))
            elif args.run_cluster:
                assert args.scheduler == 'slurm'
                # `job` is the slurm job ID here; names match the pattern
                # produced by `_write_slurm_script`.
                job_out_file = '%s_%d.out' % (folder_name, job)
                job_err_file = '%s_%d.err' % (folder_name, job)
                job_script_fn = '%s_script.sh' % (folder_name)
                for job_f in [job_out_file, job_err_file, job_script_fn]:
                    if os.path.exists(job_f):
                        os.rename(job_f, os.path.join(cmd_out_dir, job_f))
                    else:
                        warnings.warn('Could not find file %s.' % job_f)
            elif job_io is not None:
                # Local GPU run: flush remaining stdout/stderr lines.
                _, q_out, _, q_err = job_io
                _clear_pipe(job_out_file, q_out)
                _clear_pipe(job_err_file, q_err)

            ### Save results.
            # Get training results.
            performance_dict = _SUMMARY_PARSER_HANDLE(cmd_out_dir, ind)
            for k, v in performance_dict.items():
                cmd_dict[k] = v

            # Create or update the CSV file summarizing all runs.
            panda_frame = pandas.DataFrame.from_dict(cmd_dict)
            if os.path.isfile(results_file):
                old_frame = pandas.read_csv(results_file, sep=';')
                panda_frame = pandas.concat([old_frame, panda_frame],
                                            sort=True)
            panda_frame.to_csv(results_file, sep=';', index=False)

            # Check whether simulation has finished successfully.
            has_finished = int(float(cmd_dict['finished'][0]))
            if has_finished == 1:
                _CMD_FINISHED[ind] = True
            else:
                _CMD_FINISHED[ind] = False

        except Exception:
            traceback.print_exc(file=sys.stdout)
            warnings.warn('Could not assess whether run %d has been ' \
                          % (ind+1) + 'completed.')

    return jobs

def _run_cmds(args, commands, out_dir, results_file):
    """Run all commands associated with the hpsearch.

    Depending on the CLI argument ``--run_cluster``, this function will either
    submit a certain number of jobs to an LSF cluster and wait for these jobs
    to complete before starting new jobs or it will send jobs to multiple
    visible GPUs (potentially multiple jobs per GPU).

    Args:
        args (argparse.Namespace): Command-line arguments.
        commands (list): List of command dictionaries.
        out_dir (str): Output directory.
        results_file (str): Path to CSV file to store hpsearch results.
    """
    num_cmds = len(commands)
    jobs = []
    i = -1
    while len(commands) > 0:
        ### Stall until resources are available ###
        jobs = _check_running(args, out_dir, results_file, jobs)
        if args.run_cluster:
            # On the cluster, we just need to check whether we can schedule
            # more jobs. The batch system is taking care of checking whether
            # ressources are available.
            while len(jobs) >= args.num_jobs:
                time.sleep(10)
                jobs = _check_running(args, out_dir, results_file, jobs)
        else:
            gpu_to_use = None
            while gpu_to_use is None:
                # On a machine without job scheduler, we have to figure out
                # which GPUs are available.
                available_gpus = _get_available_gpus(args)
                while available_gpus is not None and len(available_gpus) == 0:
                    time.sleep(10)
                    available_gpus = _get_available_gpus(args)
                # None means: no GPUs visible at all -> fall back to CPU.
                if available_gpus is None:
                    warnings.warn('No GPUs visible to the hpsearch!')
                    gpu_to_use = -1
                    break

                # Check that there are not already too many jobs on the GPU.
                jobs = _check_running(args, out_dir, results_file, jobs)
                jobs_per_gpu = defaultdict(int)
                for _, _, _, _, job_gpu_id, _ in jobs:
                    jobs_per_gpu[job_gpu_id] += 1

                for agpu in available_gpus:
                    if jobs_per_gpu[agpu] < args.max_num_jobs_per_gpu:
                        gpu_to_use = agpu
                        break

                if gpu_to_use is None:
                    time.sleep(10)

        cmd_dict = commands.pop(0)
        i += 1
        cmd_out_dir = cmd_dict[_OUT_ARG]
        folder_name = os.path.basename(cmd_out_dir)
        cmd_str = _args_to_cmd_str(cmd_dict)

        # Execute the program.
        print('Starting training run %d/%d -- "%s"' % (i+1, num_cmds,
                                                       cmd_str))

        job_name = 'job_%s' % folder_name
        if args.run_cluster and args.scheduler == 'lsf':
            # Schedule job.
            # FIXME the bsub module ignores the pathnames we set. Hence, all
            # output files are simply stored in the local directory. For now, we
            # will capture this when the run completed and move the file.
job_error_file = os.path.join(cmd_out_dir, job_name + '.err') job_out_file = os.path.join(cmd_out_dir, job_name + '.out') sub = bsub(job_name, R=args.resources, n=1, W='%d:00' % args.num_hours, e=job_error_file, o=job_out_file, verbose=True) sub(cmd_str) jobs.append((sub, cmd_dict, folder_name, i, None, None)) elif args.run_cluster: assert args.scheduler == 'slurm' script_name = _write_slurm_script(args, cmd_str, folder_name) p = subprocess.Popen('sbatch --parsable %s' % script_name, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) p_out, p_err = p.communicate() if p.returncode != 0: warnings.warn('Job submission ended with return code %d - %s.' \ % (p.returncode, p_err.decode('utf-8'))) try: job_id = int(p_out.decode('utf-8').strip()) jobs.append((job_id, cmd_dict, folder_name, i, None, None)) except: traceback.print_exc(file=sys.stdout) warnings.warn('Could not assess whether run %d has been ' \ % (i+1) + 'submitted.') elif gpu_to_use == -1: # Start on CPU. # FIXME stdout and stderr is not logged and therefore can't be # written to file. p_cmd = subprocess.Popen(cmd_str, shell=True) # Wait for job to finish. p_cmd.communicate() jobs.append((p_cmd, cmd_dict, folder_name, i, None, None)) else: # Start job on local GPU. print('Job will be scheduled on GPU %d.' % gpu_to_use) p_cmd = subprocess.Popen( \ 'CUDA_VISIBLE_DEVICES=%d ' % (gpu_to_use) + cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Register start time of job, such that we can respect the warmup # time before sending the next job onto the GPU. gpu_ind = globals()['_VISIBLE_GPUS'].index(gpu_to_use) globals()['_LAST_JOBS_TS'][gpu_ind] = time.time() # We have to clear the stdout and stderr pipes regularly, as # they have limited size in linux and will stall the process # once they are full. 
            # One reader thread per pipe feeds lines into a queue; the queues
            # are drained into log files by `_check_running`.
            q_out = Queue()
            q_err = Queue()
            t_out = Thread(target=_enqueue_pipe, args=(p_cmd.stdout, q_out))
            t_err = Thread(target=_enqueue_pipe, args=(p_cmd.stderr, q_err))
            # The threads should get killed ones a job ends. But if the hpsearch
            # ends, they also should get killed.
            t_out.daemon = True
            t_err.daemon = True
            t_out.start()
            t_err.start()
            job_io = (t_out, q_out, t_err, q_err)

            jobs.append((p_cmd, cmd_dict, folder_name, i, gpu_to_use, job_io))

    # Wait for all jobs to complete.
    while len(jobs) > 0:
        time.sleep(10)
        jobs = _check_running(args, out_dir, results_file, jobs)

def _enqueue_pipe(pipe, queue):
    # Read byte lines from `pipe` and push them (decoded) onto `queue` until
    # the pipe is exhausted; meant to run in a daemon thread.
    #
    # The code from this function and our solution for logging is inpired by the
    # following thread (accessed: 05/12/2020):
    #   https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
    #
    # NOTE Our copyright and license does not apply for this function.
    # We use this code WITHOUT ANY WARRANTIES.
    #
    # Instead, the code in this method is licensed under CC BY-SA 3.0:
    #   https://creativecommons.org/licenses/by-sa/3.0/
    #
    # The code stems from an answer by user "jfs":
    #   https://stackoverflow.com/users/4279/jfs
    # and was edited by user "ankostis"
    #   https://stackoverflow.com/users/548792/ankostis
    for l in iter(pipe.readline, b''):
        queue.put(l.decode('utf-8'))
    pipe.close()

def _backup_commands(commands, out_dir):
    """Backup commands.

    This function will generate a bash script that resembles the order in
    which the individual commands have been executed. This is important, as
    the order might be random. This script is just another helper for the user
    to follow the execution order.

    Additionally, this file save the commands as pickle. This is a backup for
    future usage (i.e., maybe a continue search option will be build in at
    some point).

    Args:
        commands (list): List of command dictionaries.
        out_dir (str): Output directory.
    """
    fn_script = os.path.join(out_dir, 'commands.sh')
    fn_plain_script = os.path.join(out_dir, 'commands_wo_dirs.sh')
    fn_pickle = os.path.join(out_dir, 'commands.pickle')

    with open(fn_pickle, 'wb') as f:
        pickle.dump(commands, f)

    with open(fn_script, 'w') as f:
        f.write('#!/bin/sh\n')
        f.write('# This script contains all %d commands that are planned ' \
                % (len(commands)) + 'to be executed during this ' +
                'hyperparameter search. The order of execution is preserved ' +
                'in this script.\n\n')
        for cmd in commands:
            f.write('%s\n' % (_args_to_cmd_str(cmd)))

    with open(fn_plain_script, 'w') as f:
        f.write('#!/bin/sh\n')
        f.write('# This script contains all %d commands that are planned ' \
                % (len(commands)) + 'to be executed during this ' +
                'hyperparameter search. The order of execution is preserved ' +
                'in this script.\n')
        f.write('# Note, for visual clarity, the output directories have ' +
                'been omitted. See script "commands.sh" for the full ' +
                'commands.\n')
        for cmd in commands:
            cmd_wo_dir = dict(cmd)
            cmd_wo_dir.pop(_OUT_ARG, None)
            f.write('%s\n' % (_args_to_cmd_str(cmd_wo_dir)))

def _store_incomplete(commands, out_dir):
    """This function will pickle all command dictionaries of commands that
    have not been completed. This might be used to just continue an
    interrupted hyperparameter search.

    Args:
        commands: List of command dictionaries.
        out_dir: Output directory.
    """
    # `_CMD_FINISHED` is a module-level list filled by `_check_running`.
    incomplete = []
    for i, cmd in enumerate(commands):
        if not _CMD_FINISHED[i]:
            incomplete.append(cmd)

    if len(incomplete) == 0:
        return

    warnings.warn('%d runs have not been completed.' % (len(incomplete)))

    fn_pickle = os.path.join(out_dir, 'not_completed.pickle')
    with open(fn_pickle, 'wb') as f:
        pickle.dump(incomplete, f)

def _read_config(config_mod, require_perf_eval_handle=False,
                 require_argparse_handle=False):
    """Parse the configuration module and check whether all attributes are set
    correctly.

    This function will set the corresponding global variables from this script
    appropriately.

    Args:
        config_mod: The implemented configuration template
            :mod:`hpsearch.hpsearch_postprocessing`.
        require_perf_eval_handle: Whether :attr:`_PERFORMANCE_EVAL_HANDLE` has
            to be specified in the config file.
        require_argparse_handle: Whether :attr:`_ARGPARSE_HANDLE` has to be
            specified in the config file.
    """
    # Mandatory attributes; 'finished' must be a summary keyword since
    # `_check_running` uses it to decide run success.
    assert(hasattr(config_mod, '_SCRIPT_NAME'))
    assert(hasattr(config_mod, '_SUMMARY_FILENAME'))
    assert(hasattr(config_mod, '_SUMMARY_KEYWORDS') and \
           'finished' in config_mod._SUMMARY_KEYWORDS)
    globals()['_SCRIPT_NAME'] = config_mod._SCRIPT_NAME
    globals()['_SUMMARY_FILENAME'] = config_mod._SUMMARY_FILENAME
    globals()['_SUMMARY_KEYWORDS'] = config_mod._SUMMARY_KEYWORDS

    # Ensure downwards compatibility -- attributes did not exist previously.
    if hasattr(config_mod, '_OUT_ARG'):
        globals()['_OUT_ARG'] = config_mod._OUT_ARG

    if hasattr(config_mod, '_SUMMARY_PARSER_HANDLE') and \
            config_mod._SUMMARY_PARSER_HANDLE is not None:
        globals()['_SUMMARY_PARSER_HANDLE'] = config_mod._SUMMARY_PARSER_HANDLE
    else:
        globals()['_SUMMARY_PARSER_HANDLE'] = _get_performance_summary

    if require_perf_eval_handle:
        assert(hasattr(config_mod, '_PERFORMANCE_EVAL_HANDLE') and \
               config_mod._PERFORMANCE_EVAL_HANDLE is not None)
        globals()['_PERFORMANCE_EVAL_HANDLE'] = \
            config_mod._PERFORMANCE_EVAL_HANDLE
    else:
        if not hasattr(config_mod, '_PERFORMANCE_EVAL_HANDLE') or \
                config_mod._PERFORMANCE_EVAL_HANDLE is None:
            warnings.warn('Attribute "_PERFORMANCE_EVAL_HANDLE" not defined ' +
                          'in configuration file but might be required in ' +
                          'future releases.')

    if hasattr(config_mod, '_PERFORMANCE_KEY') and \
            config_mod._PERFORMANCE_KEY is not None:
        globals()['_PERFORMANCE_KEY'] = config_mod._PERFORMANCE_KEY
    else:
        globals()['_PERFORMANCE_KEY'] = config_mod._SUMMARY_KEYWORDS[0]

    if hasattr(config_mod, '_PERFORMANCE_SORT_ASC'):
        globals()['_PERFORMANCE_SORT_ASC'] = config_mod._PERFORMANCE_SORT_ASC

    if require_argparse_handle:
        assert(hasattr(config_mod, '_ARGPARSE_HANDLE') and \
               config_mod._ARGPARSE_HANDLE is not None)
        globals()['_ARGPARSE_HANDLE'] = config_mod._ARGPARSE_HANDLE

if __name__ == '__main__':
    ### CLI definition.
    parser = argparse.ArgumentParser(description= \
        'hpsearch - Automatic Parameter Search -- ' +
        'Note, that the search values are defined in the source code of the ' +
        'accompanied configuration file!')
    parser.add_argument('--deterministic_search', action='store_true',
                        help='If not selected, the order of configurations ' +
                             'is randomly picked.')
    parser.add_argument('--num_searches', type=int, metavar='N', default=-1,
                        help='If not -1, then the number of configurations ' +
                             'that should be tested maximally. ' +
                             'Default: %(default)s.')
    parser.add_argument('--out_dir', type=str,
                        default='./out/hyperparam_search',
                        help='Where should all the output files be written ' +
                             'to? Note, a timestep is added to this path, ' +
                             'except "force_out_dir" is set. ' +
                             'Default: %(default)s.')
    parser.add_argument('--force_out_dir', action='store_true',
                        help='If enabled, the search will be stored in the ' +
                             'exact location provided in "out_dir" and not ' +
                             'a ' + 'subfolder.')
    parser.add_argument('--dont_force_new_dir', action='store_true',
                        help='If enabled, the search can be stored in an '+
                             'output folder that already exists. NOTE, ' +
                             'this option is not a merging option. Previous ' +
                             'hpsearch results will be overwritten if ' +
                             'existing.')
    parser.add_argument('--grid_module', type=str, default=_DEFAULT_GRID,
                        help='Name of module to import from which to read ' +
                             'the hyperparameter search grid. The module ' +
                             'must define the two variables "grid" and ' +
                             '"conditions". Default: %(default)s.')
    parser.add_argument('--run_cwd', type=str, default='.',
                        help='The working directory in which runs are ' +
                             'executed (in case the run script resides at a ' +
                             'different folder than this hpsearch script. ' +
                             'All outputs of this script will be relative ' +
                             'to ' + 'this working directory (if output ' +
                             'folder is ' + 'defined as relative folder). ' +
                             'Default: "%(default)s".')
    parser.add_argument('--run_cluster', action='store_true',
                        help='This option would produce jobs for a GPU ' +
                             'cluser running a job scheduler (see option ' +
                             '"scheduler".')
    parser.add_argument('--scheduler', type=str, default='lsf',
                        choices=['lsf', 'slurm'],
                        help='The job scheduler used on the cluster. ' +
                             'Default: %(default)s.')
    parser.add_argument('--num_jobs', type=int, metavar='N', default=8,
                        help='If "run_cluster" is activated, then this ' +
                             'option determines the maximum number of jobs ' +
                             'that can be submitted in parallel. ' +
                             'Default: %(default)s.')
    parser.add_argument('--num_hours', type=int, metavar='N', default=24,
                        help='If "run_cluster" is activated, then this ' +
                             'option determines the maximum number of ' +
                             'hours ' + 'a job may run on the cluster. ' +
                             'Default: %(default)s.')
    parser.add_argument('--resources', type=str,
                        default='"rusage[mem=8000, ngpus_excl_p=1]"',
                        help='If "run_cluster" is activated and ' +
                             '"scheduler" ' + 'is "lsf", then this option ' +
                             'determines the ' + 'resources assigned to ' +
                             'job in the ' + 'hyperparameter search ' +
                             '(option -R of bsub). ' +
                             'Default: %(default)s.')
    parser.add_argument('--slurm_mem', type=str, default='8G',
                        help='If "run_cluster" is activated and ' +
                             '"scheduler" ' + 'is "slurm", then this value ' +
                             'will be passed as ' + 'argument "mem" of ' +
                             '"sbatch". An empty string ' + 'means that ' +
                             '"mem" will not be specified. ' +
                             'Default: %(default)s.')
    parser.add_argument('--slurm_gres', type=str, default='gpu:1',
                        help='If "run_cluster" is activated and ' +
                             '"scheduler" ' + 'is "slurm", then this value ' +
                             'will be passed as ' + 'argument "gres" of ' +
                             '"sbatch". An empty string ' + 'means that ' +
                             '"gres" will not be specified. ' +
                             'Default: %(default)s.')
    parser.add_argument('--slurm_partition', type=str, default='',
                        help='If "run_cluster" is activated and ' +
                             '"scheduler" ' + 'is "slurm", then this value ' +
                             'will be passed as ' + 'argument "partition" ' +
                             'of "sbatch". An empty ' + 'string means that ' +
                             '"partition" will not be ' + 'specified. ' +
                             'Default: %(default)s.')
    parser.add_argument('--slurm_qos', type=str, default='',
                        help='If "run_cluster" is activated and ' +
                             '"scheduler" ' + 'is "slurm", then this value ' +
                             'will be passed as ' + 'argument "qos" of ' +
                             '"sbatch". An empty string ' + 'means that ' +
                             '"qos" will not be specified. ' +
                             'Default: %(default)s.')
    parser.add_argument('--slurm_constraint', type=str, default='',
                        help='If "run_cluster" is activated and ' +
                             '"scheduler" ' + 'is "slurm", then this value ' +
                             'will be passed as ' + 'argument "constraint" ' +
                             'of "sbatch". An empty ' + 'string means that ' +
                             '"constraint" will not be ' + 'specified. ' +
                             'Default: %(default)s.')
    parser.add_argument('--visible_gpus', type=str, default='',
                        help='If "run_cluster" is NOT activated, then this ' +
                             'option determines the CUDA devices visible ' +
                             'to ' + 'the hyperparameter search. A string ' +
                             'of comma ' + 'separated integers is expected. ' +
                             'If the list is ' + 'empty, then all GPUs of ' +
                             'the machine are used. ' + 'The relative ' +
                             'memory usage is specified, i.e., ' + 'a ' +
                             'number between 0 and 1. If "-1" is given, ' +
                             'the jobs will be executed sequentially and ' +
                             'not ' + 'assigned to a particular GPU. ' +
                             'Default: %(default)s.')
    parser.add_argument('--allowed_load', type=float, default=0.5,
                        help='If "run_cluster" is NOT activated, then this ' +
                             'option determines the maximum load a GPU may ' +
                             'have such that another process may start on ' +
                             'it. The relative load is specified, i.e., a ' +
                             'number between 0 and 1. ' +
                             'Default: %(default)s.')
    parser.add_argument('--allowed_memory', type=float, default=0.5,
                        help='If "run_cluster" is NOT activated, then this ' +
                             'option determines the maximum memory usage a ' +
                             'GPU may have such that another process may ' +
                             'start on it. Default: %(default)s.')
    parser.add_argument('--sim_startup_time', type=int, metavar='N',
                        default=60,
                        help='If "run_cluster" is NOT activated, then this ' +
                             'option determines the startup time of ' +
                             'simulations. If a job was assigned to a GPU, ' +
                             'then this time (in seconds) has to pass ' +
                             'before ' + 'options "allowed_load" and ' +
                             '"allowed_memory" ' + 'are checked to decide ' +
                             'whether a new process ' + 'can be send to a ' +
                             'GPU.Default: %(default)s.')
    parser.add_argument('--max_num_jobs_per_gpu', type=int, metavar='N',
                        default=1,
                        help='If "run_cluster" is NOT activated, then this ' +
                             'option determines the maximum number of jobs ' +
                             'per GPU that can be submitted in parallel. ' +
                             'Note, this script does not validate whether ' +
                             'other processes are already assigned to a ' +
                             'GPU. ' + 'Default: %(default)s.')
    parser.add_argument('--random_seed', type=int, metavar='N', default=42,
                        help='Random seed. Default: %(default)s.')
    # TODO build in "continue" option to finish incomplete commands.
    args = parser.parse_args()

    # Seed both RNGs used below (shuffling uses `random`, numpy for safety).
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)

    # Fail early if the optional scheduler/GPU helper packages are missing.
    if args.run_cluster and args.scheduler == 'lsf':
        if bsub is None:
            raise ImportError('Package "bsub" is required for running a ' +
                'hyperparameter-search on the cluster using the LSF job ' +
                'scheduler. Please install it via ' +
                '"pip install -U --user bsub".')
    elif not args.run_cluster:
        if args.visible_gpus != '-1' and GPUtil is None:
            raise ImportError('Package "GPUtil" is required for this hyper-' +
                'parameter search if option "--run_cluster" is not used. ' +
                'please install via "pip install -U --user gputil" as ' +
                'explained here: https://github.com/anderskm/gputil.')

    ### Get hyperparameter search grid from specified module.
    grid_module = importlib.import_module(args.grid_module)
    print('Loaded hp config from %s.' % grid_module.__file__)
    assert(hasattr(grid_module, 'grid') and hasattr(grid_module,
                                                    'conditions'))
    grid = grid_module.grid
    conditions = grid_module.conditions
    assert(len(grid) > 0)

    _read_config(grid_module)

    print('### Running Hyperparameter Search ...')
    if len(conditions) > 0:
        print('Note, %d conditions have been defined and will be enforced!' \
              % len(conditions))

    if args.run_cwd != '.':
        os.chdir(args.run_cwd)
        print('Current working directory: %s.' % os.path.abspath(os.curdir))

    ### Output directory creation.
    # FIXME we should build in possibilities to merge with previous searches.
    hpsearch_dt = datetime.now()
    if args.force_out_dir:
        out_dir = args.out_dir
    else:
        out_dir = os.path.join(args.out_dir, 'search_' +
                               hpsearch_dt.strftime('%Y-%m-%d_%H-%M-%S'))
        # Sometimes on the cluster, hpsearches get scheduled simultaneously
        # and # this is a simple way to assert that they don't collide.
        while os.path.exists(out_dir):
            time.sleep(1)
            hpsearch_dt = datetime.now()
            out_dir = os.path.join(args.out_dir, 'search_' +
                                   hpsearch_dt.strftime('%Y-%m-%d_%H-%M-%S'))

    print('Results will be stored in %s.' % os.path.abspath(out_dir))

    if not args.dont_force_new_dir and os.path.exists(out_dir):
        raise RuntimeError('Output directory %s already exists!' % out_dir)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    ### Backup hpsearch config.
    shutil.copyfile(grid_module.__file__,
                    os.path.join(out_dir,
                                 os.path.basename(grid_module.__file__)))

    ### Build the grid.
    # We build a list of dictionaries with key value pairs.
    commands = _grid_to_commands(grid)

    # Ensure, that all conditions can be enforced.
    orig_conditions = conditions
    conditions = []
    for i, cond in enumerate(orig_conditions):
        assert len(cond) == 2 and isinstance(cond[0], dict) \
            and isinstance(cond[0], dict)
        valid = True
        for k in cond[0].keys():
            if k not in grid.keys():
                warnings.warn('Condition %d can not be enforced. ' % (i) +
                              'Key "%s" is not specified in grid -- %s.' %
                              (k, str(cond)))
                valid = False
        if valid:
            conditions.append(cond)

    # Now, we have the commands according to the grid, but we still need to
    # enforce the conditions.
    # This list will keep track of the conditions each command is affected.
    # FIXME We enforce conditions sequentially. But it could be, that the user
    # specifies conflicting conditions. E.g., condition 2 modifies commands
    # such that condition 1 would fire again.
    for i, cond_tup in enumerate(conditions):
        cond, action = cond_tup
        cond_keys = list(cond.keys())

        affected = [False] * len(commands)
        new_commands = []
        for j, command in enumerate(commands):
            # Figure out, whether condition i is satisfied for command j.
            comm_keys = command.keys()
            key_satisfied = [False] * len(cond_keys)
            for l, cond_key in enumerate(cond_keys):
                if cond_key in comm_keys:
                    cond_vals = cond[cond_key]
                    if command[cond_key] in cond_vals:
                        key_satisfied[l] = True
            if np.all(key_satisfied):
                affected[j] = True
            else:
                continue

            # Generate a set of replacement commands for command j, such that
            # condition i is satisfied.
            cmds = _grid_to_commands(action)
            for l, cmd in enumerate(cmds):
                for k in comm_keys:
                    if k not in cmd.keys():
                        cmds[l][k] = command[k]
            new_commands.extend(cmds)

        # Remove all commands affected by this condition and insert the new
        # ones.
        old_cmds = commands
        commands = []
        for j, cmd in enumerate(old_cmds):
            if not affected[j]:
                commands.append(cmd)
        commands.extend(new_commands)

    # Note, the way we enforced conditions above may result in dublicates.
    # We need to remove them now.
    old_cmds = [(hash(frozenset(cmd.items())), cmd) for cmd in commands]
    # Sort commands according to their hash value.
    old_cmds = sorted(old_cmds, key=lambda tup: tup[0])
    commands = []
    i = 0
    while i < len(old_cmds):
        hash_i, cmd_i = old_cmds[i]
        # Check if current command has dublicate.
        if i < len(old_cmds)-1:
            hash_next, cmd_next = old_cmds[i+1]
            if hash_i == hash_next:
                warnings.warn('Command dublicate found! The following ' +
                    'commands have been identified as dublicates. The first ' +
                    'one will be removed.\n--- %s\n--- %s' \
                    % (_args_to_cmd_str(cmd_i), _args_to_cmd_str(cmd_next)))
                i += 1
                continue
        commands.append(cmd_i)
        i += 1

    ### Random shuffling of command execution order.
    if not args.deterministic_search:
        random.shuffle(commands)

    ### Consider the maximum number of commands we may execute.
    if args.num_searches != -1 and len(commands) > args.num_searches:
        print('Only %d of %d configurations will be tested!' % \
              (args.num_searches, len(commands)))
        commands = commands[:args.num_searches]

    ### Print all commands to user to allow visual verification.
    print('\n### List of all commands. Please verify carefully. ###\n')
    for cmd in commands:
        print(_args_to_cmd_str(cmd))
    print('\nThe %d command(s) above will be executed.' % len(commands))

    # Completion flags, updated by `_check_running` and read by
    # `_store_incomplete`.
    _CMD_FINISHED = [False] * len(commands)

    ### Assign an output directory to each command.
    # We do this after the shuffling to make sure the folder are displayed in
    # their execution order.
    # We also do it after the printing above to avoid visual clutter.
    # Identifier of current hpsearch. Why do we add such a cryptic number to
    # the # simulation output folders? We need the folder names of different
    # hpsearches to be different, as we use the folder names to name the job
    # files (.out and .err) that are written by the LSF Batch system on the
    # cluster. Those files are all stored in the same folder, even if coming
    # from different hpsearches.
    # FIXME There is for sure a better solution.
    hpsearch_ident = hpsearch_dt.strftime("%Y%m%d%H%M%S")

    num_cmds = len(commands)
    # Zero-pad run indices so folder names sort lexicographically.
    n_digits = int(np.floor(np.log10(num_cmds))) + 1
    for i, cmd in enumerate(commands):
        assert _OUT_ARG not in cmd.keys()
        folder_name = 'sim_%s_%s' % (hpsearch_ident,
                                     str(i+1).zfill(n_digits))
        cmd[_OUT_ARG] = os.path.join(out_dir, folder_name)

    # The list of command strings will be dumped into a file, such that the
    # user sees their order.
    _backup_commands(commands, out_dir)

    ### Hyperparameter Search
    # Where do we summarize the results?
    results_file = os.path.join(out_dir, 'search_results.csv')

    try:
        _run_cmds(args, commands, out_dir, results_file)
    except:
        traceback.print_exc(file=sys.stdout)
        warnings.warn('An error occurred during the hyperparameter search.')

    _store_incomplete(commands, out_dir)

    ### Sort CSV file according to performance key.
    try:
        csv_file_content = pandas.read_csv(results_file, sep=';')
        csv_file_content = csv_file_content.sort_values(_PERFORMANCE_KEY,
            ascending=_PERFORMANCE_SORT_ASC)
        csv_file_content.to_csv(results_file, sep=';', index=False)
    except:
        traceback.print_exc(file=sys.stdout)
        warnings.warn('No results have been gathered during this hpsearch.')

    print('### Running Hyperparameter Search ... Done')
trezor.py
"""Trezor hardware-wallet plugin for electrum-mona.

Provides the keystore (`TrezorKeyStore`) and plugin (`TrezorPlugin`) glue
between Electrum wallets and a Trezor device driven through `trezorlib`.
"""
import traceback
import sys
from typing import NamedTuple, Any

from electrum_mona.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_mona.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum_mona.bip32 import BIP32Node, convert_bip32_path_to_list_of_uint32 as parse_path
from electrum_mona import constants
from electrum_mona.i18n import _
from electrum_mona.plugin import Device
from electrum_mona.transaction import deserialize, Transaction
from electrum_mona.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_mona.base_wizard import ScriptTypeNotSupported, HWD_SETUP_NEW_WALLET
from electrum_mona.logging import get_logger

from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch,
                                trezor_validate_op_return_output_and_get_data,
                                LibraryFoundButUnusable,
                                OutdatedHwFirmwareException)

_logger = get_logger(__name__)

# trezorlib is optional at import time: if it is missing or broken, the
# plugin stays importable (TREZORLIB = False) so the GUI can show a helpful
# "library missing" message instead of crashing.
try:
    import trezorlib
    import trezorlib.transport
    from trezorlib.transport.bridge import BridgeTransport, call_bridge

    from .clientbase import TrezorClientBase

    from trezorlib.messages import (
        RecoveryDeviceType, HDNodeType, HDNodePathType,
        InputScriptType, OutputScriptType, MultisigRedeemScriptType,
        TxInputType, TxOutputType, TxOutputBinType, TransactionType, SignTx)

    RECOVERY_TYPE_SCRAMBLED_WORDS = RecoveryDeviceType.ScrambledWords
    RECOVERY_TYPE_MATRIX = RecoveryDeviceType.Matrix

    TREZORLIB = True
except Exception as e:
    _logger.exception('error importing trezorlib')
    TREZORLIB = False
    # Fallback constants so the rest of the module can still reference them.
    RECOVERY_TYPE_SCRAMBLED_WORDS, RECOVERY_TYPE_MATRIX = range(2)


# Trezor initialization methods
TIM_NEW, TIM_RECOVER = range(2)

TREZOR_PRODUCT_KEY = 'Trezor'


class TrezorKeyStore(Hardware_KeyStore):
    """Keystore backed by a Trezor device (signing happens on-device)."""
    hw_type = 'trezor'
    device = TREZOR_PRODUCT_KEY

    def get_derivation(self):
        """Return this keystore's BIP32 derivation path prefix (e.g. m/44'/22'/0')."""
        return self.derivation

    def get_client(self, force_pair=True):
        """Return the (possibly newly paired) device client via the plugin."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        """Not supported by Trezor firmware; always raises."""
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        """Sign `message` on-device with the key at derivation suffix `sequence` (change, index)."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        msg_sig = client.sign_message(address_path, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        """Collect previous transactions and xpub paths, then delegate signing to the plugin.

        No-op if the transaction is already fully signed.
        """
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy (non-segwit) inputs need the full previous tx so the
            # device can verify the input amount.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                # Only record the derivation for xpubs belonging to this keystore.
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)


class TrezorInitSettings(NamedTuple):
    """User choices gathered by the wizard for initializing a device."""
    word_count: int            # seed length in words (12/18/24)
    label: str                 # device label to set
    pin_enabled: bool          # whether to enable PIN protection
    passphrase_enabled: bool   # whether to enable BIP39 passphrase
    recovery_type: Any = None  # RECOVERY_TYPE_* (only for TIM_RECOVER)
    no_backup: bool = False    # create a no-backup ("seedless") device


class TrezorPlugin(HW_PluginBase):
    """Plugin entry point: device discovery, setup, and transaction signing."""
    # Derived classes provide:
    #
    #  class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, types

    firmware_URL = 'https://wallet.trezor.io'
    libraries_URL = 'https://github.com/trezor/python-trezor'
    minimum_firmware = (1, 6, 0)
    keystore_class = TrezorKeyStore
    minimum_library = (0, 11, 0)
    maximum_library = (0, 12)
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    DEVICE_IDS = (TREZOR_PRODUCT_KEY,)

    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        """Register this plugin's device-enumeration hook if trezorlib is usable."""
        super().__init__(parent, config, name)
        self.libraries_available = self.check_libraries_available()
        if not self.libraries_available:
            return
        self.device_manager().register_enumerate_func(self.enumerate)

    def get_library_version(self):
        """Return the installed trezorlib version, or raise if it imported but is unusable."""
        import trezorlib
        try:
            version = trezorlib.__version__
        except Exception:
            version = 'unknown'
        if TREZORLIB:
            return version
        else:
            raise LibraryFoundButUnusable(library_version=version)

    def enumerate(self):
        """List connected Trezor devices as electrum `Device` records."""
        # If there is a bridge, prefer that.
        # On Windows, the bridge runs as Admin (and Electrum usually does not),
        # so the bridge has better chances of finding devices. see #5420
        # This also avoids duplicate entries.
        try:
            call_bridge("enumerate")
        except Exception:
            devices = trezorlib.transport.enumerate_devices()
        else:
            devices = BridgeTransport.enumerate()
        return [Device(path=d.get_path(),
                       interface_number=-1,
                       id_=d.get_path(),
                       product_key=TREZOR_PRODUCT_KEY,
                       usage_page=0,
                       transport_ui_string=d.get_path())
                for d in devices]

    def create_client(self, device, handler):
        """Open a transport to `device` and wrap it in a TrezorClientBase.

        Returns None if the transport cannot be opened.
        """
        try:
            self.logger.info(f"connecting to device at {device.path}")
            transport = trezorlib.transport.get_transport(device.path)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
        if not transport:
            self.logger.info(f"cannot connect at {device.path}")
            return
        self.logger.info(f"connected to device at {device.path}")
        # note that this call can still raise!
        return TrezorClientBase(transport, handler, self)

    def get_client(self, keystore, force_pair=True):
        """Return (and mark as recently used) the client paired with `keystore`."""
        devmgr = self.device_manager()
        handler = keystore.handler
        # hid_lock serializes raw HID access across threads.
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        """Coin name string as expected by trezorlib/firmware."""
        return "Testnet" if constants.net.TESTNET else "Monacoin"

    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize the device, then run it in a background thread."""
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
        ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, device_id)
            t = threading.Thread(
                target=self._initialize_device_safe,
                args=(settings, method, device_id, wizard, handler))
            # NOTE(review): Thread.setDaemon() is deprecated in modern Python;
            # `t.daemon = True` is the equivalent replacement.
            t.setDaemon(True)
            t.start()
            # Block on the wizard's event loop until _initialize_device_safe
            # calls wizard.loop.exit(exit_code).
            exit_code = wizard.loop.exec_()
            if exit_code != 0:
                # this method (initialize_device) was called with the expectation
                # of leaving the device in an initialized state when finishing.
                # signal that this is not the case:
                raise UserCancelled()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg,
                             choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Exception-safe wrapper around _initialize_device; reports errors to `handler`
        and always releases the wizard's event loop with an exit code (0 = success)."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(repr(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)

    def _initialize_device(self, settings: TrezorInitSettings, method, device_id,
                           wizard, handler):
        """Drive the on-device reset/recover flow according to `settings` and `method`.

        Raises if the device disconnected or `method` is unknown.
        """
        if method == TIM_RECOVER and settings.recovery_type == RECOVERY_TYPE_SCRAMBLED_WORDS:
            handler.show_error(_(
                "You will be asked to enter 24 words regardless of your "
                "seed's actual length. If you enter a word incorrectly or "
                "misspell it, you cannot change it or go back - you will need "
                "to start again from the beginning.\n\nSo please enter "
                "the words carefully!"),
                blocking=True)

        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))

        if method == TIM_NEW:
            # Map seed word count to entropy strength in bits.
            strength_from_word_count = {12: 128, 18: 192, 24: 256}
            client.reset_device(
                strength=strength_from_word_count[settings.word_count],
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label,
                no_backup=settings.no_backup)
        elif method == TIM_RECOVER:
            client.recover_device(
                recovery_type=settings.recovery_type,
                word_count=settings.word_count,
                passphrase_protection=settings.passphrase_enabled,
                pin_protection=settings.pin_enabled,
                label=settings.label)
            if settings.recovery_type == RECOVERY_TYPE_MATRIX:
                handler.close_matrix_dialog()
        else:
            raise RuntimeError("Unsupported recovery method")

    def _make_node_path(self, xpub, address_n):
        """Convert an xpub + derivation suffix into a trezorlib HDNodePathType."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return HDNodePathType(node=node, address_n=address_n)

    def setup_device(self, device_info, wizard, purpose):
        """Pair with the device in the wizard flow; initialize it first if needed.

        Raises on missing client or outdated firmware.
        """
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
                                      _('Make sure it is in the correct state.'))
        if not client.is_uptodate():
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            raise OutdatedHwFirmwareException(msg)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        is_creating_wallet = purpose == HWD_SETUP_NEW_WALLET
        # Fetching an xpub forces pairing/PIN entry now, within the wizard.
        client.get_xpub('m', 'standard', creating=is_creating_wallet)
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Return the xpub at `derivation` for the given script type `xtype`."""
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def get_trezor_input_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type string to a trezorlib InputScriptType."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return InputScriptType.SPENDWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return InputScriptType.SPENDP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return InputScriptType.SPENDADDRESS
        if electrum_txin_type in ('p2sh', ):
            return InputScriptType.SPENDMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def get_trezor_output_script_type(self, electrum_txin_type: str):
        """Map an electrum txin type string to a trezorlib OutputScriptType."""
        if electrum_txin_type in ('p2wpkh', 'p2wsh'):
            return OutputScriptType.PAYTOWITNESS
        if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
            return OutputScriptType.PAYTOP2SHWITNESS
        if electrum_txin_type in ('p2pkh', ):
            return OutputScriptType.PAYTOADDRESS
        if electrum_txin_type in ('p2sh', ):
            return OutputScriptType.PAYTOMULTISIG
        raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Sign `tx` on-device and write the signatures back into `tx`."""
        prev_tx = {bfh(txhash): self.electrum_tx_to_txtype(tx, xpub_path)
                   for txhash, tx in prev_tx.items()}
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, xpub_path, True)
        outputs = self.tx_outputs(keystore.get_derivation(), tx)
        details = SignTx(lock_time=tx.locktime, version=tx.version)
        # NOTE(review): `_` here shadows the i18n translation function for the
        # remainder of this method.
        signatures, _ = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                       details=details, prev_txes=prev_tx)
        # Append SIGHASH_ALL (0x01) to each DER signature, as electrum expects.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)

    def show_address(self, wallet, address, keystore=None):
        """Display `address` on the device screen for user verification."""
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        deriv_suffix = wallet.get_address_index(address)
        derivation = keystore.derivation
        address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
        script_type = self.get_trezor_input_script_type(wallet.txin_type)

        # prepare multisig, if available:
        xpubs = wallet.get_master_public_keys()
        if len(xpubs) > 1:
            pubkeys = wallet.get_public_keys(address)
            # sort xpubs using the order of pubkeys
            sorted_pairs = sorted(zip(pubkeys, xpubs))
            multisig = self._make_multisig(
                wallet.m,
                [(xpub, deriv_suffix) for _, xpub in sorted_pairs])
        else:
            multisig = None

        client = self.get_client(keystore)
        client.show_address(address_path, script_type, multisig)

    def tx_inputs(self, tx, xpub_path, for_sig=False):
        """Convert electrum tx inputs to trezorlib TxInputType messages.

        With for_sig=True also fills script type, multisig info and the
        device-side derivation path (address_n) of our key.
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = b"\x00"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    xpubs = [parse_xpubkey(x) for x in x_pubkeys]
                    multisig = self._make_multisig(txin.get('num_sig'), xpubs,
                                                   txin.get('signatures'))
                    script_type = self.get_trezor_input_script_type(txin['type'])
                    txinputtype = TxInputType(
                        script_type=script_type,
                        multisig=multisig)
                    # find which key is mine
                    for xpub, deriv in xpubs:
                        if xpub in xpub_path:
                            xpub_n = parse_path(xpub_path[xpub])
                            txinputtype.address_n = xpub_n + deriv
                            break

                prev_hash = bfh(txin['prevout_hash'])
                prev_index = txin['prevout_n']

            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index

            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig

            # Default sequence enables opt-in RBF (0xfffffffe).
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)

            inputs.append(txinputtype)

        return inputs

    def _make_multisig(self, m, xpubs, signatures=None):
        """Build a MultisigRedeemScriptType for m-of-n, or None for single-sig."""
        if len(xpubs) == 1:
            return None

        pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
        if signatures is None:
            signatures = [b''] * len(pubkeys)
        elif len(signatures) != len(pubkeys):
            raise RuntimeError('Mismatched number of signatures')
        else:
            # Strip the trailing sighash byte from each existing signature.
            signatures = [bfh(x)[:-1] if x else b'' for x in signatures]

        return MultisigRedeemScriptType(
            pubkeys=pubkeys,
            signatures=signatures,
            m=m)

    def tx_outputs(self, derivation, tx: Transaction):
        """Convert electrum tx outputs to trezorlib TxOutputType messages.

        At most one change output is described by derivation path (hidden
        from the device's confirmation screen); all others go by address.
        """

        def create_output_by_derivation():
            # Output belongs to our wallet: describe it via derivation path.
            script_type = self.get_trezor_output_script_type(info.script_type)
            deriv = parse_path("/%d/%d" % index)
            multisig = self._make_multisig(m, [(xpub, deriv) for xpub in xpubs])
            txoutputtype = TxOutputType(
                multisig=multisig,
                amount=amount,
                address_n=parse_path(derivation + "/%d/%d" % index),
                script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # External (or non-hidden) output: describe it by address/script.
            txoutputtype = TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = OutputScriptType.PAYTOOPRETURN
                txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
            elif _type == TYPE_ADDRESS:
                txoutputtype.script_type = OutputScriptType.PAYTOADDRESS
                txoutputtype.address = address
            return txoutputtype

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)

        for o in tx.outputs():
            _type, address, amount = o.type, o.address, o.value
            use_create_by_derivation = False

            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                # note: ^ restriction can be removed once we require fw
                # that has https://github.com/trezor/trezor-mcu/pull/306
                if info.is_change == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True

            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation()
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs

    def electrum_tx_to_txtype(self, tx, xpub_path):
        """Serialize a previous electrum transaction into a trezorlib TransactionType."""
        t = TransactionType()
        if tx is None:
            # probably for segwit input and we don't need this prev txn
            return t
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        t.inputs = self.tx_inputs(tx, xpub_path)
        t.bin_outputs = [
            TxOutputBinType(amount=vout['value'], script_pubkey=bfh(vout['scriptPubKey']))
            for vout in d['outputs']
        ]
        return t
test_carpark.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
#   Copyright 2018-2019 Fetch.AI Limited
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ------------------------------------------------------------------------------

"""This test module contains the integration test for the weather skills."""

import io
import os
import shutil
import signal
import subprocess  # nosec
import sys
import tempfile
import threading
import time

import pytest

from aea.cli import cli

from ...common.click_testing import CliRunner
from ...conftest import AUTHOR, CLI_LOG_OPTION


def _read_tty(pid: subprocess.Popen):
    """Echo a child process' stdout to our stdout, line by line."""
    for line in io.TextIOWrapper(pid.stdout, encoding="utf-8"):
        print("stdout: " + line.replace("\n", ""))


def _read_error(pid: subprocess.Popen):
    """Echo a child process' stderr to our stdout, line by line."""
    for line in io.TextIOWrapper(pid.stderr, encoding="utf-8"):
        print("stderr: " + line.replace("\n", ""))


def _replace_in_file(path, replacements):
    """Apply (find_text, replace_text) substitutions to a text file in place.

    :param path: path of the file to rewrite.
    :param replacements: iterable of (find_text, replace_text) pairs.
    """
    # FIX: the original open/read/close sequences leaked the file handle
    # whenever an assertion or exception fired in between; context managers
    # guarantee closure.
    with open(path, mode="r") as f:
        content = f.read()
    for find_text, replace_text in replacements:
        content = content.replace(find_text, replace_text)
    with open(path, "w") as f:
        f.write(content)


class TestCarPark:
    """Test that carpark skills work."""

    @pytest.fixture(autouse=True)
    def _start_oef_node(self, network_node):
        """Start an oef node."""

    @classmethod
    def setup_class(cls):
        """Set up the test class."""
        cls.runner = CliRunner()
        cls.agent_name_one = "my_carpark_aea"
        cls.agent_name_two = "my_carpark_client_aea"
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        os.chdir(cls.t)

    def test_carpark(self, pytestconfig):
        """Run the weather skills sequence."""
        if pytestconfig.getoption("ci"):
            pytest.skip("Skipping the test since it doesn't work in CI.")

        # add packages folder
        packages_src = os.path.join(self.cwd, "packages")
        packages_dst = os.path.join(self.t, "packages")
        shutil.copytree(packages_src, packages_dst)

        # Add scripts folder
        scripts_src = os.path.join(self.cwd, "scripts")
        scripts_dst = os.path.join(self.t, "scripts")
        shutil.copytree(scripts_src, scripts_dst)

        result = self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "init", "--local", "--author", AUTHOR],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        # create agent one and agent two
        result = self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "create", "--local", self.agent_name_one],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        result = self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "create", "--local", self.agent_name_two],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        # Snippet inserted into both agents' aea-config.yaml to enable the
        # fetchai testnet ledger API.
        ledger_find_text = "ledger_apis: {}"
        ledger_replace_text = """ledger_apis:
  fetchai:
    network: testnet"""

        # Setup agent one
        agent_one_dir_path = os.path.join(self.t, self.agent_name_one)
        os.chdir(agent_one_dir_path)

        result = self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "add", "--local", "connection", "fetchai/oef:0.1.0"],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        result = self.runner.invoke(
            cli,
            [
                *CLI_LOG_OPTION,
                "add",
                "--local",
                "skill",
                "fetchai/carpark_detection:0.1.0",
            ],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        result = self.runner.invoke(
            cli, [*CLI_LOG_OPTION, "install"], standalone_mode=False
        )
        assert result.exit_code == 0

        # Load the skill yaml file and manually insert the things we need:
        # comment out the db settings so defaults are used.
        yaml_path = os.path.join(
            "vendor", "fetchai", "skills", "carpark_detection", "skill.yaml"
        )
        _replace_in_file(
            yaml_path,
            [
                ("db_is_rel_to_cwd: true", "# db_is_rel_to_cwd: true"),
                ("db_rel_dir: ../temp_files", "# db_rel_dir: ../temp_files"),
            ],
        )

        # Load the agent yaml file and manually insert the things we need (ledger APIs)
        _replace_in_file(
            "aea-config.yaml", [(ledger_find_text, ledger_replace_text)]
        )

        os.chdir(self.t)

        # Setup Agent two
        agent_two_dir_path = os.path.join(self.t, self.agent_name_two)
        os.chdir(agent_two_dir_path)

        result = self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "add", "--local", "connection", "fetchai/oef:0.1.0"],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        result = self.runner.invoke(
            cli,
            [
                *CLI_LOG_OPTION,
                "add",
                "--local",
                "skill",
                "fetchai/carpark_client:0.1.0",
            ],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        result = self.runner.invoke(
            cli, [*CLI_LOG_OPTION, "install"], standalone_mode=False
        )
        assert result.exit_code == 0

        # Load the agent yaml file and manually insert the things we need
        _replace_in_file(
            "aea-config.yaml", [(ledger_find_text, ledger_replace_text)]
        )

        # Generate the private keys
        result = self.runner.invoke(
            cli, [*CLI_LOG_OPTION, "generate-key", "fetchai"], standalone_mode=False
        )
        assert result.exit_code == 0

        # Add the private key
        result = self.runner.invoke(
            cli,
            [*CLI_LOG_OPTION, "add-key", "fetchai", "fet_private_key.txt"],
            standalone_mode=False,
        )
        assert result.exit_code == 0

        # Add some funds to the car park client
        result = self.runner.invoke(
            cli, [*CLI_LOG_OPTION, "generate-wealth", "fetchai"], standalone_mode=False
        )
        assert result.exit_code == 0

        # Fire the sub-processes and the threads.
        # FIX: pre-bind the process handles and collect *all* reader threads;
        # the original `finally` referenced names that were unbound when an
        # early step raised, and it overwrote the thread variables so only
        # the last two of the four reader threads were ever joined.
        process_one = None
        process_two = None
        read_threads = []
        try:
            os.chdir(agent_one_dir_path)
            process_one = subprocess.Popen(  # nosec
                [
                    sys.executable,
                    "-m",
                    "aea.cli",
                    "run",
                    "--connections",
                    "fetchai/oef:0.1.0",
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=os.environ.copy(),
            )

            os.chdir(agent_two_dir_path)
            process_two = subprocess.Popen(  # nosec
                [
                    sys.executable,
                    "-m",
                    "aea.cli",
                    "run",
                    "--connections",
                    "fetchai/oef:0.1.0",
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=os.environ.copy(),
            )

            # One stdout reader and one stderr reader per agent process.
            for proc in (process_one, process_two):
                for target in (_read_tty, _read_error):
                    thread = threading.Thread(target=target, args=(proc,))
                    thread.start()
                    read_threads.append(thread)

            time.sleep(10)
            process_one.send_signal(signal.SIGINT)
            process_two.send_signal(signal.SIGINT)

            process_one.wait(timeout=10)
            process_two.wait(timeout=10)

            assert process_one.returncode == 0
            assert process_two.returncode == 0
        finally:
            # Terminate any agent process that is still alive.
            for proc in (process_one, process_two):
                if proc is not None and proc.poll() is None:
                    proc.terminate()
                    proc.wait(2)
            # Reader threads end once the child pipes close.
            for thread in read_threads:
                thread.join()

        os.chdir(self.t)
        result = self.runner.invoke(
            cli, [*CLI_LOG_OPTION, "delete", self.agent_name_one], standalone_mode=False
        )
        assert result.exit_code == 0

        result = self.runner.invoke(
            cli, [*CLI_LOG_OPTION, "delete", self.agent_name_two], standalone_mode=False
        )
        assert result.exit_code == 0

    @classmethod
    def teardown_class(cls):
        """Teardowm the test."""
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass
__init__.py
# -*- coding: utf-8 -*- ''' Set up the Salt integration test suite ''' # Import Python libs from __future__ import absolute_import, print_function import os import re import sys import copy import json import time import stat import errno import signal import shutil import pprint import atexit import socket import logging import tempfile import threading import subprocess import multiprocessing from datetime import datetime, timedelta try: import pwd except ImportError: pass # Import salt tests support dirs from tests.support.paths import * # pylint: disable=wildcard-import from tests.support.processes import * # pylint: disable=wildcard-import from tests.support.unit import TestCase from tests.support.case import ShellTestCase from tests.support.parser import PNUM, print_header, SaltTestcaseParser from tests.support.helpers import requires_sshd_server, RedirectStdStreams from tests.support.paths import ScriptPathMixin from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin from tests.support.runtests import RUNTIME_VARS # Import Salt libs import salt import salt.config import salt.minion import salt.runner import salt.output import salt.version import salt.utils.color import salt.utils.files import salt.utils.path import salt.utils.platform import salt.utils.process import salt.utils.stringutils import salt.log.setup as salt_log_setup from salt.utils.verify import verify_env from salt.utils.immutabletypes import freeze from salt.utils.nb_popen import NonBlockingPopen from salt.exceptions import SaltClientError try: import salt.master except ImportError: # Not required for raet tests pass # Import 3rd-party libs import yaml import msgpack from salt.ext import six from salt.ext.six.moves import cStringIO try: import salt.ext.six.moves.socketserver as 
socketserver except ImportError: import socketserver from tornado import gen from tornado import ioloop # Import salt tests support libs from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic log = logging.getLogger(__name__) _RUNTESTS_PORTS = {} def get_unused_localhost_port(): ''' Return a random unused port on localhost ''' usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM) usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) usock.bind(('127.0.0.1', 0)) port = usock.getsockname()[1] if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521): # These ports are hardcoded in the test configuration port = get_unused_localhost_port() usock.close() return port DARWIN = True if sys.platform.startswith('darwin') else False BSD = True if 'bsd' in sys.platform else False if DARWIN and port in _RUNTESTS_PORTS: port = get_unused_localhost_port() usock.close() return port _RUNTESTS_PORTS[port] = usock if DARWIN or BSD: usock.close() return port def close_open_sockets(sockets_dict): for port in list(sockets_dict): sock = sockets_dict.pop(port) sock.close() atexit.register(close_open_sockets, _RUNTESTS_PORTS) SALT_LOG_PORT = get_unused_localhost_port() class ThreadingMixIn(socketserver.ThreadingMixIn): daemon_threads = True class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer): allow_reuse_address = True def server_activate(self): self.shutting_down = threading.Event() socketserver.TCPServer.server_activate(self) #super(ThreadedSocketServer, self).server_activate() def server_close(self): if hasattr(self, 'shutting_down'): self.shutting_down.set() socketserver.TCPServer.server_close(self) #super(ThreadedSocketServer, self).server_close() class SocketServerRequestHandler(socketserver.StreamRequestHandler): def handle(self): unpacker = msgpack.Unpacker(encoding='utf-8') while not self.server.shutting_down.is_set(): try: wire_bytes = self.request.recv(1024) if not wire_bytes: break unpacker.feed(wire_bytes) for 
record_dict in unpacker: record = logging.makeLogRecord(record_dict) logger = logging.getLogger(record.name) logger.handle(record) del record_dict except (EOFError, KeyboardInterrupt, SystemExit): break except socket.error as exc: try: if exc.errno == errno.WSAECONNRESET: # Connection reset on windows break except AttributeError: # We're not on windows pass log.exception(exc) except Exception as exc: log.exception(exc) class TestDaemon(object): ''' Set up the master and minion daemons, and run related cases ''' MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120 def __init__(self, parser): self.parser = parser self.colors = salt.utils.color.get_colors(self.parser.options.no_colors is False) if salt.utils.platform.is_windows(): # There's no shell color support on windows... for key in self.colors: self.colors[key] = '' def __enter__(self): ''' Start a master and minion ''' # Setup the multiprocessing logging queue listener salt_log_setup.setup_multiprocessing_logging_listener( self.master_opts ) # Set up PATH to mockbin self._enter_mockbin() if self.parser.options.transport == 'zeromq': self.start_zeromq_daemons() elif self.parser.options.transport == 'raet': self.start_raet_daemons() elif self.parser.options.transport == 'tcp': self.start_tcp_daemons() self.minion_targets = set(['minion', 'sub_minion']) self.pre_setup_minions() self.setup_minions() if getattr(self.parser.options, 'ssh', False): self.prep_ssh() if self.parser.options.sysinfo: try: print_header( '~~~~~~~ Versions Report ', inline=True, width=getattr(self.parser.options, 'output_columns', PNUM) ) except TypeError: print_header('~~~~~~~ Versions Report ', inline=True) print('\n'.join(salt.version.versions_report())) try: print_header( '~~~~~~~ Minion Grains Information ', inline=True, width=getattr(self.parser.options, 'output_columns', PNUM) ) except TypeError: print_header('~~~~~~~ Minion Grains Information ', inline=True) grains = self.client.cmd('minion', 'grains.items') minion_opts = 
self.minion_opts.copy() minion_opts['color'] = self.parser.options.no_colors is False salt.output.display_output(grains, 'grains', minion_opts) try: print_header( '=', sep='=', inline=True, width=getattr(self.parser.options, 'output_columns', PNUM) ) except TypeError: print_header('', sep='=', inline=True) try: return self finally: self.post_setup_minions() def start_daemon(self, cls, opts, start_fun): def start(cls, opts, start_fun): salt.utils.process.appendproctitle('{0}-{1}'.format(self.__class__.__name__, cls.__name__)) daemon = cls(opts) getattr(daemon, start_fun)() process = multiprocessing.Process(target=start, args=(cls, opts, start_fun)) process.start() return process def start_zeromq_daemons(self): ''' Fire up the daemons used for zeromq tests ''' self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler) self.log_server_process = threading.Thread(target=self.log_server.serve_forever) self.log_server_process.daemon = True self.log_server_process.start() try: sys.stdout.write( ' * {LIGHT_YELLOW}Starting salt-master ... {ENDC}'.format(**self.colors) ) sys.stdout.flush() self.master_process = start_daemon( daemon_name='salt-master', daemon_id=self.master_opts['id'], daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']), daemon_cli_script_name='master', daemon_config=self.master_opts, daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR, daemon_class=SaltMaster, bin_dir_path=SCRIPT_DIR, fail_hard=True, start_timeout=30) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() except (RuntimeWarning, RuntimeError): sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_RED}Starting salt-master ... 
FAILED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() try: sys.stdout.write( ' * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}'.format(**self.colors) ) sys.stdout.flush() self.minion_process = start_daemon( daemon_name='salt-minion', daemon_id=self.master_opts['id'], daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']), daemon_cli_script_name='minion', daemon_config=self.minion_opts, daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR, daemon_class=SaltMinion, bin_dir_path=SCRIPT_DIR, fail_hard=True, start_timeout=30) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() except (RuntimeWarning, RuntimeError): sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() try: sys.stdout.write( ' * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}'.format(**self.colors) ) sys.stdout.flush() self.sub_minion_process = start_daemon( daemon_name='sub salt-minion', daemon_id=self.master_opts['id'], daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']), daemon_cli_script_name='minion', daemon_config=self.sub_minion_opts, daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, daemon_class=SaltMinion, bin_dir_path=SCRIPT_DIR, fail_hard=True, start_timeout=30) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() except (RuntimeWarning, RuntimeError): sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_RED}Starting sub salt-minion ... 
FAILED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() try: sys.stdout.write( ' * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}'.format(**self.colors) ) sys.stdout.flush() self.smaster_process = start_daemon( daemon_name='salt-smaster', daemon_id=self.syndic_master_opts['id'], daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']), daemon_cli_script_name='master', daemon_config=self.syndic_master_opts, daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, daemon_class=SaltMaster, bin_dir_path=SCRIPT_DIR, fail_hard=True, start_timeout=30) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() except (RuntimeWarning, RuntimeError): sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() try: sys.stdout.write( ' * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}'.format(**self.colors) ) sys.stdout.flush() self.syndic_process = start_daemon( daemon_name='salt-syndic', daemon_id=self.syndic_opts['id'], daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']), daemon_cli_script_name='syndic', daemon_config=self.syndic_opts, daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, daemon_class=SaltSyndic, bin_dir_path=SCRIPT_DIR, fail_hard=True, start_timeout=30) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() except (RuntimeWarning, RuntimeError): sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_RED}Starting salt-syndic ... 
FAILED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() if self.parser.options.proxy: try: sys.stdout.write( ' * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}'.format(**self.colors) ) sys.stdout.flush() self.proxy_process = start_daemon( daemon_name='salt-proxy', daemon_id=self.master_opts['id'], daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']), daemon_cli_script_name='proxy', daemon_config=self.proxy_opts, daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR, daemon_class=SaltProxy, bin_dir_path=SCRIPT_DIR, fail_hard=True, start_timeout=30) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() except (RuntimeWarning, RuntimeError): sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}'.format(**self.colors) ) sys.stdout.flush() def start_raet_daemons(self): ''' Fire up the raet daemons! ''' import salt.daemons.flo self.master_process = self.start_daemon(salt.daemons.flo.IofloMaster, self.master_opts, 'start') self.minion_process = self.start_daemon(salt.daemons.flo.IofloMinion, self.minion_opts, 'tune_in') self.sub_minion_process = self.start_daemon(salt.daemons.flo.IofloMinion, self.sub_minion_opts, 'tune_in') # Wait for the daemons to all spin up time.sleep(5) # self.smaster_process = self.start_daemon(salt.daemons.flo.IofloMaster, # self.syndic_master_opts, # 'start') # no raet syndic daemon yet start_tcp_daemons = start_zeromq_daemons def prep_ssh(self): ''' Generate keys and start an ssh daemon on an alternate port ''' sys.stdout.write( ' * {LIGHT_GREEN}Starting {0} ... 
{ENDC}'.format( 'SSH server', **self.colors ) ) keygen = salt.utils.path.which('ssh-keygen') sshd = salt.utils.path.which('sshd') if not (keygen and sshd): print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!') return if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR): os.makedirs(RUNTIME_VARS.TMP_CONF_DIR) # Generate client key pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub') priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test') if os.path.exists(pub_key_test_file): os.remove(pub_key_test_file) if os.path.exists(priv_key_test_file): os.remove(priv_key_test_file) keygen_process = subprocess.Popen( [keygen, '-t', 'ecdsa', '-b', '521', '-C', '"$(whoami)@$(hostname)-$(date -I)"', '-f', 'key_test', '-P', ''], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=RUNTIME_VARS.TMP_CONF_DIR ) _, keygen_err = keygen_process.communicate() if keygen_err: print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_err))) sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config') shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR) auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub') # Generate server key server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'server') if not os.path.exists(server_key_dir): os.makedirs(server_key_dir) server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key') server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub') server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key') server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub') server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key') server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host.ed25519_key.pub') for server_key_file in (server_dsa_priv_key_file, server_dsa_pub_key_file, server_ecdsa_priv_key_file, server_ecdsa_pub_key_file, 
server_ed25519_priv_key_file, server_ed25519_pub_key_file): if os.path.exists(server_key_file): os.remove(server_key_file) keygen_process_dsa = subprocess.Popen( [keygen, '-t', 'dsa', '-b', '1024', '-C', '"$(whoami)@$(hostname)-$(date -I)"', '-f', 'ssh_host_dsa_key', '-P', ''], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=server_key_dir ) _, keygen_dsa_err = keygen_process_dsa.communicate() if keygen_dsa_err: print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_dsa_err))) keygen_process_ecdsa = subprocess.Popen( [keygen, '-t', 'ecdsa', '-b', '521', '-C', '"$(whoami)@$(hostname)-$(date -I)"', '-f', 'ssh_host_ecdsa_key', '-P', ''], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=server_key_dir ) _, keygen_escda_err = keygen_process_ecdsa.communicate() if keygen_escda_err: print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_escda_err))) keygen_process_ed25519 = subprocess.Popen( [keygen, '-t', 'ed25519', '-b', '521', '-C', '"$(whoami)@$(hostname)-$(date -I)"', '-f', 'ssh_host_ed25519_key', '-P', ''], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=server_key_dir ) _, keygen_ed25519_err = keygen_process_ed25519.communicate() if keygen_ed25519_err: print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_ed25519_err))) with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config: ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file)) if not keygen_dsa_err: ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file)) if not keygen_escda_err: ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file)) if not keygen_ed25519_err: ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file)) self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid') self.sshd_process = subprocess.Popen( [sshd, '-f', 'sshd_config', 
'-oPidFile={0}'.format(self.sshd_pidfile)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, cwd=RUNTIME_VARS.TMP_CONF_DIR ) _, sshd_err = self.sshd_process.communicate() if sshd_err: print('sshd had errors on startup: {0}'.format(salt.utils.stringutils.to_str(sshd_err))) else: os.environ['SSH_DAEMON_RUNNING'] = 'True' roster_path = os.path.join(FILES, 'conf/_ssh/roster') shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR) with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster: roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER)) roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')) sys.stdout.write( ' {LIGHT_GREEN}STARTED!\n{ENDC}'.format( **self.colors ) ) @classmethod def config(cls, role): ''' Return a configuration for a master/minion/syndic. Currently these roles are: * master * minion * syndic * syndic_master * sub_minion * proxy ''' return RUNTIME_VARS.RUNTIME_CONFIGS[role] @classmethod def config_location(cls): return RUNTIME_VARS.TMP_CONF_DIR @property def client(self): ''' Return a local client which will be used for example to ping and sync the test minions. This client is defined as a class attribute because its creation needs to be deferred to a latter stage. If created it on `__enter__` like it previously was, it would not receive the master events. 
''' if 'runtime_client' not in RUNTIME_VARS.RUNTIME_CONFIGS: RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = salt.client.get_local_client( mopts=self.master_opts ) return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] @classmethod def transplant_configs(cls, transport='zeromq'): if os.path.isdir(RUNTIME_VARS.TMP_CONF_DIR): shutil.rmtree(RUNTIME_VARS.TMP_CONF_DIR) os.makedirs(RUNTIME_VARS.TMP_CONF_DIR) os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR) os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR) os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR) print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR)) tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts') with salt.utils.files.fopen(tests_known_hosts_file, 'w') as known_hosts: known_hosts.write('') # This master connects to syndic_master via a syndic master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master')) master_opts['known_hosts_file'] = tests_known_hosts_file master_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache') master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER master_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR master_opts['root_dir'] = os.path.join(TMP, 'rootdir') master_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki', 'master') # This is the syndic for master # Let's start with a copy of the syndic master configuration syndic_opts = copy.deepcopy(master_opts) # Let's update with the syndic configuration syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic'))) syndic_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache') syndic_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR # This minion connects to master minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion')) minion_opts['cachedir'] = os.path.join(TMP, 'rootdir', 'cache') minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER 
minion_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR minion_opts['root_dir'] = os.path.join(TMP, 'rootdir') minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir', 'pki') minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts') minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases') # This sub_minion also connects to master sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion')) sub_minion_opts['cachedir'] = os.path.join(TMP, 'rootdir-sub-minion', 'cache') sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER sub_minion_opts['config_dir'] = RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion') sub_minion_opts['pki_dir'] = os.path.join(TMP, 'rootdir-sub-minion', 'pki', 'minion') sub_minion_opts['hosts.file'] = os.path.join(TMP, 'rootdir', 'hosts') sub_minion_opts['aliases.file'] = os.path.join(TMP, 'rootdir', 'aliases') # This is the master of masters syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master')) syndic_master_opts['cachedir'] = os.path.join(TMP, 'rootdir-syndic-master', 'cache') syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER syndic_master_opts['config_dir'] = RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master') syndic_master_opts['pki_dir'] = os.path.join(TMP, 'rootdir-syndic-master', 'pki', 'master') # This proxy connects to master proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy')) proxy_opts['cachedir'] = os.path.join(TMP, 'rootdir-proxy', 'cache') # proxy_opts['user'] = running_tests_user proxy_opts['config_dir'] = RUNTIME_VARS.TMP_CONF_DIR proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy') proxy_opts['pki_dir'] = os.path.join(TMP, 'rootdir-proxy', 'pki') proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts') proxy_opts['aliases.file'] = os.path.join(TMP, 
'rootdir-proxy', 'aliases') if transport == 'raet': master_opts['transport'] = 'raet' master_opts['raet_port'] = 64506 minion_opts['transport'] = 'raet' minion_opts['raet_port'] = 64510 sub_minion_opts['transport'] = 'raet' sub_minion_opts['raet_port'] = 64520 # syndic_master_opts['transport'] = 'raet' if transport == 'tcp': master_opts['transport'] = 'tcp' minion_opts['transport'] = 'tcp' sub_minion_opts['transport'] = 'tcp' syndic_master_opts['transport'] = 'tcp' proxy_opts['transport'] = 'tcp' # Set up config options that require internal data master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = { 'base': [ RUNTIME_VARS.TMP_PILLAR_TREE, os.path.join(FILES, 'pillar', 'base'), ] } master_opts['file_roots'] = syndic_master_opts['file_roots'] = { 'base': [ os.path.join(FILES, 'file', 'base'), # Let's support runtime created files that can be used like: # salt://my-temp-file.txt RUNTIME_VARS.TMP_STATE_TREE ], # Alternate root to test __env__ choices 'prod': [ os.path.join(FILES, 'file', 'prod'), RUNTIME_VARS.TMP_PRODENV_STATE_TREE ] } master_opts.setdefault('reactor', []).append( { 'salt/minion/*/start': [ os.path.join(FILES, 'reactor-sync-minion.sls') ], } ) for opts_dict in (master_opts, syndic_master_opts): if 'ext_pillar' not in opts_dict: opts_dict['ext_pillar'] = [] if salt.utils.platform.is_windows(): opts_dict['ext_pillar'].append( {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))}) else: opts_dict['ext_pillar'].append( {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))}) for opts_dict in (master_opts, syndic_master_opts): # We need to copy the extension modules into the new master root_dir or # it will be prefixed by it new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules') if not os.path.exists(new_extension_modules_path): shutil.copytree( os.path.join( INTEGRATION_TEST_DIR, 'files', 'extension_modules' ), new_extension_modules_path ) opts_dict['extension_modules'] = 
os.path.join(opts_dict['root_dir'], 'extension_modules') # Point the config values to the correct temporary paths for name in ('hosts', 'aliases'): optname = '{0}.file'.format(name) optname_path = os.path.join(TMP, name) master_opts[optname] = optname_path minion_opts[optname] = optname_path sub_minion_opts[optname] = optname_path syndic_opts[optname] = optname_path syndic_master_opts[optname] = optname_path proxy_opts[optname] = optname_path master_opts['runtests_conn_check_port'] = get_unused_localhost_port() minion_opts['runtests_conn_check_port'] = get_unused_localhost_port() sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port() syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port() syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port() proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port() for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts): if 'engines' not in conf: conf['engines'] = [] conf['engines'].append({'salt_runtests': {}}) if 'engines_dirs' not in conf: conf['engines_dirs'] = [] conf['engines_dirs'].insert(0, ENGINES_DIR) if 'log_handlers_dirs' not in conf: conf['log_handlers_dirs'] = [] conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR) conf['runtests_log_port'] = SALT_LOG_PORT # ----- Transcribe Configuration ----------------------------------------------------------------------------> for entry in os.listdir(RUNTIME_VARS.CONF_DIR): if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'): # These have runtime computed values and will be handled # differently continue entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry) if os.path.isfile(entry_path): shutil.copy( entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry) ) elif os.path.isdir(entry_path): shutil.copytree( entry_path, os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry) ) for entry in ('master', 'minion', 'sub_minion', 'syndic', 
'syndic_master', 'proxy'): computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)]) with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_: fp_.write(yaml.dump(computed_config, default_flow_style=False)) sub_minion_computed_config = copy.deepcopy(sub_minion_opts) with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as wfh: wfh.write( yaml.dump(sub_minion_computed_config, default_flow_style=False) ) shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'master')) syndic_master_computed_config = copy.deepcopy(syndic_master_opts) with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as wfh: wfh.write( yaml.dump(syndic_master_computed_config, default_flow_style=False) ) syndic_computed_config = copy.deepcopy(syndic_opts) with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as wfh: wfh.write( yaml.dump(syndic_computed_config, default_flow_style=False) ) shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master')) # <---- Transcribe Configuration ----------------------------------------------------------------------------- # ----- Verify Environment ----------------------------------------------------------------------------------> master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master')) minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion')) syndic_opts = salt.config.syndic_config( os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), ) sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion')) syndic_master_opts = 
salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master')) proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy')) RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts) RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts) RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts) RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts) RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts) RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts) verify_env([os.path.join(master_opts['pki_dir'], 'minions'), os.path.join(master_opts['pki_dir'], 'minions_pre'), os.path.join(master_opts['pki_dir'], 'minions_rejected'), os.path.join(master_opts['pki_dir'], 'minions_denied'), os.path.join(master_opts['cachedir'], 'jobs'), os.path.join(master_opts['cachedir'], 'raet'), os.path.join(master_opts['root_dir'], 'cache', 'tokens'), os.path.join(syndic_master_opts['pki_dir'], 'minions'), os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'), os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'), os.path.join(syndic_master_opts['cachedir'], 'jobs'), os.path.join(syndic_master_opts['cachedir'], 'raet'), os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'), os.path.join(master_opts['pki_dir'], 'accepted'), os.path.join(master_opts['pki_dir'], 'rejected'), os.path.join(master_opts['pki_dir'], 'pending'), os.path.join(syndic_master_opts['pki_dir'], 'accepted'), os.path.join(syndic_master_opts['pki_dir'], 'rejected'), os.path.join(syndic_master_opts['pki_dir'], 'pending'), os.path.join(syndic_master_opts['cachedir'], 'raet'), os.path.join(minion_opts['pki_dir'], 'accepted'), os.path.join(minion_opts['pki_dir'], 'rejected'), os.path.join(minion_opts['pki_dir'], 'pending'), os.path.join(minion_opts['cachedir'], 'raet'), os.path.join(sub_minion_opts['pki_dir'], 'accepted'), os.path.join(sub_minion_opts['pki_dir'], 'rejected'), 
os.path.join(sub_minion_opts['pki_dir'], 'pending'), os.path.join(sub_minion_opts['cachedir'], 'raet'), os.path.dirname(master_opts['log_file']), minion_opts['extension_modules'], sub_minion_opts['extension_modules'], sub_minion_opts['pki_dir'], master_opts['sock_dir'], syndic_master_opts['sock_dir'], sub_minion_opts['sock_dir'], minion_opts['sock_dir'], RUNTIME_VARS.TMP_STATE_TREE, RUNTIME_VARS.TMP_PILLAR_TREE, RUNTIME_VARS.TMP_PRODENV_STATE_TREE, TMP, ], RUNTIME_VARS.RUNNING_TESTS_USER, root_dir=master_opts['root_dir'], ) cls.master_opts = master_opts cls.minion_opts = minion_opts # cls.proxy_opts = proxy_opts cls.sub_minion_opts = sub_minion_opts cls.syndic_opts = syndic_opts cls.syndic_master_opts = syndic_master_opts cls.proxy_opts = proxy_opts # <---- Verify Environment ----------------------------------------------------------------------------------- def __exit__(self, type, value, traceback): ''' Kill the minion and master processes ''' self.sub_minion_process.terminate() self.minion_process.terminate() if hasattr(self, 'proxy_process'): self.proxy_process.terminate() self.master_process.terminate() try: self.syndic_process.terminate() except AttributeError: pass try: self.smaster_process.terminate() except AttributeError: pass #salt.utils.process.clean_proc(self.sub_minion_process, wait_for_kill=50) #self.sub_minion_process.join() #salt.utils.process.clean_proc(self.minion_process, wait_for_kill=50) #self.minion_process.join() #salt.utils.process.clean_proc(self.master_process, wait_for_kill=50) #self.master_process.join() #try: # salt.utils.process.clean_proc(self.syndic_process, wait_for_kill=50) # self.syndic_process.join() #except AttributeError: # pass #try: # salt.utils.process.clean_proc(self.smaster_process, wait_for_kill=50) # self.smaster_process.join() #except AttributeError: # pass self.log_server.server_close() self.log_server.shutdown() self._exit_mockbin() self._exit_ssh() self.log_server_process.join() # Shutdown the multiprocessing 
logging queue listener salt_log_setup.shutdown_multiprocessing_logging() salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True) def pre_setup_minions(self): ''' Subclass this method for additional minion setups. ''' def setup_minions(self): ''' Minions setup routines ''' def post_setup_minions(self): ''' Subclass this method to execute code after the minions have been setup ''' def _enter_mockbin(self): path = os.environ.get('PATH', '') path_items = path.split(os.pathsep) if MOCKBIN not in path_items: path_items.insert(0, MOCKBIN) os.environ['PATH'] = os.pathsep.join(path_items) def _exit_ssh(self): if hasattr(self, 'sshd_process'): try: self.sshd_process.kill() except OSError as exc: if exc.errno != 3: raise with salt.utils.files.fopen(self.sshd_pidfile) as fhr: try: os.kill(int(fhr.read()), signal.SIGKILL) except OSError as exc: if exc.errno != 3: raise def _exit_mockbin(self): path = os.environ.get('PATH', '') path_items = path.split(os.pathsep) try: path_items.remove(MOCKBIN) except ValueError: pass os.environ['PATH'] = os.pathsep.join(path_items) @classmethod def clean(cls): ''' Clean out the tmp files ''' def remove_readonly(func, path, excinfo): # Give full permissions to owner os.chmod(path, stat.S_IRWXU) func(path) for dirname in (TMP, RUNTIME_VARS.TMP_STATE_TREE, RUNTIME_VARS.TMP_PILLAR_TREE, RUNTIME_VARS.TMP_PRODENV_STATE_TREE): if os.path.isdir(dirname): shutil.rmtree(dirname, onerror=remove_readonly) def wait_for_jid(self, targets, jid, timeout=120): time.sleep(1) # Allow some time for minions to accept jobs now = datetime.now() expire = now + timedelta(seconds=timeout) job_finished = False while now <= expire: running = self.__client_job_running(targets, jid) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) if not running and job_finished is False: # Let's not have false positives and wait one more seconds job_finished = True elif not running and job_finished is True: return True 
elif running and job_finished is True: job_finished = False if job_finished is False: sys.stdout.write( ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format( '{0}'.format(expire - now).rsplit('.', 1)[0], ', '.join(running), **self.colors ) ) sys.stdout.flush() time.sleep(1) now = datetime.now() else: # pylint: disable=W0120 sys.stdout.write( '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information ' 'back\n'.format(**self.colors) ) sys.stdout.flush() return False def __client_job_running(self, targets, jid): running = self.client.cmd( list(targets), 'saltutil.running', tgt_type='list' ) return [ k for (k, v) in six.iteritems(running) if v and v[0]['jid'] == jid ] def wait_for_minion_connections(self, targets, timeout): salt.utils.process.appendproctitle('WaitForMinionConnections') sys.stdout.write( ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to ' 'connect back\n'.format( (timeout > 60 and timedelta(seconds=timeout) or '{0} secs'.format(timeout)), ', '.join(targets), **self.colors ) ) sys.stdout.flush() expected_connections = set(targets) now = datetime.now() expire = now + timedelta(seconds=timeout) while now <= expire: sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format( '{0}'.format(expire - now).rsplit('.', 1)[0], ', '.join(expected_connections), **self.colors ) ) sys.stdout.flush() try: responses = self.client.cmd( list(expected_connections), 'test.ping', tgt_type='list', ) # we'll get this exception if the master process hasn't finished starting yet except SaltClientError: time.sleep(0.1) now = datetime.now() continue for target in responses: if target not in expected_connections: # Someone(minion) else "listening"? 
continue expected_connections.remove(target) sys.stdout.write( '\r{0}\r'.format( ' ' * getattr(self.parser.options, 'output_columns', PNUM) ) ) sys.stdout.write( ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format( target, **self.colors ) ) sys.stdout.flush() if not expected_connections: return time.sleep(1) now = datetime.now() else: # pylint: disable=W0120 print( '\n {LIGHT_RED}*{ENDC} WARNING: Minions failed to connect ' 'back. Tests requiring them WILL fail'.format(**self.colors) ) try: print_header( '=', sep='=', inline=True, width=getattr(self.parser.options, 'output_columns', PNUM) ) except TypeError: print_header('=', sep='=', inline=True) raise SystemExit() def sync_minion_modules_(self, modules_kind, targets, timeout=None): if not timeout: timeout = 120 # Let's sync all connected minions print( ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s {1} ' '(saltutil.sync_{1})'.format( ', '.join(targets), modules_kind, **self.colors ) ) syncing = set(targets) jid_info = self.client.run_job( list(targets), 'saltutil.sync_{0}'.format(modules_kind), tgt_type='list', timeout=999999999999999, ) if self.wait_for_jid(targets, jid_info['jid'], timeout) is False: print( ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. ' 'Tests requiring these {0} WILL fail'.format( modules_kind, **self.colors) ) raise SystemExit() while syncing: rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1) if rdata: for name, output in six.iteritems(rdata): if not output['ret']: # Already synced!? syncing.remove(name) continue if isinstance(output['ret'], six.string_types): # An errors has occurred print( ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: ' '{1}'.format( name, output['ret'], modules_kind, **self.colors) ) return False print( ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: ' '{1}'.format( name, ', '.join(output['ret']), modules_kind, **self.colors ) ) # Synced! try: syncing.remove(name) except KeyError: print( ' {LIGHT_RED}*{ENDC} {0} already synced??? 
' '{1}'.format(name, output, **self.colors) ) return True def sync_minion_states(self, targets, timeout=None): salt.utils.process.appendproctitle('SyncMinionStates') self.sync_minion_modules_('states', targets, timeout=timeout) def sync_minion_modules(self, targets, timeout=None): salt.utils.process.appendproctitle('SyncMinionModules') self.sync_minion_modules_('modules', targets, timeout=timeout) def sync_minion_grains(self, targets, timeout=None): salt.utils.process.appendproctitle('SyncMinionGrains') self.sync_minion_modules_('grains', targets, timeout=timeout)
_socketcan.py
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>

import enum
import time
import errno
import typing
import socket
import struct
import select
import asyncio
import logging
import warnings
import threading
import contextlib
import pyuavcan.transport
from pyuavcan.transport import Timestamp
from pyuavcan.transport.can.media import Media, Envelope, FilterConfiguration, FrameFormat
from pyuavcan.transport.can.media import DataFrame

# Disable unused ignore warning for this file only because there appears to be no other way to make MyPy
# accept this file both on Windows and GNU/Linux.
# mypy: warn_unused_ignores=False

_logger = logging.getLogger(__name__)


class SocketCANMedia(Media):
    """
    This media implementation provides a simple interface for the standard Linux SocketCAN media layer.
    If you are testing with a virtual CAN bus and you need CAN FD, you may need to enable it manually
    (https://stackoverflow.com/questions/36568167/can-fd-support-for-virtual-can-vcan-on-socketcan);
    otherwise, you may observe errno 90 "Message too long". Configuration example::

        ip link set vcan0 mtu 72

    SocketCAN documentation: https://www.kernel.org/doc/Documentation/networking/can.txt
    """

    def __init__(self, iface_name: str, mtu: int, loop: typing.Optional[asyncio.AbstractEventLoop] = None) -> None:
        """
        CAN Classic/FD is selected automatically based on the MTU. It is not possible to use CAN FD with MTU of 8 bytes.

        :param iface_name: E.g., ``can0``.

        :param mtu: The maximum data field size in bytes. CAN FD is used if this value > 8, Classic CAN otherwise.
            This value must belong to Media.VALID_MTU_SET.

        :param loop: Deprecated.
        """
        self._mtu = int(mtu)
        if self._mtu not in self.VALID_MTU_SET:
            raise ValueError(f"Invalid MTU: {self._mtu} not in {self.VALID_MTU_SET}")
        if loop:
            warnings.warn("The loop argument is deprecated", DeprecationWarning)

        self._iface_name = str(iface_name)
        # MTU above the Classic CAN payload capacity (8 bytes) implies CAN FD framing on the socket.
        self._is_fd = self._mtu > _NativeFrameDataCapacity.CAN_CLASSIC
        self._native_frame_data_capacity = int(
            {
                False: _NativeFrameDataCapacity.CAN_CLASSIC,
                True: _NativeFrameDataCapacity.CAN_FD,
            }[self._is_fd]
        )
        # Size of one kernel-level frame record: fixed header plus the fixed-size data field.
        self._native_frame_size = _FRAME_HEADER_STRUCT.size + self._native_frame_data_capacity

        self._sock = _make_socket(iface_name, can_fd=self._is_fd)
        self._ctl_main, self._ctl_worker = socket.socketpair()  # This is used for controlling the worker thread.
        self._closed = False
        self._maybe_thread: typing.Optional[threading.Thread] = None
        self._loopback_enabled = False

        self._ancillary_data_buffer_size = socket.CMSG_SPACE(_TIMEVAL_STRUCT.size)  # Used for recvmsg()

        super().__init__()

    @property
    def interface_name(self) -> str:
        """Name of the underlying SocketCAN network interface, e.g. ``can0``."""
        return self._iface_name

    @property
    def mtu(self) -> int:
        """The maximum data field size in bytes, as configured at construction time."""
        return self._mtu

    @property
    def number_of_acceptance_filters(self) -> int:
        """
        512 for SocketCAN.

        - https://github.com/torvalds/linux/blob/9c7db5004280767566e91a33445bf93aa479ef02/net/can/af_can.c#L327-L348
        - https://github.com/torvalds/linux/blob/54dee406374ce8adb352c48e175176247cb8db7c/include/uapi/linux/can.h#L200
        """
        return 512

    def start(self, handler: Media.ReceivedFramesHandler, no_automatic_retransmission: bool) -> None:
        """
        Spawn the background RX thread that feeds received frames into ``handler`` via the current event loop.
        Raises RuntimeError if called more than once. The no-automatic-retransmission mode is not supported
        by this implementation and is merely logged.
        """
        if self._maybe_thread is None:
            # The event loop captured here is the one the handler will be invoked on (via call_soon_threadsafe).
            self._maybe_thread = threading.Thread(
                target=self._thread_function, name=str(self), args=(handler, asyncio.get_event_loop()), daemon=True
            )
            self._maybe_thread.start()
            if no_automatic_retransmission:
                _logger.info("%s non-automatic retransmission is not supported", self)
        else:
            raise RuntimeError("The RX frame handler is already set up")

    def configure_acceptance_filters(self, configuration: typing.Sequence[FilterConfiguration]) -> None:
        """
        Not yet implemented for SocketCAN: the requested configuration is only logged.
        Raises ResourceClosedError if the media instance is already closed.
        """
        if self._closed:
            raise pyuavcan.transport.ResourceClosedError(repr(self))
        _logger.info(
            "%s FIXME: acceptance filter configuration is not yet implemented; please submit patches! "
            "Requested configuration: %s",
            self,
            ", ".join(map(str, configuration)),
        )

    async def send(self, frames: typing.Iterable[Envelope], monotonic_deadline: float) -> int:
        """
        Send the given frames one by one until the deadline (event-loop monotonic time) expires.
        Returns the number of frames successfully handed to the socket; stops early (without raising)
        on deadline expiration. Raises InvalidMediaConfigurationError when the kernel rejects an FD
        frame with EINVAL, which typically indicates that the interface MTU is too small for CAN FD.
        """
        num_sent = 0
        for f in frames:
            if self._closed:
                raise pyuavcan.transport.ResourceClosedError(repr(self))
            # Loopback has to be toggled per-frame because it is a socket-level option, not a frame flag.
            self._set_loopback_enabled(f.loopback)
            try:
                loop = asyncio.get_running_loop()
                await asyncio.wait_for(
                    loop.sock_sendall(self._sock, self._compile_native_frame(f.frame)),
                    timeout=monotonic_deadline - loop.time(),
                )
            except OSError as err:
                if err.errno == errno.EINVAL and self._is_fd:
                    raise pyuavcan.transport.InvalidMediaConfigurationError(
                        "Invalid socketcan configuration: "
                        "the device probably doesn't support CAN-FD. "
                        "Try setting MTU to 8 (Classic CAN)"
                    ) from err
                raise err
            except asyncio.TimeoutError:
                break
            else:
                num_sent += 1
        return num_sent

    def close(self) -> None:
        """
        Stop the RX thread (by poking the control socketpair to unblock select()) and close all sockets.
        Safe to call multiple times; socket close operations are expected to be idempotent.
        """
        try:
            self._closed = True
            if self._ctl_main.fileno() >= 0:  # Ignore if already closed.
                self._ctl_main.send(b"stop")  # The actual data is irrelevant, we just need it to unblock the select().
            if self._maybe_thread:
                self._maybe_thread.join(timeout=_SELECT_TIMEOUT)
                self._maybe_thread = None
        finally:
            self._sock.close()  # These are expected to be idempotent.
            self._ctl_worker.close()
            self._ctl_main.close()

    def _thread_function(self, handler: Media.ReceivedFramesHandler, loop: asyncio.AbstractEventLoop) -> None:
        """
        RX thread body: blocks in select() on the CAN socket and the control socketpair, drains all
        pending frames on readiness, and posts them to the event loop in a single batch. Exits when
        the control socket delivers data (close() was called) or the media is marked closed.
        """

        def handler_wrapper(frs: typing.Sequence[typing.Tuple[Timestamp, Envelope]]) -> None:
            try:
                if not self._closed:  # Don't call after closure to prevent race conditions and use-after-close.
                    handler(frs)
            except Exception as exc:
                _logger.exception("%s: Unhandled exception in the receive handler: %s; lost frames: %s", self, exc, frs)

        while not self._closed:
            try:
                (
                    read_ready,
                    _,
                    _,
                ) = select.select((self._sock, self._ctl_worker), (), (), _SELECT_TIMEOUT)
                # One monotonic sample is shared by every frame drained in this wake-up.
                ts_mono_ns = time.monotonic_ns()

                if self._sock in read_ready:
                    frames: typing.List[typing.Tuple[Timestamp, Envelope]] = []
                    try:
                        # Drain the socket completely; the non-blocking socket raises EAGAIN when empty.
                        while True:
                            frames.append(self._read_frame(ts_mono_ns))
                    except OSError as ex:
                        if ex.errno != errno.EAGAIN:
                            raise
                    loop.call_soon_threadsafe(handler_wrapper, frames)

                if self._ctl_worker in read_ready:
                    if self._ctl_worker.recv(1):  # pragma: no branch
                        break
            except Exception as ex:  # pragma: no cover
                # If any of the file descriptors is already closed, treat it as a shutdown signal.
                if self._sock.fileno() < 0 or self._ctl_worker.fileno() < 0 or self._ctl_main.fileno() < 0:
                    self._closed = True
                _logger.exception("%s thread failure: %s", self, ex)
                time.sleep(1)  # Is this an adequate failure management strategy?

        self._closed = True
        _logger.debug("%s thread is about to exit", self)

    def _read_frame(self, ts_mono_ns: int) -> typing.Tuple[Timestamp, Envelope]:
        """
        Read one native frame from the socket along with its kernel SO_TIMESTAMP ancillary data.
        Loops until a parseable (non-RTR, non-error) frame is obtained; raises OSError (EAGAIN)
        when the non-blocking socket has nothing to read. MSG_CONFIRM marks loopback frames.
        """
        while True:
            data, ancdata, msg_flags, _addr = self._sock.recvmsg(  # type: ignore
                self._native_frame_size, self._ancillary_data_buffer_size
            )
            assert msg_flags & socket.MSG_TRUNC == 0, "The data buffer is not large enough"
            assert msg_flags & socket.MSG_CTRUNC == 0, "The ancillary data buffer is not large enough"
            loopback = bool(msg_flags & socket.MSG_CONFIRM)

            ts_system_ns = 0
            for cmsg_level, cmsg_type, cmsg_data in ancdata:
                if cmsg_level == socket.SOL_SOCKET and cmsg_type == _SO_TIMESTAMP:
                    # The kernel timestamp is a struct timeval (seconds + microseconds); convert to ns.
                    sec, usec = _TIMEVAL_STRUCT.unpack(cmsg_data)
                    ts_system_ns = (sec * 1_000_000 + usec) * 1000
                else:
                    assert False, f"Unexpected ancillary data: {cmsg_level}, {cmsg_type}, {cmsg_data!r}"

            assert ts_system_ns > 0, "Missing the timestamp; does the driver support timestamping?"
            timestamp = Timestamp(system_ns=ts_system_ns, monotonic_ns=ts_mono_ns)
            out = SocketCANMedia._parse_native_frame(data)
            if out is not None:
                return timestamp, Envelope(out, loopback=loopback)

    def _compile_native_frame(self, source: DataFrame) -> bytes:
        """
        Serialize a DataFrame into the fixed-size kernel representation (header + zero-padded data field).
        For CAN FD the BRS flag is always set; extended identifiers carry the EFF flag in the ID word.
        """
        flags = _CANFD_BRS if self._is_fd else 0
        ident = source.identifier | (_CAN_EFF_FLAG if source.format == FrameFormat.EXTENDED else 0)
        header = _FRAME_HEADER_STRUCT.pack(ident, len(source.data), flags)
        out = header + source.data.ljust(self._native_frame_data_capacity, b"\x00")
        assert len(out) == self._native_frame_size
        return out

    @staticmethod
    def _parse_native_frame(source: bytes) -> typing.Optional[DataFrame]:
        """
        Deserialize a kernel frame record into a DataFrame.
        Returns None for unsupported frames (RTR and error frames), which are dropped silently
        aside from a debug log entry.
        """
        header_size = _FRAME_HEADER_STRUCT.size
        ident_raw, data_length, _flags = _FRAME_HEADER_STRUCT.unpack(source[:header_size])
        if (ident_raw & _CAN_RTR_FLAG) or (ident_raw & _CAN_ERR_FLAG):  # Unsupported format, ignore silently
            _logger.debug("Unsupported CAN frame dropped; raw SocketCAN ID is %08x", ident_raw)
            return None
        frame_format = FrameFormat.EXTENDED if ident_raw & _CAN_EFF_FLAG else FrameFormat.BASE
        data = source[header_size : header_size + data_length]
        assert len(data) == data_length
        ident = ident_raw & _CAN_EFF_MASK
        return DataFrame(frame_format, ident, bytearray(data))

    def _set_loopback_enabled(self, enable: bool) -> None:
        # The setsockopt call is skipped when the requested state is already in effect.
        if enable != self._loopback_enabled:
            self._sock.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_RECV_OWN_MSGS, int(enable))  # type: ignore
            self._loopback_enabled = enable

    @staticmethod
    def list_available_interface_names() -> typing.Iterable[str]:
        """
        Discover CAN interfaces that are UP, preferring the output of ``ip link show``;
        falls back to scraping /proc/net/dev when the command is unavailable or fails.
        """
        import re
        import subprocess

        try:
            proc = subprocess.run("ip link show", check=True, timeout=1, text=True, shell=True, capture_output=True)
            return re.findall(r"\d+?: ([a-z0-9]+?): <[^>]*UP[^>]*>.*\n *link/can", proc.stdout)
        except Exception as ex:
            _logger.debug(
                "Could not scrape the output of `ip link show`, using the fallback method: %s", ex, exc_info=True
            )
            with open("/proc/net/dev") as f:  # pylint: disable=unspecified-encoding
                out = [line.split(":")[0].strip() for line in f if ":" in line and "can" in line]
            # Names containing "can" are listed first.
            return sorted(out, key=lambda x: "can" in x, reverse=True)


class _NativeFrameDataCapacity(enum.IntEnum):
    # Fixed data-field sizes of the kernel frame structures below.
    CAN_CLASSIC = 8
    CAN_FD = 64


_SELECT_TIMEOUT = 1.0  # Upper bound on how long the RX thread may block in select() per iteration.

# struct can_frame {
#     canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
#     __u8    can_dlc; /* data length code: 0 .. 8 */
#     __u8    data[8] __attribute__((aligned(8)));
# };
# struct canfd_frame {
#     canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
#     __u8    len;     /* frame payload length in byte */
#     __u8    flags;   /* additional flags for CAN FD */
#     __u8    __res0;  /* reserved / padding */
#     __u8    __res1;  /* reserved / padding */
#     __u8    data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
# };
_FRAME_HEADER_STRUCT = struct.Struct("=IBB2x")  # Using standard size because the native definition relies on stdint.h
_TIMEVAL_STRUCT = struct.Struct("@Ll")  # Using native size because the native definition uses plain integers

# From the Linux kernel; not exposed via the Python's socket module
_SO_TIMESTAMP = 29

_CANFD_BRS = 1

_CAN_EFF_FLAG = 0x80000000
_CAN_RTR_FLAG = 0x40000000
_CAN_ERR_FLAG = 0x20000000

_CAN_EFF_MASK = 0x1FFFFFFF


def _make_socket(iface_name: str, can_fd: bool) -> socket.socket:
    """
    Create, bind, and configure a non-blocking raw CAN socket on the given interface.
    Kernel receive timestamping (SO_TIMESTAMP) is always enabled; CAN FD frames are
    enabled when requested. The socket is closed on any configuration failure.
    """
    s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)  # type: ignore
    try:
        s.bind((iface_name,))
        s.setsockopt(socket.SOL_SOCKET, _SO_TIMESTAMP, 1)  # timestamping
        if can_fd:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FD_FRAMES, 1)  # type: ignore

        s.setblocking(False)

        if 0 != s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
            raise OSError("Could not configure the socket: getsockopt(SOL_SOCKET, SO_ERROR) != 0")
    except BaseException:
        with contextlib.suppress(Exception):
            s.close()
        raise
    return s
# ==== file: tls_server_openssl.py ====
from __future__ import print_function
import sys
import os

# Make the project root and the current directory importable regardless of
# where the script is launched from.
if not ('..' in sys.path or os.path.realpath('..') in sys.path):
    sys.path.append(os.path.realpath('..'))
if not ('.' in sys.path or os.path.realpath('.') in sys.path):
    sys.path.append(os.path.realpath('.'))

from net_common import *
from xre_common import *
from sslmasterkey import get_ssl_master_key

"""
This is a python implementation of the XFiles Remote Explorer (XRE) server-side protocol.
The TLS channel is established using python's openssl native bindings.
The TLS session master secret for end-to-end verification is extracted by looking
up in the native C structures (see sslmasterkey.py; with this feature enabled,
it works on any well known OS - Windows,OSX,Linux,BSD - with Python 3 and
OpenSSL 1.1 bindings; commenting the relevant code lines should make it work
also with Python 2.7, and with OpenSSL 1.0 bindings)
"""


def xre_server_session_wrapper(conn):
    """
    Log session metadata (peer address, ciphersuite, SHA-256 of the TLS master
    secret for end-to-end verification) and delegate to xre_server_session.

    :param conn: an established server-side SSLSocket for one client session.
    """
    client_host, client_port = conn.getpeername()
    print("Serving client:", client_host, " at port:", client_port)
    print("Ciphersuite:", conn.cipher())
    from hashlib import sha256
    sha_ = sha256(get_ssl_master_key(conn))
    print("SHA256 of this session's master secret:\n", sha_.hexdigest())
    xre_server_session(conn)


if __name__ == '__main__':
    # ssl.wrap_socket() was deprecated in Python 3.7 and removed in 3.12;
    # build an explicit server-side context instead. The protocol is pinned
    # to TLS 1.2 to preserve the original PROTOCOL_TLSv1_2 behaviour, on
    # which the master-secret extraction in sslmasterkey.py relies.
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.minimum_version = ssl.TLSVersion.TLSv1_2
    context.maximum_version = ssl.TLSVersion.TLSv1_2
    context.load_cert_chain(certfile="dummycrt.pem", keyfile="dummykey.pem")

    bindsocket = socket.socket()
    bindsocket.bind(('0.0.0.0', 11111))
    bindsocket.listen(5)
    print('XRE server started')
    while True:
        newsocket, fromaddr = bindsocket.accept()
        connstream = context.wrap_socket(newsocket, server_side=True)
        # daemon=True replaces Thread.setDaemon(), deprecated since 3.10;
        # daemon threads let the process exit without joining each session.
        t = threading.Thread(target=xre_server_session_wrapper, args=(connstream,), daemon=True)
        t.start()
# ==== file: test_manager.py ====
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging import multiprocessing import os import pathlib import random import socket import sys import threading import unittest from datetime import datetime, timedelta from tempfile import TemporaryDirectory from unittest import mock from unittest.mock import MagicMock, PropertyMock import pytest from freezegun import freeze_time from airflow.configuration import conf from airflow.dag_processing.manager import ( DagFileProcessorAgent, DagFileProcessorManager, DagFileStat, DagParsingSignal, DagParsingStat, ) from airflow.dag_processing.processor import DagFileProcessorProcess from airflow.jobs.local_task_job import LocalTaskJob as LJ from airflow.models import DagBag, DagModel, TaskInstance as TI, errors from airflow.models.serialized_dag import SerializedDagModel from airflow.models.taskinstance import SimpleTaskInstance from airflow.utils import timezone from airflow.utils.callback_requests import CallbackRequest, TaskCallbackRequest from airflow.utils.net import get_hostname from airflow.utils.session import create_session from airflow.utils.state import State from tests.core.test_logging_config import SETTINGS_FILE_VALID, settings_context from tests.test_utils.config import conf_vars from 
tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags TEST_DAG_FOLDER = pathlib.Path(__file__).parent.parent / 'dags' DEFAULT_DATE = timezone.datetime(2016, 1, 1) class FakeDagFileProcessorRunner(DagFileProcessorProcess): # This fake processor will return the zombies it received in constructor # as its processing result w/o actually parsing anything. def __init__(self, file_path, pickle_dags, dag_ids, callbacks): super().__init__(file_path, pickle_dags, dag_ids, callbacks) # We need a "real" selectable handle for waitable_handle to work readable, writable = multiprocessing.Pipe(duplex=False) writable.send('abc') writable.close() self._waitable_handle = readable self._result = 0, 0 def start(self): pass @property def start_time(self): return DEFAULT_DATE @property def pid(self): return 1234 @property def done(self): return True @property def result(self): return self._result @staticmethod def _create_process(file_path, callback_requests, dag_ids, pickle_dags): return FakeDagFileProcessorRunner( file_path, pickle_dags, dag_ids, callback_requests, ) @property def waitable_handle(self): return self._waitable_handle class TestDagFileProcessorManager: def setup_method(self): clear_db_runs() def run_processor_manager_one_loop(self, manager, parent_pipe): if not manager._async_mode: parent_pipe.send(DagParsingSignal.AGENT_RUN_ONCE) results = [] while True: manager._run_parsing_loop() while parent_pipe.poll(timeout=0.01): obj = parent_pipe.recv() if not isinstance(obj, DagParsingStat): results.append(obj) elif obj.done: return results raise RuntimeError("Shouldn't get here - nothing to read, but manager not finished!") @conf_vars({('core', 'load_examples'): 'False'}) def test_remove_file_clears_import_error(self, tmpdir): filename_to_parse = tmpdir / 'temp_dag.py' # Generate original import error with open(filename_to_parse, 'w') as file_to_parse: file_to_parse.writelines('an invalid airflow DAG') child_pipe, parent_pipe = multiprocessing.Pipe() 
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn') manager = DagFileProcessorManager( dag_directory=tmpdir, max_runs=1, processor_timeout=timedelta.max, signal_conn=child_pipe, dag_ids=[], pickle_dags=False, async_mode=async_mode, ) with create_session() as session: self.run_processor_manager_one_loop(manager, parent_pipe) import_errors = session.query(errors.ImportError).all() assert len(import_errors) == 1 filename_to_parse.remove() # Rerun the scheduler once the dag file has been removed self.run_processor_manager_one_loop(manager, parent_pipe) import_errors = session.query(errors.ImportError).all() assert len(import_errors) == 0 session.rollback() child_pipe.close() parent_pipe.close() @conf_vars({('core', 'load_examples'): 'False'}) def test_max_runs_when_no_files(self): child_pipe, parent_pipe = multiprocessing.Pipe() with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder: async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn') manager = DagFileProcessorManager( dag_directory=dags_folder, max_runs=1, processor_timeout=timedelta.max, signal_conn=child_pipe, dag_ids=[], pickle_dags=False, async_mode=async_mode, ) self.run_processor_manager_one_loop(manager, parent_pipe) child_pipe.close() parent_pipe.close() @pytest.mark.backend("mysql", "postgres") def test_start_new_processes_with_same_filepath(self): """ Test that when a processor already exist with a filepath, a new processor won't be created with that filepath. The filepath will just be removed from the list. """ manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) file_1 = 'file_1.py' file_2 = 'file_2.py' file_3 = 'file_3.py' manager._file_path_queue = [file_1, file_2, file_3] # Mock that only one processor exists. 
This processor runs with 'file_1' manager._processors[file_1] = MagicMock() # Start New Processes manager.start_new_processes() # Because of the config: '[scheduler] parsing_processes = 2' # verify that only one extra process is created # and since a processor with 'file_1' already exists, # even though it is first in '_file_path_queue' # a new processor is created with 'file_2' and not 'file_1'. assert file_1 in manager._processors.keys() assert file_2 in manager._processors.keys() assert [file_3] == manager._file_path_queue def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self): manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) mock_processor = MagicMock() mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop') mock_processor.terminate.side_effect = None manager._processors['missing_file.txt'] = mock_processor manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0) manager.set_file_paths(['abc.txt']) assert manager._processors == {} def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self): manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) mock_processor = MagicMock() mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop') mock_processor.terminate.side_effect = None manager._processors['abc.txt'] = mock_processor manager.set_file_paths(['abc.txt']) assert manager._processors == {'abc.txt': mock_processor} @conf_vars({("scheduler", "file_parsing_sort_mode"): "alphabetical"}) @mock.patch("zipfile.is_zipfile", return_value=True) @mock.patch("airflow.utils.file.might_contain_dag", return_value=True) @mock.patch("airflow.utils.file.find_path_from_directory", 
return_value=True) @mock.patch("airflow.utils.file.os.path.isfile", return_value=True) def test_file_paths_in_queue_sorted_alphabetically( self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile ): """Test dag files are sorted alphabetically""" dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"] mock_find_path.return_value = dag_files manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) manager.set_file_paths(dag_files) assert manager._file_path_queue == [] manager.prepare_file_path_queue() assert manager._file_path_queue == ['file_1.py', 'file_2.py', 'file_3.py', 'file_4.py'] @conf_vars({("scheduler", "file_parsing_sort_mode"): "random_seeded_by_host"}) @mock.patch("zipfile.is_zipfile", return_value=True) @mock.patch("airflow.utils.file.might_contain_dag", return_value=True) @mock.patch("airflow.utils.file.find_path_from_directory", return_value=True) @mock.patch("airflow.utils.file.os.path.isfile", return_value=True) def test_file_paths_in_queue_sorted_random_seeded_by_host( self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile ): """Test files are randomly sorted and seeded by host name""" dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"] mock_find_path.return_value = dag_files manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) manager.set_file_paths(dag_files) assert manager._file_path_queue == [] manager.prepare_file_path_queue() expected_order = dag_files random.Random(get_hostname()).shuffle(expected_order) assert manager._file_path_queue == expected_order # Verify running it again produces same order manager._file_paths = [] manager.prepare_file_path_queue() assert manager._file_path_queue == expected_order @conf_vars({("scheduler", 
"file_parsing_sort_mode"): "modified_time"}) @mock.patch("zipfile.is_zipfile", return_value=True) @mock.patch("airflow.utils.file.might_contain_dag", return_value=True) @mock.patch("airflow.utils.file.find_path_from_directory", return_value=True) @mock.patch("airflow.utils.file.os.path.isfile", return_value=True) @mock.patch("airflow.utils.file.os.path.getmtime") def test_file_paths_in_queue_sorted_by_modified_time( self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile ): """Test files are sorted by modified time""" paths_with_mtime = {"file_3.py": 3.0, "file_2.py": 2.0, "file_4.py": 5.0, "file_1.py": 4.0} dag_files = list(paths_with_mtime.keys()) mock_getmtime.side_effect = list(paths_with_mtime.values()) mock_find_path.return_value = dag_files manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) manager.set_file_paths(dag_files) assert manager._file_path_queue == [] manager.prepare_file_path_queue() assert manager._file_path_queue == ['file_4.py', 'file_1.py', 'file_3.py', 'file_2.py'] @conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"}) @mock.patch("zipfile.is_zipfile", return_value=True) @mock.patch("airflow.utils.file.might_contain_dag", return_value=True) @mock.patch("airflow.utils.file.find_path_from_directory", return_value=True) @mock.patch("airflow.utils.file.os.path.isfile", return_value=True) @mock.patch("airflow.utils.file.os.path.getmtime") def test_recently_modified_file_is_parsed_with_mtime_mode( self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile ): """ Test recently updated files are processed even if min_file_process_interval is not reached """ freezed_base_time = timezone.datetime(2020, 1, 5, 0, 0, 0) initial_file_1_mtime = (freezed_base_time - timedelta(minutes=5)).timestamp() dag_files = ["file_1.py"] mock_getmtime.side_effect = 
[initial_file_1_mtime] mock_find_path.return_value = dag_files manager = DagFileProcessorManager( dag_directory='directory', max_runs=3, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) # let's say the DAG was just parsed 2 seconds before the Freezed time last_finish_time = freezed_base_time - timedelta(seconds=10) manager._file_stats = { "file_1.py": DagFileStat(1, 0, last_finish_time, 1.0, 1), } with freeze_time(freezed_base_time): manager.set_file_paths(dag_files) assert manager._file_path_queue == [] # File Path Queue will be empty as the "modified time" < "last finish time" manager.prepare_file_path_queue() assert manager._file_path_queue == [] # Simulate the DAG modification by using modified_time which is greater # than the last_parse_time but still less than now - min_file_process_interval file_1_new_mtime = freezed_base_time - timedelta(seconds=5) file_1_new_mtime_ts = file_1_new_mtime.timestamp() with freeze_time(freezed_base_time): manager.set_file_paths(dag_files) assert manager._file_path_queue == [] # File Path Queue will be empty as the "modified time" < "last finish time" mock_getmtime.side_effect = [file_1_new_mtime_ts] manager.prepare_file_path_queue() # Check that file is added to the queue even though file was just recently passed assert manager._file_path_queue == ["file_1.py"] assert last_finish_time < file_1_new_mtime assert ( manager._file_process_interval > (freezed_base_time - manager.get_last_finish_time("file_1.py")).total_seconds() ) def test_find_zombies(self): manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta.max, signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) dagbag = DagBag(TEST_DAG_FOLDER, read_dags_from_db=False) with create_session() as session: session.query(LJ).delete() dag = dagbag.get_dag('example_branch_operator') dag.sync_to_db() task = dag.get_task(task_id='run_this_first') ti = 
TI(task, DEFAULT_DATE, State.RUNNING) local_job = LJ(ti) local_job.state = State.SHUTDOWN session.add(local_job) session.commit() ti.job_id = local_job.id session.add(ti) session.commit() manager._last_zombie_query_time = timezone.utcnow() - timedelta( seconds=manager._zombie_threshold_secs + 1 ) manager._find_zombies() requests = manager._callback_to_execute[dag.fileloc] assert 1 == len(requests) assert requests[0].full_filepath == dag.fileloc assert requests[0].msg == "Detected as zombie" assert requests[0].is_failure_callback is True assert isinstance(requests[0].simple_task_instance, SimpleTaskInstance) assert ti.dag_id == requests[0].simple_task_instance.dag_id assert ti.task_id == requests[0].simple_task_instance.task_id assert ti.execution_date == requests[0].simple_task_instance.execution_date session.query(TI).delete() session.query(LJ).delete() @mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess') def test_handle_failure_callback_with_zombies_are_correctly_passed_to_dag_file_processor( self, mock_processor ): """ Check that the same set of failure callback with zombies are passed to the dag file processors until the next zombie detection logic is invoked. 
""" test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py' with conf_vars({('scheduler', 'parsing_processes'): '1', ('core', 'load_examples'): 'False'}): dagbag = DagBag(test_dag_path, read_dags_from_db=False) with create_session() as session: session.query(LJ).delete() dag = dagbag.get_dag('test_example_bash_operator') dag.sync_to_db() task = dag.get_task(task_id='run_this_last') ti = TI(task, DEFAULT_DATE, State.RUNNING) local_job = LJ(ti) local_job.state = State.SHUTDOWN session.add(local_job) session.commit() # TODO: If there was an actual Relationship between TI and Job # we wouldn't need this extra commit session.add(ti) ti.job_id = local_job.id session.commit() expected_failure_callback_requests = [ TaskCallbackRequest( full_filepath=dag.fileloc, simple_task_instance=SimpleTaskInstance(ti), msg="Message", ) ] test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py' child_pipe, parent_pipe = multiprocessing.Pipe() async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn') fake_processors = [] def fake_processor_(*args, **kwargs): nonlocal fake_processors processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs) fake_processors.append(processor) return processor mock_processor.side_effect = fake_processor_ manager = DagFileProcessorManager( dag_directory=test_dag_path, max_runs=1, processor_timeout=timedelta.max, signal_conn=child_pipe, dag_ids=[], pickle_dags=False, async_mode=async_mode, ) self.run_processor_manager_one_loop(manager, parent_pipe) if async_mode: # Once for initial parse, and then again for the add_callback_to_queue assert len(fake_processors) == 2 assert fake_processors[0]._file_path == str(test_dag_path) assert fake_processors[0]._callback_requests == [] else: assert len(fake_processors) == 1 assert fake_processors[-1]._file_path == str(test_dag_path) callback_requests = fake_processors[-1]._callback_requests assert {zombie.simple_task_instance.key for zombie in expected_failure_callback_requests} == 
{ result.simple_task_instance.key for result in callback_requests } child_pipe.close() parent_pipe.close() @mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock) @mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.kill") def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid): mock_pid.return_value = 1234 manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta(seconds=5), signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) processor = DagFileProcessorProcess('abc.txt', False, [], []) processor._start_time = timezone.make_aware(datetime.min) manager._processors = {'abc.txt': processor} manager._kill_timed_out_processors() mock_kill.assert_called_once_with() @mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess.pid", new_callable=PropertyMock) @mock.patch("airflow.dag_processing.processor.DagFileProcessorProcess") def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid): mock_pid.return_value = 1234 manager = DagFileProcessorManager( dag_directory='directory', max_runs=1, processor_timeout=timedelta(seconds=5), signal_conn=MagicMock(), dag_ids=[], pickle_dags=False, async_mode=True, ) processor = DagFileProcessorProcess('abc.txt', False, [], []) processor._start_time = timezone.make_aware(datetime.max) manager._processors = {'abc.txt': processor} manager._kill_timed_out_processors() mock_dag_file_processor.kill.assert_not_called() @conf_vars({('core', 'load_examples'): 'False'}) @pytest.mark.execution_timeout(10) def test_dag_with_system_exit(self): """ Test to check that a DAG with a system.exit() doesn't break the scheduler. 
""" dag_id = 'exit_test_dag' dag_directory = TEST_DAG_FOLDER.parent / 'dags_with_system_exit' # Delete the one valid DAG/SerializedDAG, and check that it gets re-created clear_db_dags() clear_db_serialized_dags() child_pipe, parent_pipe = multiprocessing.Pipe() manager = DagFileProcessorManager( dag_directory=dag_directory, dag_ids=[], max_runs=1, processor_timeout=timedelta(seconds=5), signal_conn=child_pipe, pickle_dags=False, async_mode=True, ) manager._run_parsing_loop() result = None while parent_pipe.poll(timeout=None): result = parent_pipe.recv() if isinstance(result, DagParsingStat) and result.done: break # Three files in folder should be processed assert sum(stat.run_count for stat in manager._file_stats.values()) == 3 with create_session() as session: assert session.query(DagModel).get(dag_id) is not None @conf_vars({('core', 'load_examples'): 'False'}) @pytest.mark.backend("mysql", "postgres") @pytest.mark.execution_timeout(30) @mock.patch('airflow.dag_processing.manager.DagFileProcessorProcess') def test_pipe_full_deadlock(self, mock_processor): dag_filepath = TEST_DAG_FOLDER / "test_scheduler_dags.py" child_pipe, parent_pipe = multiprocessing.Pipe() # Shrink the buffers to exacerbate the problem! for fd in (parent_pipe.fileno(),): sock = socket.socket(fileno=fd) sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024) sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024) sock.detach() exit_event = threading.Event() # To test this behaviour we need something that continually fills the # parent pipe's buffer (and keeps it full). def keep_pipe_full(pipe, exit_event): n = 0 while True: if exit_event.is_set(): break req = CallbackRequest(str(dag_filepath)) try: logging.debug("Sending CallbackRequests %d", n + 1) pipe.send(req) except TypeError: # This is actually the error you get when the parent pipe # is closed! Nicely handled, eh? 
break except OSError: break n += 1 logging.debug(" Sent %d CallbackRequests", n) thread = threading.Thread(target=keep_pipe_full, args=(parent_pipe, exit_event)) fake_processors = [] def fake_processor_(*args, **kwargs): nonlocal fake_processors processor = FakeDagFileProcessorRunner._create_process(*args, **kwargs) fake_processors.append(processor) return processor mock_processor.side_effect = fake_processor_ manager = DagFileProcessorManager( dag_directory=dag_filepath, dag_ids=[], # A reasonable large number to ensure that we trigger the deadlock max_runs=100, processor_timeout=timedelta(seconds=5), signal_conn=child_pipe, pickle_dags=False, async_mode=True, ) try: thread.start() # If this completes without hanging, then the test is good! manager._run_parsing_loop() exit_event.set() finally: logging.info("Closing pipes") parent_pipe.close() child_pipe.close() thread.join(timeout=1.0) class TestDagFileProcessorAgent(unittest.TestCase): def setUp(self): # Make sure that the configure_logging is not cached self.old_modules = dict(sys.modules) def tearDown(self): # Remove any new modules imported during the test run. This lets us # import the same source files for more than one test. remove_list = [] for mod in sys.modules: if mod not in self.old_modules: remove_list.append(mod) for mod in remove_list: del sys.modules[mod] @staticmethod def _processor_factory(file_path, zombies, dag_ids, pickle_dags): return DagFileProcessorProcess(file_path, pickle_dags, dag_ids, zombies) def test_reload_module(self): """ Configure the context to have logging.logging_config_class set to a fake logging class path, thus when reloading logging module the airflow.processor_manager logger should not be configured. """ with settings_context(SETTINGS_FILE_VALID): # Launch a process through DagFileProcessorAgent, which will try # reload the logging module. 
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py' async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn') log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION') try: os.remove(log_file_loc) except OSError: pass # Starting dag processing with 0 max_runs to avoid redundant operations. processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode) processor_agent.start() if not async_mode: processor_agent.run_single_parsing_loop() processor_agent._process.join() # Since we are reloading logging config not creating this file, # we should expect it to be nonexistent. assert not os.path.isfile(log_file_loc) @conf_vars({('core', 'load_examples'): 'False'}) def test_parse_once(self): clear_db_serialized_dags() clear_db_dags() test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py' async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn') processor_agent = DagFileProcessorAgent(test_dag_path, 1, timedelta.max, [], False, async_mode) processor_agent.start() if not async_mode: processor_agent.run_single_parsing_loop() while not processor_agent.done: if not async_mode: processor_agent.wait_until_finished() processor_agent.heartbeat() assert processor_agent.all_files_processed assert processor_agent.done with create_session() as session: dag_ids = session.query(DagModel.dag_id).order_by("dag_id").all() assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)] dag_ids = session.query(SerializedDagModel.dag_id).order_by("dag_id").all() assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)] def test_launch_process(self): test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py' async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn') log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION') try: os.remove(log_file_loc) except OSError: pass # Starting dag processing with 0 max_runs to avoid redundant operations. 
processor_agent = DagFileProcessorAgent(test_dag_path, 0, timedelta.max, [], False, async_mode) processor_agent.start() if not async_mode: processor_agent.run_single_parsing_loop() processor_agent._process.join() assert os.path.isfile(log_file_loc)
pureblood.py
#!/usr/bin/python # -*- coding: utf-8 -*- """ Script Created By: Cr4sHCoD3 Page: https://github.com/cr4shcod3 https://github.com/cr4shcod3/pureblood FB Page: https://facebook.com/cr4shcod3.py Copyrights: Cr4sHCoD3 2018 MIT LICENSE Special Mentions: PureHackers PH Blood Security Hackers """ import os import sys import platform import time import datetime import re import threading import socket import webbrowser try: import colorama colorama.init() except: print ('[!] - Module (colorama) not installed!') try: import requests from requests.exceptions import ConnectionError except: print ('[!] - Module (requests) not installed!') try: import whois except: print ('[!] - Module (python-whois) not installed!') try: import dns.resolver except: print ('[!] - Module (dnspython) not installed!') try: from bs4 import BeautifulSoup except: print ('[!] - Module (bs4) not installed!') ######################################################################################################################################################### # GLOBAL ## Color reset = '\033[0m' bold = '\033[1m' underline = '\033[4m' ### Fore black = '\033[90m'; red = '\033[91m'; green = '\033[92m'; yellow = '\033[93m'; blue = '\033[94m'; magenta = '\033[95m'; cyan = '\033[96m'; white = '\033[97m' ### Background bg_black = '\033[90m'; bg_red = '\033[91m'; bg_green = '\033[92m'; bg_yellow = '\033[93m'; bg_blue = '\033[94m'; bg_magenta = '\033[95m'; bg_cyan = '\033[96m'; bg_white = '\033[97m' ## Configuration if platform.system() == 'Windows': from ctypes import windll, create_string_buffer h = windll.kernel32.GetStdHandle(-12) csbi = create_string_buffer(22) res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) if res: import struct (bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw) sizex = right - left + 1 sizey = bottom - top + 1 else: sizex, sizey = 80, 25 elif platform.system() == 'Linux': sizey, sizex = os.popen('stty size', 
'r').read().split() ## Date Time month = datetime.date.today().strftime("%B") if datetime.date.today().strftime("%w") == 1 or datetime.date.today().strftime("%w") == '1': day = 'Monday' elif datetime.date.today().strftime("%w") == 2 or datetime.date.today().strftime("%w") == '2': day = 'Tuesay' elif datetime.date.today().strftime("%w") == 3 or datetime.date.today().strftime("%w") == '3': day = 'Wednesday' elif datetime.date.today().strftime("%w") == 4 or datetime.date.today().strftime("%w") == '4': day = 'Thursday' elif datetime.date.today().strftime("%w") == 5 or datetime.date.today().strftime("%w") == '5': day = 'Friday' elif datetime.date.today().strftime("%w") == 6 or datetime.date.today().strftime("%w") == '6': day = 'Saturday' elif datetime.date.today().strftime("%w") == 7 or datetime.date.today().strftime("%w") == '0': day = 'Sunday' mday = datetime.date.today().strftime("%d") year = datetime.date.today().strftime("%Y") current_datetime = datetime.datetime.now() current_time = current_datetime.strftime('%I:%M:%S') ## List ids = [ 'NONE','A','NS','MD','MF','CNAME','SOA','MB','MG','MR','NULL','WKS','PTR','HINFO','MINFO','MX','TXT','RP','AFSDB','X25','ISDN','RT','NSAP','NSAP-PTR','SIG','KEY','PX','GPOS','AAAA','LOC','NXT','SRV','NAPTR','KX','CERT','A6','DNAME','OPT','APL','DS','SSHFP','IPSECKEY','RRSIG','NSEC','DNSKEY','DHCID','NSEC3','NSEC3PARAM','TLSA','HIP','CDS','CDNSKEY','CSYNC','SPF','UNSPEC','EUI48','EUI64','TKEY','TSIG','IXFR','AXFR','MAILB','MAILA','ANY','URI','CAA','TA','DLV' ] admin_panel_list = 
['/admin.aspx','/admin.asp','/admin.php','/admin/','/administrator/','/moderator/','/webadmin/','/adminarea/','/bb-admin/','/adminLogin/','/admin_area/','/panel-administracion/','/instadmin/','/memberadmin/','/administratorlogin/','/adm/','/admin/account.php','/admin/index.php','/admin/login.php','/admin/admin.php','/admin/account.php','/joomla/administrator','/login.php','/admin_area/admin.php','/admin_area/login.php','/siteadmin/login.php','/siteadmin/index.php','/siteadmin/login.html','/admin/account.html','/admin/index.html','/admin/login.html','/admin/admin.html','/admin_area/index.php','/bb-admin/index.php','/bb-admin/login.php','/bb-admin/admin.php','/admin/home.php','/admin_area/login.html','/admin_area/index.html','/admin/controlpanel.php','/admincp/index.asp','/admincp/login.asp','/admincp/index.html','/admin/account.html','/adminpanel.html','/webadmin.html','webadmin/index.html','/webadmin/admin.html','/webadmin/login.html','/admin/admin_login.html','/admin_login.html','/panel-administracion/login.html','/admin/cp.php','cp.php','/administrator/index.php','/administrator/login.php','/nsw/admin/login.php','/webadmin/login.php','/admin/admin_login.php','/admin_login.php','/administrator/account.php','/administrator.php','/admin_area/admin.html','/pages/admin/admin-login.php','/admin/admin-login.php','/admin-login.php','/bb-admin/index.html','/bb-admin/login.html','/bb-admin/admin.html','/admin/home.html','/modelsearch/login.php','/moderator.php','/moderator/login.php','/moderator/admin.php','/account.php','/pages/admin/admin-login.html','/admin/admin-login.html','/admin-login.html','/controlpanel.php','/admincontrol.php','/admin/adminLogin.html','/adminLogin.html','/admin/adminLogin.html','/home.html','/rcjakar/admin/login.php','/adminarea/index.html','/adminarea/admin.html','/webadmin.php','/webadmin/index.php','/webadmin/admin.php','/admin/controlpanel.html','/admin.html','/admin/cp.html','cp.html','/adminpanel.php','/moderator.html','/administrator/index.
html','/administrator/login.html','/user.html','/administrator/account.html','/administrator.html','/login.html','/modelsearch/login.html','/moderator/login.html','/adminarea/login.html','/panel-administracion/index.html','/panel-administracion/admin.html','/modelsearch/index.html','/modelsearch/admin.html','/admincontrol/login.html','/adm/index.html','/adm.html','/moderator/admin.html','/user.php','/account.html','/controlpanel.html','/admincontrol.html','/panel-administracion/login.php','/wp-login.php','/adminLogin.php','/admin/adminLogin.php','/home.php','/adminarea/index.php','/adminarea/admin.php','/adminarea/login.php','/panel-administracion/index.php','/panel-administracion/admin.php','/modelsearch/index.php','/modelsearch/admin.php','/admincontrol/login.php','/adm/admloginuser.php','/admloginuser.php','/admin2.php','/admin2/login.php','/admin2/index.php','adm/index.php','adm.php','affiliate.php','/adm_auth.php ','/memberadmin.php','/administratorlogin.php','/login/admin.asp','/admin/login.asp','/administratorlogin.asp','/login/asmindstrator.asp','/admin/login.aspx','/login/admin.aspx','/administartorlogin.aspx','login/administrator.aspx','/adminlogin.asp','a/dminlogin.aspx','/admin_login.asp','/admin_login.aspx','/adminhome.asp','/adminhome.aspx''/administrator_login.asp','/administrator_login.aspx'] admin_panel_valid = [] ## Threading Obejct Funtions def TCP_connect(ip, port_number, delay, output): TCPsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) TCPsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) TCPsock.settimeout(delay) try: TCPsock.connect((ip, port_number)) output[port_number] = 'Open' except: output[port_number] = '' def subdomain_scanner(subdomain, so_200, so_301, so_302, so_403): subdomain = 'http://' + subdomain try: subdomain_scanner_request = requests.get(subdomain) subdomain_scanner_code = subdomain_scanner_request.status_code if subdomain_scanner_code == 200: so_200.append(subdomain) elif subdomain_scanner_code == 301: 
            so_301.append(subdomain)
        elif subdomain_scanner_code == 302:
            so_302.append(subdomain)
        elif subdomain_scanner_code == 403:
            so_403.append(subdomain)
    except ConnectionError:
        # Unreachable subdomains are silently ignored (best-effort scan).
        pass

# END GLOBAL
#########################################################################################################################################################

class Generator:
    """Generators for static artifacts (currently only the deface page HTML)."""

    def deface_page(self, title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups):
        # Build the HTML from a template; placeholder {9} is '@import' so that
        # the literal '@import url(...)' CSS line survives str.format().
        deface_page_template = '''
<html>
    <head>
        <title>--=[ Hacked By {0} ]=--</title>
        <meta charset=\"UTF-8\">
        <link rel=\"SHORTCUT ICON\" href=\"{1}\">
        <meta name=\"Author\" content=\"Cr4sHCoD3 | PureHackers x Blood Security Hackers\"/>
        <meta name=\"copyright\" content=\"PureHackers | Blood Security Hackers\"/>
        <meta name=\"description\" content=\"{2}.\"/> <!-- Change this -->
        <meta name=\"keywords\" content=\"Hacked, Pawned, Defaced, Security, PureHackers, Blood Security Hackers, PureBlood, Cr4sHCoD3\"/> <!-- Change this -->
        <meta property=\"og:title\" content=\"Hacked By {0}\"/>
        <meta property=\"og:image\" content=\"{3}\"> <!-- Change this -->
        <style>
            {9} url(\"https://cr4shcod3.github.io/python/pureblood/pureblood.css\");
        </style>
    </head>
    <body>
        <div class=\"bg\">
            <center>
                <img src=\"{4}\" class=\"logo\"/> <!-- Change This -->
                <h1 class=\"header glitch\" data-text=\"Hacked By {5}\">Hacked By {5}</h1><br><br>
                <p class=\"message\">{6}</p>
                <p class=\"message\">{7}</p><br><br>
                <p class=\"groups\">Greetings: {8}</p>
            </center>
        </div>
    </body>
</html>
'''.format(title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups, '@import')
        self.deface_page_result = deface_page_template
        return self.deface_page_result

class WebPentest:
    """Web reconnaissance helpers; most methods delegate to public HTTP APIs
    (api.hackertarget.com, whatcms.org) and also stash their result on self."""

    def banner_grab(self, bg_url):
        # Fetch response headers and crudely reformat the dict repr into
        # "Header: value" lines by string replacement.
        banner_grab_request = requests.get(bg_url)
        banner_grab_result = banner_grab_request.headers
        banner_grab_result = str(banner_grab_result).replace("{'", "").replace("'}", "").replace("': '", ": ").replace("', '", ",\n")
        self.banner_grab_result = banner_grab_result
        return self.banner_grab_result

    def whois(self, w_url):
        # Delegates to the python-whois package imported at module top.
        whois_query = whois.whois(w_url)
        self.whois_result = whois_query
        return self.whois_result

    def traceroute(self, t_hostname):
        # Remote traceroute via the hackertarget mtr API (no local privileges needed).
        traceroute_request = requests.get('https://api.hackertarget.com/mtr/?q=' + t_hostname)
        traceroute_response = traceroute_request.text
        traceroute_final = """{0}""".format(str(traceroute_response))
        self.traceroute_result = traceroute_final
        return self.traceroute_result

    def dns_record(self, dr_hostname):
        # Query every record type listed in the module-level `ids` list;
        # types the resolver rejects or that have no answer are skipped.
        dns_record_list = []
        for a in ids:
            try:
                answers = dns.resolver.query(dr_hostname, a)
                for rdata in answers:
                    a = str(a); rdata = str(rdata)
                    dns_record_list.append(str(a + ' : ' + rdata))
            except Exception:
                pass
        self.dns_record_result = dns_record_list
        return self.dns_record_result

    def reverse_dns_lookup(self, rdl_ip):
        # Expands the IP to a /24 before querying.
        # NOTE(review): result is stored in self.reverse_ip_lookup_result (not
        # *_dns_*), so it shares/overwrites reverse_ip_lookup()'s attribute.
        rdl_ip = rdl_ip + '/24'
        reverse_dns_lookup_request = requests.get('https://api.hackertarget.com/reversedns/?q=' + rdl_ip)
        reverse_dns_lookup_response = reverse_dns_lookup_request.text
        reverse_dns_lookup_final = """{0}""".format(str(reverse_dns_lookup_response))
        self.reverse_ip_lookup_result = reverse_dns_lookup_final
        return self.reverse_ip_lookup_result

    def zone_transfer_lookup(self, ztl_hostname):
        # AXFR check via the hackertarget zonetransfer API.
        zone_transfer_lookup_request = requests.get('https://api.hackertarget.com/zonetransfer/?q=' + ztl_hostname)
        zone_transfer_lookup_response = zone_transfer_lookup_request.text
        zone_transfer_lookup_final = """{0}""".format(str(zone_transfer_lookup_response))
        self.zone_transfer_lookup_result = zone_transfer_lookup_final
        return self.zone_transfer_lookup_result

    def port_scan(self, ps_hostname, ps_pend):
        #https://stackoverflow.com/a/38210023
        # Spawns one thread per port (ports 0..ps_pend inclusive), each writing
        # 'Open' or '' into the shared `output` dict keyed by port number.
        port_scan_list = []
        threads = []
        output = {}
        delay = 10
        for i in range(ps_pend + 1):
            t = threading.Thread(target=TCP_connect, args=(ps_hostname, i, delay, output))
            threads.append(t)
        for i in range(ps_pend + 1):
            threads[i].start()
        for i in range(ps_pend + 1):
            threads[i].join()
        for i in range(ps_pend + 1):
            if output[i] == 'Open':
                port_scan_list.append('[+] Port Open - ' + str(i))
        self.port_scan_result = port_scan_list
        return self.port_scan_result

    def admin_panel_scan(self, ads_url):
        """Look for an admin/login panel: first via robots.txt paths, then by
        brute-forcing the module-level admin_panel_list of common paths."""
        admin_panel_valid = []
        admin_panel_redirect = []
        ads_urls = []
        r_path = []
        ads_r_urls = []
        robots = ['/robot.txt', '/robots.txt']
        for i in admin_panel_list:
            ads_urls.append(ads_url + i)
        # Probe both robots.txt spellings; only the *last* response is kept.
        for i in robots:
            r_robots = requests.get(ads_url + i)
            if r_robots.status_code == 200:
                r_robots = r_robots
            else:
                r_robots = ''
        if r_robots == '':
            pass
        else:
            # Collect Allow/Disallow paths and fetch each, flagging pages whose
            # body mentions admin/login/username/password keywords.
            robots = str(r_robots.text)
            for i in robots.split("\n"):
                if i.startswith('Allow'):
                    r_path.append(i.split(': ')[1].split(' ')[0])
                elif i.startswith('Disallow'):
                    r_path.append(i.split(': ')[1].split(' ')[0])
            for i in r_path:
                ads_r_urls.append(ads_url + i)
            for i in ads_r_urls:
                ads_r_urls_request = requests.get(i)
                if 'Admin' in ads_r_urls_request.text or 'Login' in ads_r_urls_request.text:
                    r_admin_panel = i
                    admin_panel_valid.append(i)
                elif 'admin' in ads_r_urls_request.text or 'login' in ads_r_urls_request.text:
                    r_admin_panel = i
                    admin_panel_valid.append(i)
                elif 'Username' in ads_r_urls_request.text or 'Password' in ads_r_urls_request.text:
                    r_admin_panel = i
                    admin_panel_valid.append(i)
                elif 'username' in ads_r_urls_request.text or 'password' in ads_r_urls_request.text:
                    r_admin_panel = i
                    admin_panel_valid.append(i)
                else:
                    r_admin_panel = None
        # Fallback: brute-force the common-path list; stop at the first 200,
        # but remember 403s as "present but forbidden".
        if not admin_panel_valid:
            for i in ads_urls:
                admin_scan_request = requests.get(i)
                if admin_scan_request.status_code == 200:
                    admin_panel_valid.append(i)
                    break
                elif admin_scan_request.status_code == 403:
                    admin_panel_redirect.append(i)
                else:
                    pass
        admin_panel_valid = list(set(admin_panel_valid))
        for i in admin_panel_redirect:
            admin_panel_valid.append(i + ' - 403')
        # Last resort: open a Google dork in the browser.
        # NOTE(review): `google_hacking` is not defined in this chunk — presumably
        # a module-level search-URL constant defined elsewhere; verify.
        if not admin_panel_valid:
            webbrowser.open_new_tab(google_hacking + 'site:' + ads_url + '+inurl:login | admin | user | cpanel | account | moderator | phpmyadmin | /cp')
        self.admin_panel_scan_result = admin_panel_valid
        return self.admin_panel_scan_result

    def subdomain_scan(self, ss_hostname, subdomain_list):
        """Try each prefix from the wordlist file against ss_hostname, bucketing
        results by HTTP status via the module-level subdomain_scanner()."""
        so_200 = []
        so_301 = []
        so_302 = []
        so_403 = []
        ss_urls = []
        # NOTE(review): the wordlist file handle is never closed.
        ss_subdomain_list = open(subdomain_list, 'r')
        ss_subdomain_list = ss_subdomain_list.read().splitlines()
        for i in ss_subdomain_list:
            ss_urls.append(i + '.' + ss_hostname)
        for i in ss_urls:
            # NOTE(review): join() immediately after start() serializes the
            # scan — the threads run one at a time, not concurrently.
            t = threading.Thread(target=subdomain_scanner, args=(i, so_200, so_301, so_302, so_403,))
            t.start()
            t.join()
        self.ss_200_result = so_200
        self.ss_301_result = so_301
        self.ss_302_result = so_302
        self.ss_403_result = so_403
        return self.ss_200_result, self.ss_301_result, self.ss_302_result, self.ss_403_result

    def cms_detect(self, cd_hostname):
        # Scrape whatcms.org's result page for the CMS name and version.
        cd_cms = []
        cd_cms_version = []
        cms_detect_request = requests.get('https://whatcms.org/?s=' + cd_hostname)
        cd_soup = BeautifulSoup(cms_detect_request.content, 'html.parser')
        cd_soup_div = cd_soup.find('div', attrs={'class': 'large text-center'})
        for i in cd_soup_div.find_all('span', attrs={'class': 'nowrap'}):
            cd_cms_version.append(i.text)
        cd_cms.append(cd_soup_div.find('a').text)
        # NOTE(review): cd_cms always has one element appended above (or an
        # exception was raised), so this branch looks unreachable — confirm.
        if not cd_cms:
            cms_detect_final = '[!] - There\'s no CMS Detected!'
else: cd_cms_version = cd_cms_version[1] cms_detect_final = cd_cms[0].replace('/c/', '') cms_detect_final = cms_detect_final + ' - ' + cd_cms_version self.cms_detect_result = cms_detect_final return self.cms_detect_result def reverse_ip_lookup(self, ril_hostname): reverse_ip_lookup_request = requests.get('https://api.hackertarget.com/reverseiplookup/?q=' + ril_hostname) reverse_ip_lookup_response = reverse_ip_lookup_request.text reverse_ip_lookup_final = """{0}""".format(str(reverse_ip_lookup_response)) self.reverse_ip_lookup_result = reverse_ip_lookup_final return self.reverse_ip_lookup_result def subnet_lookup(self, subnet_input): subnet_lookup_request = requests.get('https://api.hackertarget.com/subnetcalc/?q=' + subnet_input) subnet_lookup_response = subnet_lookup_request.text subnet_lookup_final = """{0}""".format(str(subnet_lookup_response)) self.subnet_lookup_result = subnet_lookup_final return self.subnet_lookup_result def links_extract(self, le_url): links_extract_request = requests.get('https://api.hackertarget.com/pagelinks/?q=' + le_url) links_extract_response = links_extract_request.text links_extract_final = """{0}""".format(str(links_extract_response)) self.links_extract_result = links_extract_final return self.links_extract_result def clear(): if platform.system() == 'Linux': os.system('clear') elif platform.system() == 'Windows': os.system('cls') elif platform.system() == 'Darwin': os.system('clear') else: os.system('clear') def banner(): if sys.version_info[0] == 3: banner = ("""{1} ██▓███ █ ██ ██▀███ ▓█████ ▄▄▄▄ ██▓ ▒█████ ▒█████ ▓█████▄ ▓██░ ██▒ ██ ▓██▒▓██ ▒ ██▒▓█ ▀ ▓█████▄ ▓██▒ ▒██▒ ██▒▒██▒ ██▒▒██▀ ██▌ ▓██░ ██▓▒▓██ ▒██░▓██ ░▄█ ▒▒███ ▒██▒ ▄██▒██░ ▒██░ ██▒▒██░ ██▒░██ █▌ ▒██▄█▓▒ ▒▓▓█ ░██░▒██▀▀█▄ ▒▓█ ▄ ▒██░█▀ ▒██░ ▒██ ██░▒██ ██░░▓█▄ ▌ ▒██▒ ░ ░▒▒█████▓ ░██▓ ▒██▒░▒████▒ ░▓█ ▀█▓░██████▒░ ████▓▒░░ ████▓▒░░▒████▓ ▒▓▒░ ░ ░░▒▓▒ ▒ ▒ ░ ▒▓ ░▒▓░░░ ▒░ ░ ░▒▓███▀▒░ ▒░▓ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ▒▒▓ ▒ ░▒ ░ ░░▒░ ░ ░ ░▒ ░ ▒░ ░ ░ ░ ▒░▒ ░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ▒ ▒ ░░ ░░░ 
░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ {2}--={3}[ {0}{5}Author: Cr4sHCoD3 {3}]{2}=-- {4}| {2}-- --={3}[ {0}{5}Version: 1 {3}]{2}=-- -- {4}| | {2}-- --={3}[ {0}{5}Website: https://github.com/cr4shcod3 {3}]{2}=-- -- {4}| | {2}-- --={3}[ {0}{5}PureHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}| {0}""".format(reset, red, green, blue, yellow, bold)) elif sys.version_info[0] == 2: banner = ("""{1} ██▓███ █ ██ ██▀███ ▓█████ ▄▄▄▄ ██▓ ▒█████ ▒█████ ▓█████▄ ▓██░ ██▒ ██ ▓██▒▓██ ▒ ██▒▓█ ▀ ▓█████▄ ▓██▒ ▒██▒ ██▒▒██▒ ██▒▒██▀ ██▌ ▓██░ ██▓▒▓██ ▒██░▓██ ░▄█ ▒▒███ ▒██▒ ▄██▒██░ ▒██░ ██▒▒██░ ██▒░██ █▌ ▒██▄█▓▒ ▒▓▓█ ░██░▒██▀▀█▄ ▒▓█ ▄ ▒██░█▀ ▒██░ ▒██ ██░▒██ ██░░▓█▄ ▌ ▒██▒ ░ ░▒▒█████▓ ░██▓ ▒██▒░▒████▒ ░▓█ ▀█▓░██████▒░ ████▓▒░░ ████▓▒░░▒████▓ ▒▓▒░ ░ ░░▒▓▒ ▒ ▒ ░ ▒▓ ░▒▓░░░ ▒░ ░ ░▒▓███▀▒░ ▒░▓ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ▒▒▓ ▒ ░▒ ░ ░░▒░ ░ ░ ░▒ ░ ▒░ ░ ░ ░ ▒░▒ ░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ▒ ▒ ░░ ░░░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ {2}--={3}[ {0}{5}Author: Cr4sHCoD3 {3}]{2}=-- {4}| {2}-- --={3}[ {0}{5}Version: 1 {3}]{2}=-- -- {4}| | {2}-- --={3}[ {0}{5}Website: https://github.com/cr4shcod3 {3}]{2}=-- -- {4}| | {2}-- --={3}[ {0}{5}PureHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}| {0}""".format(reset, red, green, blue, yellow, bold)).decode('utf-8') print (banner) def set_url(target, wfunc): global url global hostname global ip if 'http://' in target: url = target hostname = target.replace('http://', '') elif 'https://' in target: url = target hostname = target.replace('https://', '') if '://' not in target: url = 'http://' + target hostname = target ip = socket.gethostbyname(hostname) if wfunc == 1: web_pentest() else: main() def generator(): print ("""\n\n {3}[ {5}Generator {3}] {2}01{3}) {5}Deface Page {2}90{3}) {5}Back To Menu {2}99{3}) {5}Exit {0}""".format(reset, red, green, blue, yellow, cyan)) if sys.version_info[0] == 3: try: choice = int(input('{0}PureBlood{1}({3}Generator{1})> {2}'.format(green, blue, cyan, red))) except 
KeyboardInterrupt: print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) sys.exit() except ValueError: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) main() elif sys.version_info[0] == 2: try: choice = int(raw_input('{0}PureBlood{1}({3}Generator{1})> {2}'.format(green, blue, cyan, red))) except KeyboardInterrupt: print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) sys.exit() except ValueError: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) main() cgenerator = Generator() if choice == 1: print ('{0}='.format(red) * int(sizex)) print (reset + bold) if sys.version_info[0] == 3: title = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Title{1})> {2}'.format(green, blue, cyan, red))) shortcut_icon = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Shortcut Icon{1})> {2}'.format(green, blue, cyan, red))) meta_description = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Description{1})> {2}'.format(green, blue, cyan, red))) meta_image = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Image{1})> {2}'.format(green, blue, cyan, red))) logo = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Logo{1})> {2}'.format(green, blue, cyan, red))) hacker_name = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Hacker Name{1})> {2}'.format(green, blue, cyan, red))) message1 = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 1{1})> {2}'.format(green, blue, cyan, red))) message2 = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 2{1})> {2}'.format(green, blue, cyan, red))) groups = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Group/s{1})> {2}'.format(green, blue, cyan, red))) deface_page_output_filename = str(input('{0}PureBlood{1}>{0}Generator{1}>({3}Output Filename{1})> {2}'.format(green, blue, cyan, red))) if sys.version_info[0] == 2: title = 
str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Title{1})> {2}'.format(green, blue, cyan, red))) shortcut_icon = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Shortcut Icon{1})> {2}'.format(green, blue, cyan, red))) meta_description = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Description{1})> {2}'.format(green, blue, cyan, red))) meta_image = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Meta Image{1})> {2}'.format(green, blue, cyan, red))) logo = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Logo{1})> {2}'.format(green, blue, cyan, red))) hacker_name = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Hacker Name{1})> {2}'.format(green, blue, cyan, red))) message1 = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 1{1})> {2}'.format(green, blue, cyan, red))) message2 = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Message 2{1})> {2}'.format(green, blue, cyan, red))) groups = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Group/s{1})> {2}'.format(green, blue, cyan, red))) deface_page_output_filename = str(raw_input('{0}PureBlood{1}>{0}Generator{1}>({3}Output Filename{1})> {2}'.format(green, blue, cyan, red))) gdeface_page = cgenerator.deface_page(title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups) if '.html' in deface_page_output_filename: deface_page_output_filename = deface_page_output_filename else: deface_page_output_filename = deface_page_output_filename + '.html' deface_page_output_file = open('outputs/deface_page/' + deface_page_output_filename, 'w+') deface_page_output_file.write(gdeface_page) deface_page_output_file.close() print ('\n{2}[{1}+{2}] {3}- {4}Output saved in outputs/deface_page/' + deface_page_output_filename + '{0}'.format(reset, green, blue, yellow, cyan)) print (reset + bold) print ('{0}='.format(red) * int(sizex)) generator() elif choice == 90: print ('\n\n') main() elif choice == 99: print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, 
green, blue, yellow, cyan)) sys.exit() else: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid choice!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) generator() def web_pentest(): global web_pentest_outputfile print ("""\n\n {3}[ {5}Web Pentest {3}] {2}01{3}) {5}Banner Grab {2}02{3}) {5}Whois {2}03{3}) {5}Traceroute {2}04{3}) {5}DNS Record {2}05{3}) {5}Reverse DNS Lookup {2}06{3}) {5}Zone Transfer Lookup {2}07{3}) {5}Port Scan {2}08{3}) {5}Admin Panel Scan {2}09{3}) {5}Subdomain Scan {2}10{3}) {5}CMS Identify {2}11{3}) {5}Reverse IP Lookup {2}12{3}) {5}Subnet Lookup {2}13{3}) {5}Extract Page Links {2}90{3}) {5}Back To Menu {2}95{3}) {5}Set Target {2}99{3}) {5}Exit {0}""".format(reset, red, green, blue, yellow, cyan)) if sys.version_info[0] == 3: try: choice = int(input('{0}PureBlood{1}({3}WebPentest{1})> {2}'.format(green, blue, cyan, red))) except KeyboardInterrupt: try: print ('\n{2}[{1}+{2}] {3}- {4}Output saved in outputs/web_pentest/' + web_pentest_output + '{0}'.format(reset, green, blue, yellow, cyan)) except: pass print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) sys.exit() except ValueError: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) main() elif sys.version_info[0] == 2: try: choice = int(raw_input('{0}PureBlood{1}({3}WebPentest{1})> {2}'.format(green, blue, cyan, red))) except KeyboardInterrupt: try: print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) except: pass sys.exit() except ValueError: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) main() cweb_pentest = WebPentest() if choice == 1: try: wp_banner_grab = cweb_pentest.banner_grab(url) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. 
{1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Banner Grab Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_banner_grab) web_pentest_outputfile.write('\n' + wp_banner_grab) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 2: try: wp_whois = cweb_pentest.whois(url) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Whois Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_whois) web_pentest_outputfile.write('\n' + str(wp_whois)) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 3: try: wp_traceroute = cweb_pentest.traceroute(hostname) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. 
{1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Traceroute Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_traceroute) web_pentest_outputfile.write('\n' + wp_traceroute) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 4: try: wp_dns_record = cweb_pentest.dns_record(hostname) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] DNS Record Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) web_pentest_outputfile.write('\n') for i in wp_dns_record: print (i) web_pentest_outputfile.write(str(i) + '\n') print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 5: try: wp_reverse_dns_lookup = cweb_pentest.reverse_dns_lookup(ip) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. 
{1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Reverse DNS Lookup Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_reverse_dns_lookup) web_pentest_outputfile.write('\n' + wp_reverse_dns_lookup) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 6: try: wp_zone_transfer_lookup = cweb_pentest.zone_transfer_lookup(hostname) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Zone Transfer Lookup Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_zone_transfer_lookup) web_pentest_outputfile.write('\n' + wp_zone_transfer_lookup) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 7: if sys.version_info[0] == 3: port_end = int(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}PortScan{1}>({3}Port End{1})> {2}'.format(green, blue, cyan, red))) if sys.version_info[0] == 2: port_end = int(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}PortScan{1}>({3}Port End{1})> {2}'.format(green, blue, cyan, red))) try: wp_port_scan = cweb_pentest.port_scan(hostname, port_end) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. 
{1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Port Scan Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) web_pentest_outputfile.write('\n') for i in wp_port_scan: print (i) web_pentest_outputfile.write(str(i) + '\n') print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 8: try: wp_admin_panel_scan = cweb_pentest.admin_panel_scan(url) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Admin Panel Scan Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) web_pentest_outputfile.write('\n') for i in wp_admin_panel_scan: print (i) web_pentest_outputfile.write(str(i) + '\n') print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 9: if sys.version_info[0] == 3: subdomain_list = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubdomainScan{1}>({3}Subdomain List{1})> {2}'.format(green, blue, cyan, red))) if sys.version_info[0] == 2: subdomain_list = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubdomainScan{1}>({3}Subdomain List{1})> {2}'.format(green, blue, cyan, red))) try: wp_subdomain_scan = cweb_pentest.subdomain_scan(hostname, subdomain_list) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. 
{1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() so_200, so_301, so_302, so_403 = wp_subdomain_scan print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Subdomain Scan Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) web_pentest_outputfile.write('\n') for i in so_200: print ('[+] 200 - ' + i) web_pentest_outputfile.write('[+] 200 - ' + i + '\n') for i in so_301: print ('[!] 301 - ' + i) web_pentest_outputfile.write('[+] 301 - ' + i + '\n') for i in so_302: print ('[!] 302 - ' + i) web_pentest_outputfile.write('[+] 302 - ' + i + '\n') for i in so_403: print ('[!] 403 - ' + i) web_pentest_outputfile.write('[+] 403 - ' + i + '\n') print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 10: try: wp_cms_detect = cweb_pentest.cms_detect(hostname) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] CMS Detect - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_cms_detect) web_pentest_outputfile.write('\n' + wp_cms_detect) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 11: try: wp_reverse_ip_lookup = cweb_pentest.reverse_ip_lookup(hostname) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. 
{1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Reverse IP Lookup Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_reverse_ip_lookup) web_pentest_outputfile.write('\n' + wp_reverse_ip_lookup) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 12: if sys.version_info[0] == 3: subnet_input = str(input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubnetLookup{1}>({3}CIDR or IP with NetMask{1})> {2}'.format(green, blue, cyan, red))) if sys.version_info[0] == 2: subnet_input = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>{0}SubnetLookup{1}>({3}CIDR or IP with NetMask{1})> {2}'.format(green, blue, cyan, red))) try: wp_subnet_lookup = cweb_pentest.subnet_lookup(subnet_input) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) print (reset + bold) print (wp_subnet_lookup) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest() elif choice == 13: try: wp_links_extract = cweb_pentest.links_extract(url) except: print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. 
{1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('[+] Links Extract Result - ' + url) web_pentest_outputfile.write('\n============================================================') print (reset + bold) print (wp_links_extract) web_pentest_outputfile.write('\n' + wp_links_extract) print (reset) print ('{0}='.format(red) * int(sizex)) web_pentest_outputfile.write('\n') web_pentest_outputfile.write('============================================================\n') web_pentest() elif choice == 90: print ('\n\n') main() elif choice == 95: print ('{2}[{1}#{2}] {3}- {4}Please don\'t put "/" in the end of the Target.{0}'.format(reset, green, blue, yellow, cyan)) if sys.version_info[0] == 3: target = str(input('{0}PureBlood{1}>{0}WebPentest{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red))) if sys.version_info[0] == 2: target = str(raw_input('{0}PureBlood{1}>{0}WebPentest{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red))) if '://' in target: ourl = target.replace('https://', '').replace('http://', '') else: ourl = target web_pentest_output = ourl + '-' + month + mday + '.txt' web_pentest_outputfile = open('outputs/web_pentest/' + web_pentest_output, 'a+') web_pentest_outputfile.write('\n\n\n[#] - ' + month + ' ' + mday + ' ' + current_time + '\n') set_url(target, 1) elif choice == 99: try: print ('\n{2}[{1}+{2}] {3}- {4}Output saved in outputs/web_pentest/' + web_pentest_output + '{0}'.format(reset, green, blue, yellow, cyan)) except: pass print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) sys.exit() else: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid choice!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) web_pentest() def main(): print (""" {3}[ {5}PureBlood Menu {3}] {2}01{3}) {5}Web Pentest {2}02{3}) {5}Generator {2}99{3}) {5}Exit {0}""".format(reset, red, green, blue, yellow, cyan)) if 
sys.version_info[0] == 3: try: choice = int(input('{0}PureBlood{1}> {2}'.format(green, blue, cyan))) except KeyboardInterrupt: print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) sys.exit() except ValueError: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) print ('\n\n') main() elif sys.version_info[0] == 2: try: choice = int(raw_input('{0}PureBlood{1}> {2}'.format(green, blue, cyan))) except KeyboardInterrupt: print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) sys.exit() except ValueError: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid number!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) print ('\n\n') main() if choice == 1: web_pentest() elif choice == 2: generator() elif choice == 99: print ('\n{2}[{1}+{2}] {3}- {4}Exiting...{0}'.format(reset, green, blue, yellow, cyan)) sys.exit() else: print ('{2}[{1}+{2}] {3}- {4}Please enter a valid choice!{0}'.format(reset, green, blue, yellow, cyan)) time.sleep(2) print ('\n\n') main() if __name__ == '__main__': if not os.path.exists('outputs'): os.mkdir('outputs') else: pass if not os.path.exists('outputs/generator'): os.mkdir('outputs/generator') else: pass if not os.path.exists('outputs/web_pentest'): os.mkdir('outputs/web_pentest') else: pass clear() banner() google_hacking = 'https://www.google.com/search?q=' main()
# ======================= server.py =======================
"""Mock HTTP(S) server helpers for pip's test suite.

Reconstructed formatting: the original physical line structure was lost
(statements collapsed onto single lines), making the module unparseable.
Code content is unchanged.
"""
import os
import signal
import ssl
import threading
from base64 import b64encode
from contextlib import contextmanager
from textwrap import dedent
from typing import TYPE_CHECKING
from unittest.mock import Mock

from pip._vendor.contextlib2 import nullcontext
from werkzeug.serving import WSGIRequestHandler
from werkzeug.serving import make_server as _make_server

if TYPE_CHECKING:
    from types import TracebackType
    from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Type

    from werkzeug.serving import BaseWSGIServer

    # Type aliases for the WSGI callable protocol (PEP 3333).
    Environ = Dict[str, str]
    Status = str
    Headers = Iterable[Tuple[str, str]]
    ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
    Write = Callable[[bytes], None]
    StartResponse = Callable[[Status, Headers, Optional[ExcInfo]], Write]
    Body = List[bytes]
    Responder = Callable[[Environ, StartResponse], Body]

    # Typing-only view of the server returned by make_mock_server(): a
    # werkzeug server with a `mock` attribute bolted on.
    class MockServer(BaseWSGIServer):
        mock = Mock()  # type: Mock

# Applies on Python 2 and Windows.
if not hasattr(signal, "pthread_sigmask"):
    # We're not relying on this behavior anywhere currently, it's just best
    # practice.
    blocked_signals = nullcontext
else:
    @contextmanager
    def blocked_signals():
        """Block all signals for e.g. starting a worker thread.
        """
        # valid_signals() was added in Python 3.8 (and not using it results
        # in a warning on pthread_sigmask() call)
        try:
            mask = signal.valid_signals()
        except AttributeError:
            mask = set(range(1, signal.NSIG))

        old_mask = signal.pthread_sigmask(signal.SIG_SETMASK, mask)
        try:
            yield
        finally:
            signal.pthread_sigmask(signal.SIG_SETMASK, old_mask)


class _RequestHandler(WSGIRequestHandler):
    def make_environ(self):
        """Extend the werkzeug environ with the client TLS certificate."""
        environ = super().make_environ()

        # From pallets/werkzeug#1469, will probably be in release after
        # 0.16.0.
        try:
            # binary_form=False gives nicer information, but wouldn't be
            # compatible with what Nginx or Apache could return.
            peer_cert = self.connection.getpeercert(binary_form=True)
            if peer_cert is not None:
                # Nginx and Apache use PEM format.
                environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(
                    peer_cert,
                )
        except ValueError:
            # SSL handshake hasn't finished.
            self.server.log("error", "Cannot fetch SSL peer certificate info")
        except AttributeError:
            # Not using TLS, the socket will not have getpeercert().
            pass

        return environ


def _mock_wsgi_adapter(mock):
    # type: (Callable[[Environ, StartResponse], Responder]) -> Responder
    """Uses a mock to record function arguments and provide
    the actual function that should respond.
    """
    def adapter(environ, start_response):
        # type: (Environ, StartResponse) -> Body
        try:
            responder = mock(environ, start_response)
        except StopIteration:
            raise RuntimeError('Ran out of mocked responses.')
        return responder(environ, start_response)

    return adapter


def make_mock_server(**kwargs):
    # type: (Any) -> MockServer
    """Creates a mock HTTP(S) server listening on a random port on localhost.

    The `mock` property of the returned server provides and records all WSGI
    interactions, so one approach to testing could be

        server = make_mock_server()
        server.mock.side_effects = [
            page1,
            page2,
        ]

        with server_running(server):
            # ... use server...
            ...

        assert server.mock.call_count > 0
        call_args_list = server.mock.call_args_list
        # `environ` is a dictionary defined as per PEP 3333 with the associated
        # contents. Additional properties may be added by werkzeug.
        environ, _ = call_args_list[0].args
        assert environ["PATH_INFO"].startswith("/hello/simple")

    Note that the server interactions take place in a different thread, so you
    do not want to touch the server.mock within the `server_running` block.

    Note also for pip interactions that "localhost" is a "secure origin", so
    be careful using this for failure tests of `--trusted-host`.
    """
    kwargs.setdefault("request_handler", _RequestHandler)
    mock = Mock()
    app = _mock_wsgi_adapter(mock)
    server = _make_server("localhost", 0, app=app, **kwargs)
    server.mock = mock
    return server


@contextmanager
def server_running(server):
    # type: (BaseWSGIServer) -> None
    """Context manager for running the provided server in a separate thread.
    """
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    with blocked_signals():
        thread.start()
    try:
        yield
    finally:
        server.shutdown()
        thread.join()


# Helper functions for making responses in a declarative way.


def text_html_response(text):
    # type: (str) -> Responder
    """Responder returning *text* as a UTF-8 HTML body with status 200."""
    def responder(environ, start_response):
        # type: (Environ, StartResponse) -> Body
        start_response("200 OK", [
            ("Content-Type", "text/html; charset=UTF-8"),
        ])
        return [text.encode('utf-8')]

    return responder


def html5_page(text):
    # type: (str) -> str
    """Wrap *text* in a minimal HTML5 document."""
    return dedent("""
    <!DOCTYPE html>
    <html>
    <body>
    {}
    </body>
    </html>
    """).strip().format(text)


def index_page(spec):
    # type: (Dict[str, str]) -> Responder
    """Responder serving an HTML page of links: {name: href}."""
    def link(name, value):
        return '<a href="{}">{}</a>'.format(
            value, name
        )

    links = ''.join(link(*kv) for kv in spec.items())
    return text_html_response(html5_page(links))


def package_page(spec):
    # type: (Dict[str, str]) -> Responder
    """Responder serving a PEP 503-style link page: {filename: href}."""
    def link(name, value):
        return '<a href="{}">{}</a>'.format(
            value, name
        )

    links = ''.join(link(*kv) for kv in spec.items())
    return text_html_response(html5_page(links))


def file_response(path):
    # type: (str) -> Responder
    """Responder streaming the file at *path* as an octet-stream."""
    def responder(environ, start_response):
        # type: (Environ, StartResponse) -> Body
        size = os.stat(path).st_size
        start_response(
            "200 OK", [
                ("Content-Type", "application/octet-stream"),
                ("Content-Length", str(size)),
            ],
        )

        with open(path, 'rb') as f:
            return [f.read()]

    return responder


def authorization_response(path):
    # type: (str) -> Responder
    """Responder serving *path* only behind HTTP Basic auth
    (USERNAME:PASSWORD); otherwise answers 401.

    NOTE(review): the file body is sent even on the 401 branch — presumably
    intentional for tests, but verify against callers.
    """
    correct_auth = "Basic " + b64encode(b"USERNAME:PASSWORD").decode("ascii")

    def responder(environ, start_response):
        # type: (Environ, StartResponse) -> Body
        if environ.get('HTTP_AUTHORIZATION') == correct_auth:
            size = os.stat(path).st_size
            start_response(
                "200 OK", [
                    ("Content-Type", "application/octet-stream"),
                    ("Content-Length", str(size)),
                ],
            )
        else:
            start_response(
                "401 Unauthorized", [
                    ("WWW-Authenticate", "Basic"),
                ],
            )

        with open(path, 'rb') as f:
            return [f.read()]

    return responder
# ======================= mobility.py =======================
#!/bin/sh
### BEGIN INIT INFO
# Provides:          RoverMobilityServer
# Required-Start:    $remote_fs $network $syslog
# Required_Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: Simple script to start a program at boot
# Description:       Rover Mobility Server
### END INIT INFO
# NOTE(review): the `#!/bin/sh` shebang on a Python file is wrong for direct
# execution; kept only because the init-info block was written around it.
# Rover mobility server: reads joystick input via pygame and forwards motor
# commands over UDP to the Arduino.  Formatting reconstructed — the original
# physical line structure was lost.

from socket import *
from datetime import datetime
import subprocess
from subprocess import Popen
from threading import Thread
from deepstream import post, get
import time
import pygame
import numpy as np
import sys
import os

# Detect platform from `uname -m` (note trailing newline in the output).
uname = str(Popen(["uname", "-m"], stdout=subprocess.PIPE,
                  stderr=subprocess.PIPE).communicate()[0].decode("utf-8"))
# NOTE(review): "arm6l" looks like a typo for "armv6l" — confirm target HW.
isPi = True if (uname == "armv7l\n" or uname == "arm6l\n") else False
isNvidia = True if uname == "arm64\n" else False

if isPi:
    import RPi.GPIO as GPIO
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    redLed = 18
    greenLed = 23
    blueLed = 24
    GPIO.setup(redLed, GPIO.OUT)    # Red LED
    GPIO.setup(greenLed, GPIO.OUT)  # Green LED
    GPIO.setup(blueLed, GPIO.OUT)   # Blue LED

# System setup wait
#time.sleep(5)

# Arduino address and connection info
address = ("192.168.1.10", 5000)
client_socket = socket(AF_INET, SOCK_DGRAM)
client_socket.settimeout(0.5)

# Initialize pygame and joysticks (headless video driver).
os.environ["SDL_VIDEODRIVER"] = "dummy"
pygame.init()
pygame.joystick.init()

# Global declarations
global paused
global controlString
global controls      # Holds file configurations
global modeNum       # Current mode index to toggle modeNames lst
global mode          # Current set name (string) in use
global modeNames     # List of set names (strings) from .txt file
global actionTime    # Seconds needed to trigger pause / mode change
global pausedLEDs    # LED settings for paused mode
paused = False
modeNum = 0
actionTime = 3
pausedLEDs = {"R": True, "G": False, "B": False}  # Red for paused

# BUG FIX: dsMode was read inside the startup loop before it was ever
# assigned; the resulting NameError was swallowed by the bare `except`, so
# the loop could never succeed.  Initialize to "manual" — consistent with
# the "Posted mode to manual..." message and requestControl() below.
dsMode = "manual"

print("Going through startup process...")
while True:
    success = None
    try:
        success = post({"mode": dsMode}, "mode")
    except:
        pass
    time.sleep(.1)
    print(str(success))
    if success == "SUCCESS":
        break
print("Posted mode to manual...")

# List in order of socket output values
actionList = ["motor1", "motor2", "arm2", "arm3", "joint1", "joint4",
              "joint5a", "joint5b", "reserved1", "ledMode"]

global roverActions


def setRoverActions():
    """Reset roverActions to its neutral state (all inputs cleared)."""
    global roverActions
    roverActions = {
        "motor1": {"special": "motor", "rate": "motor", "direction": 1, "value": 0},
        "motor2": {"special": "motor", "rate": "motor", "direction": 1, "value": 0},
        "arm3": {"special": "motor", "rate": "none", "direction": 1, "value": 0},
        "joint1": {"special": "none", "rate": "none", "direction": 1, "value": 0},
        "arm2": {"special": "motor", "rate": "none", "direction": 1, "value": 0},
        "joint4": {"special": "none", "rate": "none", "direction": 1, "value": 0},
        "joint5a": {"special": "none", "rate": "none", "direction": 1, "value": 0},
        "joint5b": {"special": "none", "rate": "none", "direction": 1, "value": 0},
        "reserved1": {"special": "none", "rate": "none", "direction": 1, "value": 0},
        "ledMode": {"special": "none", "rate": "none", "direction": 1, "value": 0}}
    # Not rover actions, but stored in same location. These actions trigger
    # events within this module
    roverActions["pause"] = {"held": False, "direction": 1, "value": 0, "set": 0}  # Added to support "pause" action
    roverActions["mode"] = {"held": False, "direction": 1, "value": 0}  # Added to support "mode" action
    roverActions["throttle"] = {"direction": 1, "value": 0.5}  # Throttle value for "motor" rate multiplier (-1 to 1)
    roverActions["throttleStep"] = {"held": False, "direction": 1, "value": 0}  # Added to support button throttle
    #roverActions["auto"] = {"held": False, "direction": 1, "value": 0, "set": 0}  # Added to support "autoManual" mode


setRoverActions()  # Initiate roverActions to enter loop


# Initialize connection to Arduino
def initArduinoConnection():
    client_socket.sendto(bytes("0,0,0,0,0,0,0,0,0,1", "utf-8"), address)


initArduinoConnection()


def startUp(argv):
    """Load the controller configuration file (optional argv[1]) and set
    the initial mode/LED state.

    NOTE(review): the config file is parsed with eval() — only trusted,
    locally-authored files must be used.
    """
    global controlString, controls, modeNames, mode, roverActions
    fileName = "logitech3dReset.txt"
    if len(sys.argv) == 2:
        fileName = str(sys.argv[1])
    elif len(sys.argv) > 2:
        print("Exceeded arguments")
        sys.exit()
    try:
        controlString = open(fileName).read().replace('\n', '').replace('\r', '')
    except IOError:
        print ("Unable to open file")
        sys.exit()
    controls = eval(controlString)
    modeNames = list(sorted(controls.keys()))
    mode = modeNames[modeNum]  # mode 0 = both, mode 1 = mobility, mode 2 = arm
    roverActions["mode"]["set"] = modeNum
    roverActions["ledMode"]["value"] = controls[mode]["ledCode"]
    setLed()


def stop():
    global paused
    paused = True


def getZero(*arg):
    return 0


def getOne(*arg):
    return 1


def getRate():
    # Current throttle rate (direction lets an axis be reversed).
    return roverActions["throttle"]["direction"] * roverActions["throttle"]["value"]


# If axis needs to be reversed
specialMultipliers = {"motor": 127, "none": 1}
rateMultipliers = {"motor": getRate, "none": getOne}


def throttleStep():
    """Step the throttle up/down by 0.2 on a fresh button press, clamped to
    [-0.6, 0.8]."""
    global roverActions
    if (not roverActions["throttleStep"]["held"] and roverActions["throttleStep"]["value"]):  # New button press
        roverActions["throttleStep"]["held"] = True
        throttle = round(roverActions["throttle"]["value"] * 10.0) / 10  # Round out analog value to tenths place
        change = roverActions["throttleStep"]["direction"] * roverActions["throttleStep"]["value"] * 0.2
        throttle += change
        if throttle < -0.6:
            throttle = -0.6
        if throttle > 0.8:
            throttle = 0.8
        roverActions["throttle"]["value"] = throttle
    if (roverActions["throttleStep"]["held"] and not roverActions["throttleStep"]["value"]):  # Button held, but released
        roverActions["throttleStep"]["held"] = False


def computeSpeed(key):
    """Map one action's raw input to the integer speed sent to the Arduino."""
    val = roverActions[key]
    throttleValue = rateMultipliers[val["rate"]]()  # Get current rate multiplier (-1 to +1), calls getRate or getOne accordingly
    calcThrot = np.interp(throttleValue, [-1, 1], [0, 1])
    speed = int(specialMultipliers[val["special"]] * calcThrot * val["direction"] * val["value"])
    return speed


def setLed():
    """Drive the status LEDs from the paused flag or the active mode."""
    if paused:
        myLeds = pausedLEDs
    else:
        myLeds = controls[mode]["leds"]
    if isPi:
        GPIO.output(redLed, GPIO.HIGH) if myLeds["R"] else GPIO.output(redLed, GPIO.LOW)
        GPIO.output(greenLed, GPIO.HIGH) if myLeds["G"] else GPIO.output(greenLed, GPIO.LOW)
        GPIO.output(blueLed, GPIO.HIGH) if myLeds["B"] else GPIO.output(blueLed, GPIO.LOW)


def checkPause():
    """Toggle `paused` when the pause button has been held >= actionTime s."""
    global paused, roverActions
    if (not roverActions["pause"]["held"] and roverActions["pause"]["value"]):  # New button press
        roverActions["pause"]["held"] = True
        roverActions["pause"]["lastpress"] = datetime.now()
    if (roverActions["pause"]["held"] and not roverActions["pause"]["value"]):  # Button held, but now released
        roverActions["pause"]["held"] = False
    if (roverActions["pause"]["held"] and roverActions["pause"]["value"] and (
            datetime.now() - roverActions["pause"]["lastpress"]).seconds >= actionTime):  # Button held for required time
        roverActions["pause"]["lastpress"] = datetime.now()  # Keep updating time as button may continue to be held
        paused = not paused


def checkModes():
    """Advance to the next control mode when the mode button has been held
    >= actionTime seconds (no-op while paused)."""
    global modeNum, mode, roverActions
    if (not roverActions["mode"]["held"] and roverActions["mode"]["value"]):  # New button press
        roverActions["mode"]["held"] = True
        roverActions["mode"]["lastpress"] = datetime.now()
    if (roverActions["mode"]["held"] and not roverActions["mode"]["value"]):  # Button held, but now released
        roverActions["mode"]["held"] = False
    if (roverActions["mode"]["held"] and roverActions["mode"]["value"] and (datetime.now() - roverActions["mode"][
            "lastpress"]).seconds >= actionTime and not paused):  # Button held for required time
        roverActions["mode"]["lastpress"] = datetime.now()  # Keep updating time as button may continue to be held
        modeNum += 1
        if modeNum >= len(modeNames):
            modeNum = 0
        mode = modeNames[modeNum]
        setRoverActions()  # Clear all inputs
        roverActions["mode"]["set"] = modeNum
        roverActions["ledMode"]["value"] = controls[mode]["ledCode"]


def checkButtons(currentJoystick):
    """Copy mapped button states from a joystick into roverActions."""
    global roverActions
    name = currentJoystick.get_name()
    joyForSet = controls[mode].get(name)  # Get joystick in current set
    if (joyForSet):
        typeForJoy = joyForSet.get("buttons")  # Get joystick control type
        if (typeForJoy):
            buttons = currentJoystick.get_numbuttons()
            for i in range(buttons):
                control_input = typeForJoy.get(i)  # Check if input defined for controller
                if (control_input):
                    val = currentJoystick.get_button(i)  # Read button value, assign to roverActions
                    if (val == 0 and roverActions[control_input[0]]["direction"] == control_input[1]) or val != 0:
                        roverActions[control_input[0]]["value"] = val
                        roverActions[control_input[0]]["direction"] = control_input[1]  # Set direction multiplier


def checkAxes(currentJoystick):
    """Copy mapped axis values from a joystick into roverActions."""
    global roverActions
    name = currentJoystick.get_name()
    joyForSet = controls[mode].get(name)  # Get joystick in current set
    if (joyForSet):
        typeForJoy = joyForSet.get("axes")  # Get joystick control type
        if (typeForJoy):
            axes = currentJoystick.get_numaxes()
            for i in range(axes):
                control_input = typeForJoy.get(i)  # Check if input defined for controller
                if (control_input):
                    val = currentJoystick.get_axis(i)  # Read axis value, assign to roverActions
                    roverActions[control_input[0]]["value"] = val
                    roverActions[control_input[0]]["direction"] = control_input[1]  # Set direction multiplier


def checkHats(currentJoystick):
    """Copy mapped hat (d-pad) values from a joystick into roverActions."""
    global roverActions
    name = currentJoystick.get_name()
    joyForSet = controls[mode].get(name)  # Get joystick in current set
    if (joyForSet):
        typeForJoy = joyForSet.get("hats")  # Get joystick control type
        if (typeForJoy):
            count = currentJoystick.get_numhats()
            for x in range(count):
                val = currentJoystick.get_hat(x)  # Store hat value, needed more than once
                for y in range(len(val)):  # Get the number of controller values
                    # Input may be stored multiple times, check both
                    control_input = typeForJoy.get((x, y))  # Check if east/west defined
                    if (control_input):
                        roverActions[control_input[0]]["value"] = val[y]
                        roverActions[control_input[0]]["direction"] = control_input[1]  # Set direction multiplier


def checkDsButton():
    """Set dsButton after the 'auto' action is held >= actionTime seconds.

    NOTE(review): currently unused, and roverActions["auto"] is commented out
    in setRoverActions(), so calling this would raise KeyError.
    """
    global dsButton, roverActions
    if (not roverActions["auto"]["held"] and roverActions["auto"]["value"]):  # New button press
        roverActions["auto"]["held"] = True
        roverActions["auto"]["lastpress"] = datetime.now()
    if (roverActions["auto"]["held"] and not roverActions["auto"]["value"]):  # Button held, but now released
        roverActions["auto"]["held"] = False
        dsButton = False
    if (roverActions["auto"]["held"] and roverActions["auto"]["value"] and (
            datetime.now() - roverActions["auto"]["lastpress"]).seconds >= actionTime):  # Button held for required time
        roverActions["auto"]["lastpress"] = datetime.now()  # Keep updating time as button may continue to be held
        dsButton = True


# NOTE(review): reconstructed wrapper — the original text placed this loop at
# module level, which would block forever before the rest of the module loads;
# the commented-out Thread(target=sendToDeepstream) at the bottom of the file
# indicates it was meant to be a function.  Confirm against upstream.
def sendToDeepstream():
    """Heartbeat loop: publish mobilityTime and mirror the deepstream mode."""
    global dsMode
    while True:
        try:
            post({"mobilityTime": int(time.time())}, "mobilityTime")
            time.sleep(.1)
            m = get("mode")
            if type(m) == dict:
                dsMode = m["mode"]
                # NOTE(review): prevDsMode is never defined or updated
                # anywhere in this file, so this branch always raises and is
                # swallowed below — TODO fix the intended change-detection.
                if prevDsMode != dsMode:
                    client_socket.sendto(bytes("0,0,0,0,0,0,0,0,0,1", "utf-8"), address)
        except:
            print("Cannot send to Deepstream")
        time.sleep(.1)


def requestControl():
    """Post manual mode to deepstream and re-handshake with the Arduino."""
    try:
        modeRecord = post({"mode": "manual"}, "mode")
        print("Updated mode record:", str(modeRecord))
        time.sleep(.1)
        initArduinoConnection()
        print("Trying to initialize a connection to the arduino...")
    except:
        print("Cannot access mode record")


def main(*argv):
    """Main control loop: poll joysticks, compute speeds, send to Arduino."""
    global paused
    startUp(argv)  # Load appropriate controller(s) config file
    joystick_count = pygame.joystick.get_count()
    for i in range(joystick_count):
        pygame.joystick.Joystick(i).init()
    while True:
        pygame.event.pump()  # Keeps pygame in sync with system, performs internal upkeep
        joystick_count = pygame.joystick.get_count()
        if joystick_count == 0:
            stop()
        for i in range(joystick_count):
            joystick = pygame.joystick.Joystick(i)
            checkAxes(joystick)
            checkHats(joystick)
            checkButtons(joystick)
        throttleStep()
        checkPause()
        checkModes()
        setLed()
        print("Sending Arduino command")
        try:
            #client_socket.sendto(bytes("0,0,0,0,0,0,0,0,0,1", "utf-8"), address)
            re_data = client_socket.recvfrom(512)
            #print(bytes.decode(re_data[0]))  # Debug
            if bytes.decode(re_data[0]) == "r":
                #print("Received packet")  # Debug
                if paused:
                    outVals = list(map(getZero, actionList))
                else:
                    outVals = list(map(computeSpeed, actionList))
                # Output string determined by actionList[] order
                outVals = list(map(str, outVals))
                outString = ",".join(outVals)
                if dsMode == "manual":
                    #print("Into the DSManual Mode")
                    client_socket.sendto(bytes(outString, "utf-8"), address)
                    #print("After Sending The Commands To The Socket")
                    print(outString)
                else:
                    print("Not in manual mode")
        except:
            print("Send failed")
            #client_socket.sendto(bytes("0,0,0,0,0,0,0,0,0,1", "utf-8"), address)


if __name__ == '__main__':
    main()
    '''
    t1 = Thread(target = main)
    t2 = Thread(target = sendToDeepstream)
    t1.start()
    t2.start()
    '''
# ======================= 02-olog_integration.py =======================
# IPython-profile startup snippet: forward bluesky 'start' documents to Olog
# through a bounded background queue so logbook outages never block scans.
import os

# BUG FIX: os.getenv() returns a *string* (or the literal default), so the
# original `== 1` comparison meant that explicitly setting
# BS_ENABLE_OLOG_CALLBACK=1 *disabled* the callback (only the unset default
# enabled it).  Compare strings; unset still defaults to enabled.
if str(os.getenv("BS_ENABLE_OLOG_CALLBACK", "1")) == "1":
    from bluesky.callbacks.olog import logbook_cb_factory
    from functools import partial
    from pyOlog import SimpleOlogClient
    import queue
    import threading
    from warnings import warn

    import bluesky.callbacks.olog

    # Jinja templates rendered from the RunStart document.
    # NOTE(review): the original line breaks inside these templates were lost
    # in the file mangling; layout reconstructed — confirm rendered output.
    bluesky.callbacks.olog.TEMPLATES['long'] = """
{{- start.plan_name }} ['{{ start.uid[:6] }}'] (scan num: {{ start.scan_id }})

Scan Plan
---------
{{ start.plan_name }}
{% if 'plan_args' in start %}
{%- for k, v in start.plan_args | dictsort %}
    {{ k }}: {{ v }}
{%- endfor %}
{% endif %}

{% if 'signature' in start -%}
Call:
    {{ start.signature }}
{% endif %}
Metadata
--------
{% for k, v in start.items() -%}
{%- if k not in ['plan_name', 'plan_args'] -%}{{ k }} : {{ v }}
{% endif -%}
{%- endfor -%}"""

    bluesky.callbacks.olog.TEMPLATES['desc'] = """
{{- start.plan_name }} ['{{ start.uid[:6] }}'] (scan num: {{ start.scan_id }})"""

    bluesky.callbacks.olog.TEMPLATES['call'] = """RE({{ start.plan_name }}(
{%- for k, v in start.plan_args.items() %}{%- if not loop.first %}   {% endif %}{{ k }}={{ v }}
{%- if not loop.last %},
{% endif %}{% endfor %}))
"""

    # NOTE(review): the original assigned LOGBOOKS = ['Comissioning'] first,
    # which was immediately shadowed by the assignment below; only the
    # effective value is kept.
    # Set up the logbook. This configures bluesky's summaries of
    # data acquisition (scan type, ID, etc.).
    LOGBOOKS = ['Data Acquisition']  # list of logbook names to publish to
    simple_olog_client = SimpleOlogClient()
    generic_logbook_func = simple_olog_client.log
    configured_logbook_func = partial(generic_logbook_func, logbooks=LOGBOOKS)

    # This is for ophyd.commands.get_logbook, which simply looks for
    # a variable called 'logbook' in the global IPython namespace.
    logbook = simple_olog_client

    # BUG FIX: `desc_templates` was referenced but never defined, raising
    # NameError at profile startup.  An empty dispatch table keeps the default
    # template for every descriptor.
    # TODO(review): populate per-descriptor templates if any were intended.
    desc_templates = {}
    cb = logbook_cb_factory(configured_logbook_func,
                            desc_dispatch=desc_templates)

    def submit_to_olog(queue, cb):
        """Worker loop: drain (name, doc) pairs and hand them to the callback.

        Exceptions from the Olog client are reduced to warnings so a flaky
        logbook cannot kill the worker thread.
        """
        while True:
            name, doc = queue.get()  # waits until document is available
            try:
                cb(name, doc)
            except Exception as exc:
                warn('This olog is giving errors. This will not be logged.'
                     'Error:' + str(exc))

    olog_queue = queue.Queue(maxsize=100)
    olog_thread = threading.Thread(target=submit_to_olog,
                                   args=(olog_queue, cb),
                                   daemon=True)
    olog_thread.start()

    def send_to_olog_queue(name, doc):
        """Non-blocking enqueue; drops (with a warning) when the queue is full."""
        try:
            olog_queue.put((name, doc), block=False)
        except queue.Full:
            warn('The olog queue is full. This will not be logged.')

    # `RE` is the bluesky RunEngine provided earlier in the IPython profile
    # startup namespace.
    RE.subscribe(send_to_olog_queue, 'start')
# ======================= crash.py =======================
"""Crash reproduction/verification for syzbot cases (syzscope).

Formatting reconstructed: the original physical line structure was lost.
"""
import os
import re
import stat
import sys
import logging
import argparse
import time
import threading
import json
import pathlib
import queue
from subprocess import call, Popen, PIPE, STDOUT

import syzscope.interface.utilities as utilities
from syzscope.interface.vm import VM
from dateutil import parser as time_parser

from .syzbotCrawler import Crawler

# Console-log patterns used to recognize VM boot and kernel crash reports.
startup_regx = r'Debian GNU\/Linux \d+ syzkaller ttyS\d+'
boundary_regx = r'======================================================'
call_trace_regx = r'Call Trace:'
message_drop_regx = r'printk messages dropped'
panic_regx = r'Kernel panic'
kasan_mem_regx = r'BUG: KASAN: ([a-z\\-]+) in ([a-zA-Z0-9_]+).*'
kasan_double_free_regx = r'BUG: KASAN: double-free or invalid-free in ([a-zA-Z0-9_]+).*'
kasan_write_regx = r'KASAN: ([a-z\\-]+) Write in ([a-zA-Z0-9_]+).*'
kasan_read_regx = r'KASAN: ([a-z\\-]+) Read in ([a-zA-Z0-9_]+).*'
double_free_regx = r'KASAN: double-free or invalid-free in ([a-zA-Z0-9_]+).*'
magic_regx = r'\?!\?MAGIC\?!\?read->(\w*) size->(\d*)'
write_regx = r'Write of size (\d+) at addr (\w*)'
read_regx = r'Read of size (\d+) at addr (\w*)'

default_port = 3777
project_path = ""

# Reproduction verdicts (see repro_on_fixed_kernel).
NONCRASH = 0     # PoC did not crash the kernel
CONFIRM = 1      # PoC triggered the target OOB/UAF-write crash
SUSPICIOUS = 2   # PoC crashed, but not with the target bug class

thread_fn = None


class CrashChecker:
    def __init__(self, project_path, case_path, ssh_port, logger, debug, offset,
                 qemu_num, store_read=True, compiler="gcc-7", max_compiling_kernel=1):
        """Set up per-case paths, logging, and QEMU/compiler parameters.

        NOTE(review): `offset` is accepted but not stored here — presumably
        consumed elsewhere; confirm it is not silently dropped.
        """
        os.makedirs("{}/poc".format(case_path), exist_ok=True)
        self.logger = logger
        self.project_path = project_path
        self.package_path = os.path.join(project_path, "syzscope")
        self.case_path = case_path
        self.image_path = "{}/img".format(self.case_path)
        self.linux_path = "{}/linux".format(self.case_path)
        self.qemu_num = qemu_num
        self.ssh_port = ssh_port
        self.kasan_func_list = self.read_kasan_funcs()
        self.debug = debug
        self.store_read = store_read
        self.compiler = compiler
        self.kill_qemu = False
        self.max_compiling_kernel = max_compiling_kernel
        self.queue = queue.Queue()
        self.case_logger = self.__init_case_logger("{}-info".format(case_path))
def run(self, syz_repro, syz_commit, log=None, linux_commit=None, config=None, c_repro=None, i386=None): self.case_logger.info("=============================crash.run=============================") if log != None: exitcode = self.deploy_linux(linux_commit, config, 0) if exitcode == 1: self.logger.info("Error occur at deploy_linux.sh") return [False, None] ori_crash_report, trigger = self.read_crash(syz_repro, syz_commit, log, 0, c_repro, i386) if ori_crash_report == []: self.logger.info("No crash trigger by original poc") return [False, None] crashes_path = self.extract_existed_crash(self.case_path) self.case_logger.info("Found {} existed crashes".format(len(crashes_path))) for path in crashes_path: self.case_logger.info("Inspect crash: {}".format(path)) new_crash_reports = self.read_existed_crash(path) if self.compare_crashes(ori_crash_report, new_crash_reports): return [True, path] return [False, None] def check_read_before_write(self, path): new_crash_reports = self.read_existed_crash(path) for each_report in new_crash_reports: for line in each_report: if utilities.regx_match(magic_regx, line): return True return False def diff_testcase(self, crash_path, syz_repro): new_testcase = [] old_testcase = [] f = open(os.path.join(crash_path, "repro.prog"), "r") text = f.readlines() for line in text: if len(line) > 0 and line[0] != '#': line = line.strip('\n') new_testcase.append(line) r = utilities.request_get(syz_repro) text = r.text.split('\n') for line in text: if len(line) > 0 and line[0] != '#': line = line.strip('\n') old_testcase.append(line) return utilities.levenshtein("\n".join(old_testcase), "\n".join(new_testcase)) def repro_on_fixed_kernel(self, syz_commit, linux_commit=None, config=None, c_repro=None, i386=None, patch_commit=None, crashes_path=None, limitedMutation=False): if crashes_path == None: crashes_path = self.extract_existed_crash(self.case_path) if len(crashes_path) == 0: return [] 
self.case_logger.info("=============================crash.repro_on_fixed_kernel=============================") res = [] reproduceable = {} #check if the patch can be applied exitcode = self.patch_applying_check(linux_commit, config, patch_commit) if exitcode == 1: self.logger.info("Error occur at patch_applying_check.sh") return res #reproduce on unfixed kernel for path in crashes_path: key = os.path.basename(path) path_repro = os.path.join(path, "repro.prog") self.case_logger.info("Go for {}".format(path_repro)) ori_crash_report, trigger = self.read_crash(path_repro, syz_commit, None, 0, c_repro, i386) if ori_crash_report != []: if trigger: reproduceable[key] = CONFIRM else: reproduceable[key] = SUSPICIOUS else: reproduceable[key] = NONCRASH #apply the patch exitcode = self.deploy_linux(patch_commit, config, 1) if exitcode == 1: self.logger.info("Error occur at deploy_linux.sh") return res #reproduce on fixed kernel for path in crashes_path: key = os.path.basename(path) path_repro = os.path.join(path, "repro.prog") ori_crash_report, trigger = self.read_crash(path_repro, syz_commit, None, 1, c_repro, i386) if ori_crash_report != []: if trigger: self.logger.info("Reproduceable: {}".format(key)) else: if reproduceable[key] == CONFIRM: # still crash but no OOB/UAF write any more self.logger.info("Slightly Fixed: {}".format(key)) res.append(path) else: if reproduceable[key] == CONFIRM: self.logger.info("Fixed: {}".format(key)) res.append(path) if reproduceable[key] == NONCRASH: self.logger.info("Invalid crash: {} unreproduceable on both fixed and unfixed kernel".format(key)) if reproduceable[key] == SUSPICIOUS: self.logger.info("Suspicious crash: {} triggered a crash but doesn't belong to OOB/UAF write".format(key)) exitcode = self.deploy_linux(linux_commit, config, 0) return res def patch_applying_check(self, linux_commit, config, patch_commit): target = os.path.join(self.package_path, "scripts/patch_applying_check.sh") utilities.chmodX(target) p = Popen([target, 
self.linux_path, linux_commit, config, patch_commit, self.compiler, str(self.max_compiling_kernel), ], stdout=PIPE, stderr=STDOUT) with p.stdout: self.__log_subprocess_output(p.stdout, logging.INFO) exitcode = p.wait() return exitcode def read_kasan_funcs(self): res = [] path = os.path.join(self.package_path, "resources/kasan_related_funcs") with open(path, "r") as f: lines = f.readlines() for line in lines: res.append(line.strip('\n')) return res def compare_crashes(self, ori_crash_report, new_crash_reports): ratio_allocation = 1 ratio_call_race = 1 res_allocation = False res_call_trace = False for report1 in ori_crash_report: if len(report1) > 2: for report2 in new_crash_reports: if len(report2) > 2: res1 = self.__match_allocated_section(report1, report2) res2 = self.__match_call_trace(report1, report2) if ratio_allocation > res1[1]: ratio_allocation = res1[1] res_allocation = res1[0] if ratio_call_race > res2[1]: ratio_call_race = res2[1] res_call_trace = res2[0] self.logger.info("ratio for allocation: {} ratio for call trace: {}".format(ratio_allocation, ratio_call_race)) return res_allocation or res_call_trace def extract_existed_crash(self, path): crash_path = os.path.join(path, "crashes") #extrace the latest crashes if os.path.isdir(crash_path): for i in range(0,99): crash_path_tmp = os.path.join(path, "crashes-{}".format(i)) if os.path.isdir(crash_path_tmp): crash_path = crash_path_tmp else: break res = [] if os.path.isdir(crash_path): for case in os.listdir(crash_path): description_file = "{}/{}/description".format(crash_path, case) if os.path.isfile(description_file): with open(description_file, "r") as f: line = f.readline() if self.store_read and utilities.regx_match(kasan_read_regx, line) and os.path.isfile('{}/{}/repro.prog'.format(crash_path, case)): res.append(os.path.join(crash_path, case)) continue if utilities.regx_match(kasan_write_regx, line) and os.path.isfile('{}/{}/repro.prog'.format(crash_path, case)): res.append(os.path.join(crash_path, 
case)) continue if utilities.regx_match(double_free_regx, line) and os.path.isfile('{}/{}/repro.prog'.format(crash_path, case)): res.append(os.path.join(crash_path, case)) continue return res def read_crash(self, syz_repro, syz_commit, log, fixed, c_repro, i386): self.kill_qemu = False res = [] trigger = False repro_type = utilities.CASE if utilities.regx_match(r'https:\/\/syzkaller\.appspot\.com\/', syz_repro): repro_type = utilities.URL c_hash = "" if repro_type == utilities.CASE: try: c_hash = syz_repro.split('/')[-2] except: self.logger.info("Failed to parse repro {}".format(syz_repro)) else: c_hash = syz_commit + "-ori" if log != None: res = self.read_from_log(log) else: self.case_logger.info("=============================crash.read_crash=============================") for i in range(0, self.qemu_num): x = threading.Thread(target=self.trigger_ori_crash, args=(syz_repro, syz_commit, c_repro, i386, i, c_hash, repro_type, fixed, ), name="trigger_ori_crash-{}".format(i)) x.start() if self.debug: x.join() #crashes, trigger = self.trigger_ori_crash(syz_repro, syz_commit, c_repro, i386, fixed) for i in range(0, self.qemu_num): [crashes, high_risk] = self.queue.get(block=True) if not trigger and high_risk: trigger = high_risk res = crashes self.kill_qemu = True if utilities.regx_match(r'https:\/\/syzkaller\.appspot\.com\/', syz_repro): self.save_crash_log(res, "ori") else: self.save_crash_log(res, c_hash[:7]) if res == []: res = crashes if len(res) == 1 and isinstance(res[0], str): self.case_logger.error(res[0]) self.logger.error(res[0]) return [], trigger return res, trigger def read_existed_crash(self, crash_path): res = [] crash = [] record_flag = 0 kasan_flag = 0 report_path = os.path.join(crash_path, "repro.report") if os.path.isfile(report_path): with open(report_path, "r") as f: lines = f.readlines() for line in lines: if utilities.regx_match(boundary_regx, line) or \ utilities.regx_match(message_drop_regx, line): record_flag ^= 1 if record_flag == 0 and 
kasan_flag == 1: res.append(crash) crash = [] kasan_flag ^= 1 continue if utilities.regx_match(kasan_mem_regx, line) or \ utilities.regx_match(kasan_double_free_regx, line): kasan_flag ^= 1 if record_flag and kasan_flag: crash.append(line) return res def read_from_log(self, log): res = [] crash = [] record_flag = 0 kasan_flag = 0 r = utilities.request_get(log) text = r.text.split('\n') for line in text: if record_flag == 0 and utilities.regx_match(call_trace_regx, line): record_flag ^= 1 kasan_flag ^= 1 if utilities.regx_match(boundary_regx, line) or \ utilities.regx_match(message_drop_regx, line): record_flag ^= 1 if record_flag == 0 and kasan_flag == 1: res.append(crash) crash = [] continue if utilities.regx_match(kasan_mem_regx, line) or \ utilities.regx_match(kasan_double_free_regx, line): kasan_flag ^= 1 if record_flag and kasan_flag: crash.append(line) return res def save_crash_log(self, log, name): with open("{}/poc/crash_log-{}".format(self.case_path, name), "w+") as f: for each in log: for line in each: f.write(line+"\n") f.write("\n") def deploy_linux(self, commit, config, fixed): target = os.path.join(self.package_path, "scripts/deploy_linux.sh") utilities.chmodX(target) p = None if commit == None and config == None: #self.logger.info("run: scripts/deploy_linux.sh {} {}".format(self.linux_path, patch_path)) p = Popen([target, self.compiler, str(fixed), self.linux_path, self.project_path, str(self.max_compiling_kernel)], stdout=PIPE, stderr=STDOUT) else: #self.logger.info("run: scripts/deploy_linux.sh {} {} {} {}".format(self.linux_path, patch_path, commit, config)) p = Popen([target, self.compiler, str(fixed), self.linux_path, self.project_path, str(self.max_compiling_kernel), commit, config, "0"], stdout=PIPE, stderr=STDOUT) with p.stdout: self.__log_subprocess_output(p.stdout, logging.INFO) exitcode = p.wait() return exitcode def trigger_ori_crash(self, syz_repro, syz_commit, c_repro, i386, th_index,c_hash,repro_type,fixed=0): res = [] 
trgger_hunted_bug = False qemu = VM(hash_tag=c_hash, linux=self.linux_path, port=self.ssh_port+th_index, image=self.image_path, proj_path="{}/poc/".format(self.case_path) ,log_name="qemu-{}.log".format(c_hash), log_suffix=str(th_index), timeout=10*60, debug=self.debug) qemu.qemu_logger.info("QEMU-{} launched. Fixed={}\n".format(th_index, fixed)) p = qemu.run() extract_report = False qemu_close = False out_begin = 0 record_flag = 0 kasan_flag = 0 write_flag = 0 double_free_flag = 0 read_flag = 0 crash = [] try: while not qemu_close: # We need one more iteration to get remain output from qemu if p.poll() != None and not qemu.qemu_ready: qemu_close = True if qemu.qemu_ready and out_begin == 0: ok = self.upload_exp(syz_repro, self.ssh_port+th_index, syz_commit, repro_type, c_repro, i386, fixed, qemu.qemu_logger) if not ok: p.kill() break ok = self.run_exp(syz_repro, self.ssh_port+th_index, repro_type, ok, i386, th_index, qemu.qemu_logger) if not ok: p.kill() break extract_report=True if extract_report: out_end = len(qemu.output) for line in qemu.output[out_begin:]: if utilities.regx_match(call_trace_regx, line) or \ utilities.regx_match(message_drop_regx, line): record_flag = 1 if utilities.regx_match(boundary_regx, line) or \ utilities.regx_match(panic_regx, line): if record_flag == 1: res.append(crash) crash = [] if kasan_flag and (write_flag or read_flag or double_free_flag): trgger_hunted_bug = True if write_flag: self.logger.debug("QEMU threaded {}: OOB/UAF write triggered".format(th_index)) qemu.kill_qemu = True if double_free_flag: self.logger.debug("QEMU threaded {}: Double free triggered".format(th_index)) if read_flag: self.logger.debug("QEMU threaded {}: OOB/UAF read triggered".format(th_index)) break record_flag = 1 continue if (utilities.regx_match(kasan_mem_regx, line) and 'null-ptr-deref' not in line): kasan_flag = 1 if utilities.regx_match(write_regx, line): write_flag = 1 if utilities.regx_match(kasan_double_free_regx, line): double_free_flag = 1 if 
self.store_read and utilities.regx_match(read_regx, line): read_flag = 1 if record_flag or kasan_flag: if crash == []: crash.append(boundary_regx) crash.append(line) out_begin = out_end except Exception as e: self.case_logger.error("Exception occur when reporducing crash: {}".format()) if p.poll() == None: p.kill() if not extract_report: res = ['QEMU threaded {}: Error occur at booting qemu'.format(th_index)] if p.poll() == None: p.kill() self.queue.put([res, trgger_hunted_bug]) return def upload_exp(self, syz_repro, port, syz_commit, repro_type, c_repro, i386, fixed, logger): target = os.path.join(self.package_path, "scripts/upload-exp.sh") utilities.chmodX(target) p = Popen([target, self.case_path, syz_repro, str(port), self.image_path, syz_commit, str(repro_type), str(c_repro), str(i386), str(fixed), self.compiler], stdout=PIPE, stderr=STDOUT) with p.stdout: log_anything(p.stdout, logger, self.debug) exitcode = p.wait() if exitcode != 2 and exitcode != 3: return 0 return exitcode def upload_custom_exp(self, path, port, logger=None): p = Popen(["scp", "-F", "/dev/null", "-o", "UserKnownHostsFile=/dev/null", \ "-o", "BatchMode=yes", "-o", "IdentitiesOnly=yes", "-o", "StrictHostKeyChecking=no", \ "-i", "{}/stretch.img.key".format(self.image_path), "-P", str(port), path, "root@localhost:/root/poc"], stdout=PIPE, stderr=STDOUT) with p.stdout: if logger != None: log_anything(p.stdout, logger, self.debug) exitcode = p.wait() return exitcode def run_exp(self, syz_repro, port, repro_type, exitcode, i386, th_index, logger=None): if repro_type == utilities.URL: r = utilities.request_get(syz_repro) text = r.text.split('\n') command = self.make_commands(text, exitcode, i386) else: with open(syz_repro, "r") as f: text = f.readlines() #Temporarily disable read command from repro.command #It may cause misbehavior of bugs. 
#Since the new capabilities are from a specific version of syzkaler #We just need to parse one type of testcase, it's totally OK """dirname = os.path.dirname(syz_repro) command_path = os.path.join(dirname, "repro.command") if os.path.isfile(command_path): with open(command_path, 'r') as f: command = f.readline().strip('\n') else:""" command = self.make_commands(text, exitcode, i386) target = os.path.join(self.package_path, "scripts/run-script.sh") utilities.chmodX(target) p1 = Popen([target, command, str(port), self.image_path, self.case_path], stdout=PIPE, stderr=STDOUT) with p1.stdout: if logger != None: log_anything(p1.stdout, logger, self.debug) exitcode = p1.wait() if exitcode == 1: self.case_logger.error("QEMU threaded {}: Usually, there is no reproducer in the crash".format(th_index)) return 0 p2 = Popen(["ssh", "-F", "/dev/null", "-o", "UserKnownHostsFile=/dev/null", "-o", "BatchMode=yes", "-o", "IdentitiesOnly=yes", "-o", "StrictHostKeyChecking=no", "-i", "{}/stretch.img.key".format(self.image_path), "-p", str(port), "root@localhost", "chmod +x run.sh && ./run.sh "+str(th_index & 1)], stdout=PIPE, stderr=STDOUT) with p2.stdout: if logger != None: x = threading.Thread(target=log_anything, args=(p2.stdout, logger, self.debug), name="{} run.sh logger".format(th_index)) x.start() """ call(["ssh", "-F", "/dev/null", "-o", "UserKnownHostsFile=/dev/null", "-o", "BatchMode=yes", "-o", "IdentitiesOnly=yes", "-o", "StrictHostKeyChecking=no", "-i", "{}/stretch.img.key".format(self.image_path), "-p", str(port), "root@localhost", "chmod +x run.sh && ./run.sh "+str(th_index & 1)]) """ return 1 def run_custom_exp(self, port, logger=None): p2 = Popen(["ssh", "-F", "/dev/null", "-o", "UserKnownHostsFile=/dev/null", "-o", "BatchMode=yes", "-o", "IdentitiesOnly=yes", "-o", "StrictHostKeyChecking=no", "-i", "{}/stretch.img.key".format(self.image_path), "-p", str(port), "root@localhost", "./poc"], stdout=PIPE, stderr=STDOUT) with p2.stdout: if logger != None: x = 
threading.Thread(target=log_anything, args=(p2.stdout, logger, self.debug), name="{} run.sh logger") x.start() def make_commands(self, text, support_enable_features, i386): command = "/syz-execprog -executor=/syz-executor " if text[0][:len(command)] == command: # If read from repro.command, text[0] was already the command return text[0] enabled = "-enable=" normal_pm = {"arch":"amd64", "threaded":"false", "collide":"false", "sandbox":"none"} for line in text: if line.find('{') != -1 and line.find('}') != -1: pm = {} try: pm = json.loads(line[1:]) except json.JSONDecodeError: self.case_logger.info("Using old syz_repro") pm = utilities.syzrepro_convert_format(line[1:]) for each in normal_pm: if each in pm and pm[each] != "": command += "-" + each + "=" +str(pm[each]).lower() + " " else: if each=='arch' and i386: command += "-" + each + "=386" + " " else: command += "-" + each + "=" +str(normal_pm[each]).lower() + " " if "procs" in pm and str(pm["procs"]) != "1": num = int(pm["procs"]) command += "-procs=" + str(num*2) + " " else: command += "-procs=1" + " " if "repeat" in pm and pm["repeat"] != "": command += "-repeat=" + "0 " if "slowdown" in pm and pm["slowdown"] != "": command += "-slowdown=" + "1 " #It makes no sense that limiting the features of syz-execrpog, just enable them all if support_enable_features != 2: if "tun" in pm and str(pm["tun"]).lower() == "true": enabled += "tun," if "binfmt_misc" in pm and str(pm["binfmt_misc"]).lower() == 'true': enabled += "binfmt_misc," if "cgroups" in pm and str(pm["cgroups"]).lower() == "true": enabled += "cgroups," if "close_fds" in pm and str(pm["close_fds"]).lower() == "true": enabled += "close_fds," if "devlinkpci" in pm and str(pm["devlinkpci"]).lower() == "true": enabled += "devlink_pci," if "netdev" in pm and str(pm["netdev"]).lower() == "true": enabled += "net_dev," if "resetnet" in pm and str(pm["resetnet"]).lower() == "true": enabled += "net_reset," if "usb" in pm and str(pm["usb"]).lower() == "true": enabled += 
"usb," if "ieee802154" in pm and str(pm["ieee802154"]).lower() == "true": enabled += "ieee802154," if "sysctl" in pm and str(pm["sysctl"]).lower() == "true": enabled += "sysctl," if "vhci" in pm and str(pm["vhci"]).lower() == "true": enabled += "vhci," if "wifi" in pm and str(pm["wifi"]).lower() == "true": enabled += "wifi," if enabled[-1] == ',': command += enabled[:-1] + " testcase" else: command += "testcase" break return command def monitor_execution(self, p): count = 0 while (count <60): if self.kill_qemu: self.case_logger.info('Signal kill qemu received.') p.kill() return count += 1 time.sleep(10) poll = p.poll() if poll != None: return self.case_logger.info('Time out, kill qemu') p.kill() def __match_allocated_section(self, report1 ,report2): self.case_logger.info("match allocated section") ratio = 1 allocation1 = utilities.extract_allocated_section(report1, self.kasan_func_list) allocation2 = utilities.extract_allocated_section(report2, self.kasan_func_list) seq1 = [utilities.extract_func_name(x) for x in allocation1 if utilities.extract_func_name(x) != None] seq2 = [utilities.extract_func_name(x) for x in allocation2 if utilities.extract_func_name(x) != None] counter = 0 """ for i in range(0, min(len(seq1), len(seq2))): if seq1[i] == seq2[i]: counter += 1 else: break if counter == 2 or counter == min(len(seq1), len(seq2)): return [True, ratio] """ diff = utilities.levenshtein(seq1, seq2) m = max(len(seq1), len(seq2)) if m > 0: ratio = diff/float(m) else: self.case_logger.error("Allocation do not exist") self.case_logger.info("diff ratio: {}".format(ratio)) if ratio > 0.3: return [False, ratio] return [True, ratio] def __match_call_trace(self, report1, report2): self.case_logger.info("match call trace") ratio = 1 trace1 = utilities.extrace_call_trace(report1, self.kasan_func_list) trace2 = utilities.extrace_call_trace(report2, self.kasan_func_list) seq1 = [utilities.extract_func_name(x) for x in trace1 if utilities.extract_func_name(x) != None] seq2 = 
[utilities.extract_func_name(x) for x in trace2 if utilities.extract_func_name(x) != None] counter = 0 """ for i in range(0, min(len(seq1), len(seq2))): if seq1[i] == seq2[i]: counter += 1 else: break if counter == 2 or counter == min(len(seq1), len(seq2)): return [True, ratio] """ diff = utilities.levenshtein(seq1, seq2) m = max(len(seq1), len(seq2)) if m > 0: ratio = diff/float(m) else: self.case_logger.error("Call trace do not exist") self.case_logger.info("diff ratio: {}".format(ratio)) if ratio > 0.3: return [False, ratio] return [True, ratio] def __init_case_logger(self, logger_name): handler = logging.FileHandler("{}/poc/log".format(self.case_path)) format = logging.Formatter('%(asctime)s %(message)s') handler.setFormatter(format) logger = logging.getLogger(logger_name) logger.setLevel(self.logger.level) logger.addHandler(handler) logger.propagate = False if self.debug: logger.propagate = True return logger def __log_subprocess_output(self, pipe, log_level): for line in iter(pipe.readline, b''): line = line.decode("utf-8").strip('\n').strip('\r') if log_level == logging.INFO: self.case_logger.info(line) if log_level == logging.DEBUG: self.case_logger.debug(line) def log_anything(pipe, logger, debug): try: for line in iter(pipe.readline, b''): try: line = line.decode("utf-8").strip('\n').strip('\r') except: logger.info('bytes array \'{}\' cannot be converted to utf-8'.format(line)) continue if logger.level == logging.INFO: logger.info(line) if logger.level == logging.DEBUG: logger.debug(line) if debug: print(line) except ValueError: if pipe.close: return def log_by_pwn_process(p, logger, debug): while p.poll() == None: try: line = p.recvuntil("\n", timeout=10) except EOFError: break if logger.level == logging.INFO: logger.info(line) if logger.level == logging.DEBUG: logger.debug(line) if debug: print(line) def link_correct_linux_repro(case_path, index): dst = os.path.join(case_path, "linux") try: os.remove(dst) except: pass src = 
"{}/tools/linux-{}".format(project_path, index) os.symlink(src, dst) def reproduce_with_ori_poc(index): while(1): lock.acquire(blocking=True) l = list(crawler.cases.keys()) if len(l) == 0: lock.release() break hash = l[0] case = crawler.cases.pop(hash) lock.release() print("Thread {}: running case {} [{}/{}]".format(index, hash, len(l)-1, total)) case_path = "{}/work/{}/{}".format(project_path, path, hash[:7]) if not os.path.isdir(case_path): print("Thread {}: running case {}: {} does not exist".format(index, hash[:7], case_path)) continue if args.linux != "-1": offset = int(args.linux) index = int(args.linux) link_correct_linux_repro(case_path, index) #hdlr = logging.FileHandler('./replay.out') #logger = logging.getLogger('crash-{}'.format(hash)) #formatter = logging.Formatter('%(asctime)s Thread {}: {}: %(message)s'.format(index, hash[:7])) #hdlr.setFormatter(formatter) #logger.addHandler(hdlr) #logger.setLevel(logging.INFO) logger = logging.getLogger("thread-{}".format(index)) handler = logging.StreamHandler(sys.stdout) logger.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s Thread {}: {}: %(message)s'.format(index, hash[:7])) handler.setFormatter(formatter) logger.addHandler(handler) syz_repro = case["syz_repro"] syz_commit = case["syzkaller"] commit = case["commit"] config = case["config"] c_repro = case["c_repro"] i386 = None if utilities.regx_match(r'386', case["manager"]): i386 = True log = case["log"] logger.info("Running case: {}".format(hash)) offset = index gcc = utilities.set_gcc_version(time_parser.parse(case["time"])) checker = CrashChecker(project_path, case_path, default_port, logger, args.debug, offset, 4, gcc=gcc) if checker.deploy_linux(commit,config,0) == 1: print("Thread {}: running case {}: Error occur in deploy_linux.sh".format(index, hash[:7])) continue report, trigger = checker.read_crash(case["syz_repro"], case["syzkaller"], None, 0, case["c_repro"], i386) if report != [] and trigger: for each in report: for line in each: 
if utilities.regx_match(r'BUG: (KASAN: [a-z\\-]+ in [a-zA-Z0-9_]+)', line) or\ utilities.regx_match(r'BUG: (KASAN: double-free or invalid-free in [a-zA-Z0-9_]+)', line): m = re.search(r'BUG: (KASAN: [a-z\\-]+ in [a-zA-Z0-9_]+)', line) if m != None and len(m.groups()) > 0: title = m.groups()[0] m = re.search(r'BUG: (KASAN: double-free or invalid-free in [a-zA-Z0-9_]+)', line) if m != None and len(m.groups()) > 0: title = m.groups()[0] if utilities.regx_match(r'Write of size (\d+) at addr (\w*)', line): write_without_mutating = True print("Thread {}: running case {}: OOB/UAF Write without mutating".format(index, hash[:7])) print("Thread {}: running case {}: Detect read before write".format(index, hash[:7])) break def reproduce_one_case(index): while(1): lock.acquire(blocking=True) l = list(crawler.cases.keys()) if len(l) == 0: lock.release() break hash = l[0] case = crawler.cases.pop(hash) lock.release() print("Thread {}: running case {} [{}/{}]".format(index, hash, len(l)-1, total)) case_path = "{}/work/{}/{}".format(project_path, path, hash[:7]) if not os.path.isdir(case_path): print("{} does not exist".format(case_path)) continue if args.linux != "-1": offset = int(args.linux) index = int(args.linux) link_correct_linux_repro(case_path, index) #hdlr = logging.FileHandler('./replay.out') #logger = logging.getLogger('crash-{}'.format(hash)) #formatter = logging.Formatter('%(asctime)s Thread {}: {}: %(message)s'.format(index, hash[:7])) #hdlr.setFormatter(formatter) #logger.addHandler(hdlr) #logger.setLevel(logging.INFO) logger = logging.getLogger("case-{}".format(hash)) handler = logging.StreamHandler(sys.stdout) logger.setLevel(logging.INFO) if args.debug: logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s Thread {}: {}: %(message)s'.format(index, hash[:7])) handler.setFormatter(formatter) logger.addHandler(handler) syz_repro = case["syz_repro"] syz_commit = case["syzkaller"] commit = case["commit"] config = case["config"] c_repro = 
case["c_repro"] i386 = None if utilities.regx_match(r'386', case["manager"]): i386 = True log = case["log"] logger.info("Running case: {}".format(hash)) offset = index gcc = utilities.set_gcc_version(time_parser.parse(case["time"])) checker = CrashChecker(project_path, case_path, default_port, logger, args.debug, offset, 4, gcc=gcc) checker.case_logger.info("=============================A reproducing process starts=============================") if args.identify_by_trace: if args.reproduce: res = checker.run(syz_repro, syz_commit, None, commit, config, c_repro, i386) else: res = checker.run(syz_repro, syz_commit, log, commit, config, c_repro, i386) checker.logger.info("{}:{}".format(hash, res[0])) if res[0]: n = checker.diff_testcase(res[1], syz_repro) checker.logger.info("difference of characters of two testcase: {}".format(n)) checker.logger.info("successful crash: {}".format(res[1])) if args.identify_by_patch: commit = utilities.get_patch_commit(hash) if commit != None: checker.repro_on_fixed_kernel(syz_commit, case["commit"], config, c_repro, i386, commit) print("Thread {} exit->".format(index)) def args_parse(): parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description='Determine if the new crashes are from the same root cause of the old one\n' 'eg. python crash.py -i 7fd1cbe3e1d2b3f0366d5026854ee5754d451405') parser.add_argument('-i', '--input', nargs='?', action='store', help='By default it analyze all cases under folder \'succeed\', but you can indicate a specific one.') parser.add_argument('--ignore', nargs='?', action='store', help='A file contains cases hashs which are ignored. 
One line for each hash.') parser.add_argument('-r', '--reproduce', action='store_true', help='Reproduce cases with the original testcase') parser.add_argument('-pm', '--parallel-max', nargs='?', action='store', default='5', help='The maximum of parallel processes\n' '(default valus is 5)') parser.add_argument('--folder', const='succeed', nargs='?', default='succeed', choices=['succeed', 'completed', 'incomplete', 'error'], help='Reproduce cases with the original testcase') parser.add_argument('--linux', nargs='?', action='store', default='-1', help='Indicate which linux repo to be used for running\n' '(--parallel-max will be set to 1)') parser.add_argument('-p', '--port', nargs='?', default='3777', help='The default port that is used by reproducing\n' '(default value is 3777)') parser.add_argument('--identify-by-trace', '-ibt', action='store_true', help='Reproduce on fixed kernel') parser.add_argument('--store-read', action='store_true', help='Do not ignore memory reading') parser.add_argument('--identify-by-patch', '-ibp', action='store_true', help='Reproduce on unfixed kernel') parser.add_argument('--test-original-poc', action='store_true', help='Reproduce with original PoC') parser.add_argument('--debug', action='store_true', help='Enable debug mode') args = parser.parse_args() return args if __name__ == '__main__': print("running crash.py") args = args_parse() crawler = Crawler() if args.debug: args.parallel_max="1" ignore = [] if args.ignore != None: with open(args.ignore, "r") as f: text = f.readlines() for line in text: line = line.strip('\n') ignore.append(line) path = args.folder type = utilities.FOLDER if args.input != None: if len(args.input) == 40: crawler.run_one_case(args.input) else: with open(args.input, 'r') as f: text = f.readlines() for line in text: line = line.strip('\n') crawler.run_one_case(line) else: for url in utilities.urlsOfCases(path, type): if url not in ignore: crawler.run_one_case(url) project_path = os.getcwd() lock = 
threading.Lock() l = list(crawler.cases.keys()) total = len(l) default_port = int(args.port) parallel_max = int(args.parallel_max) if args.test_original_poc: thread_fn = reproduce_with_ori_poc else: thread_fn = reproduce_one_case for i in range(min(len(crawler.cases), parallel_max)): x = threading.Thread(target=thread_fn, args=(i,)) x.start()
# ===== file: linkCreationHelpers.py =====
#!/usr/bin/env python3
# Corona-Info-App
#
# © 2020 Tobias Höpp.

# Include utilities
import urllib
import json
from sqlalchemy import or_
import bs4
import visvalingamwyatt as vw

# Include db connection
from main import db, api
# Include models
from models.districts import districts, updateDistrictIncidence, createRegionIfNotExists
from models.measures import sources, regionHasGroup, display, createSource
from utils.measure_utils import createDefaultGroup
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
import requests

# For multithreading
import multiprocessing
import threading
import subprocess
from queue import Queue, Empty


def part1():
    """Match scraped measure links to district rows in the database.

    Reads 'landkreise.json', pulls every <a> link out of each state's
    'Regionale Einschränkungen' HTML, guesses the district category from
    the link text, resolves the name against the `districts` table and
    writes matched/unmatched links to 'districtlinks.json'.
    """
    with open('landkreise.json') as f:
        data = json.load(f)
    result = {"ok": [], "err": []}
    # Renamed loop variable from 'd' — the original rebound 'd' to the
    # query result inside the inner loop, shadowing the state record.
    for state in data:
        region_id = createRegionIfNotExists(state["Bundesland"]).id
        print(region_id)
        html_soup = bs4.BeautifulSoup(state["Regionale Einschränkungen"], 'html.parser')
        for l in html_soup.findAll('a'):
            category = None
            name = None
            # NOTE(review): the [:-11]/[:-18] slices strip one character more
            # than the matched suffix, presumably to drop a separating comma
            # ("Foo, Landkreis"); the LIKE query below is tolerant either way.
            if l.text[0:10] == "Landkreis ":
                category = "Landkreis"
                name = l.text[10:]
            elif l.text[-10:] == " Landkreis":
                category = "Landkreis"
                name = l.text[:-11]
            elif l.text[0:11] == "Stadtkreis ":
                category = "Stadtkreis"
                name = l.text[11:]
            elif l.text[0:17] == "Kreisfreie Stadt ":
                category = "Kreisfreie Stadt"
                name = l.text[17:]
            elif l.text[-17:] == " kreisfreie Stadt":
                category = "Kreisfreie Stadt"
                name = l.text[:-18]
            elif l.text[0:6] == "Stadt ":
                category = "Kreisfreie Stadt"
                name = l.text[6:]
            elif l.text[0:6] == "Kreis ":
                category = "Landkreis"
                name = l.text[6:]
            elif not "RKI" in l.text:
                name = l.text
            if name is not None:
                try:
                    if category is not None:
                        if category == "Landkreis":
                            # "Landkreis" rows may be stored as either category.
                            district = districts.query.filter(
                                districts.name.like("%{}%".format(name)),
                                districts.region_id == region_id,
                                or_(districts.category == "Landkreis",
                                    districts.category == "Kreis")).one()
                        else:
                            district = districts.query.filter(
                                districts.name.like("%{}%".format(name)),
                                districts.region_id == region_id,
                                districts.category == category).one()
                    else:
                        district = districts.query.filter(
                            districts.name.like("%{}%".format(name)),
                            districts.region_id == region_id).one()
                    result["ok"].append({"id": district.id, "link": l["href"], "comment": l.text})
                except (NoResultFound, MultipleResultsFound):
                    # Unknown or ambiguous names are collected for manual review.
                    result["err"].append({"id": None, "link": l["href"], "comment": l.text})
    with open('districtlinks.json', 'w') as json_file:
        json.dump(result, json_file)


def part2():
    """Report districts that have no link entry in 'links.json'."""
    with open('links.json') as f:
        data = json.load(f)
    covered = {d["id"]: d for d in data}
    result = {"ok": data, "missing": []}
    for district in districts.query.all():
        if district.id not in covered:
            result["missing"].append({"id": district.id, "link": "", "comment": district.name_de})
            print(district.id)
    # with open('districtlinks2.json', 'w') as json_file:
    #     json.dump(result, json_file)


def part3():
    """Fetch every link in 'links.json' in parallel and dump the failures."""
    with open('links.json') as f:
        data = json.load(f)
    jobQueue = Queue()
    resultQueue = Queue()
    for d in data:
        jobQueue.put(d)
    for _ in range(multiprocessing.cpu_count()):
        worker = threading.Thread(target=part3_helper, args=(jobQueue, resultQueue),
                                  daemon=True)
        worker.start()
    jobQueue.join()
    print("DONE")
    result = list(resultQueue.queue)
    with open('unsuccessfull.json', 'w') as json_file:
        json.dump(result, json_file)


def part3_helper(q, resultQueue):
    """Worker: GET each queued link, record non-200 responses and errors.

    Args:
        q (Queue): Job queue of {"id", "link", "comment"} dicts.
        resultQueue (Queue): Collects jobs that failed, annotated in place.
    """
    while True:
        try:
            # Non-blocking get instead of the racy `while not q.empty()` /
            # blocking `q.get()` pair: with several workers the queue can
            # drain between the check and the get.
            job = q.get_nowait()
        except Empty:
            return
        try:
            headers = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:77.0) Gecko/20190101 Firefox/77.0"}
            r = requests.get(job["link"], timeout=(5, 10), headers=headers)
            if r.status_code != 200:
                res = job
                # BUG FIX: a trailing comma previously stored a 1-tuple here.
                res["statusCode"] = r.status_code
                print(res)
                resultQueue.put(res)
        except requests.exceptions.RequestException as e:
            res = job
            # BUG FIX: a trailing comma previously stored a 1-tuple here.
            res["exception"] = str(e)
            print(res)
            resultQueue.put(res)
        q.task_done()


# part3()

import os


def tiles():
    """Convert every PNG map tile under the static tiles dir to JPEG."""
    from pathlib import Path
    jobQueue = Queue()
    files = list(Path("../app/src/static/tiles").rglob("*.png"))
    for f in files:
        jobQueue.put(str(f))
    for _ in range(multiprocessing.cpu_count()):
        worker = threading.Thread(target=tile_helper, args=(jobQueue,), daemon=True)
        worker.start()
    jobQueue.join()
    print("DONE")


def tile_helper(q):
    """Worker: convert one PNG to JPEG (quality 85) and delete the PNG."""
    while True:
        try:
            job = q.get_nowait()
        except Empty:
            return
        try:
            # Argument list instead of os.system string concatenation:
            # safe with spaces and shell metacharacters in file names.
            subprocess.run(["convert", job, "-quality", "85", job[:-3] + "jpg"],
                           check=True)
            os.remove(job)
        except Exception:
            # Was a bare `except:`; at least name the failing file.
            print("Something went wrong:", job)
        q.task_done()


tiles()
SBFI_Profiler.py
# Pre-injection Profiling
# Measures switching activity on the inputs of macrocells
# Identifies the used/unused memory cells
# Any other metric that can improve the fault injection in any way
# Author: Ilya Tuzov, Universitat Politecnica de Valencia
#
# NOTE(review): this module uses Python-2-only constructs (dict.iteritems,
# raw_input, integer '/') — it targets Python 2; verify before porting.

import sys
import xml.etree.ElementTree as ET
import re
import os
import stat
import subprocess
import shutil
import datetime
import time
import random
import glob
from Davos_Generic import *
from Datamanager import *
from SBFI_Initializer import *
from BitstreamParser import *
import multiprocessing


class ProfilingType:
    # Enumeration of supported profiling metrics: activity time vs. sampled value.
    actime, value = range(2)


#----------------------------------------------
# Profiling logic
#----------------------------------------------
class ProfilingAddressDescriptor:
    """Per-address profiling statistics for one injection target."""

    def __init__(self, Iaddress):
        self.address = Iaddress
        self.rate = float(0)
        self.time_from = float(0)
        self.time_to = float(0)
        self.total_time = float(0)
        self.entries = int(0)
        self.effective_switches = int(0)
        self.profiled_value = ''

    def to_xml(self):
        """Serialize this descriptor as a single <address .../> XML element."""
        return '<address val = \"{0:s}\" rate = \"{1:.4f}\" time_from = \"{2:.2f}\" time_to = \"{3:.2f}\" total_time = \"{4:.1f}\" entries = \"{5:d}\" effective_switches = \"{6:d}\"/>'.format(self.address, self.rate, self.time_from, self.time_to, self.total_time, self.entries, self.effective_switches)


class ProfilingDescriptor:
    """Profiling record for one (primitive type, primitive name, injection case)."""

    def __init__(self, Iprim_type, Iprim_name, Icase):
        self.prim_type = Iprim_type
        self.prim_name = Iprim_name
        self.inj_case = Icase    # object from dictionary->injection_rule(prim_type, fault_model)->injection_case
        self.trace_descriptor = None
        self.address_descriptors = []
        self.indetermination = False
        self.indetermination_time = float(0)
        self.profiled_value = ''

    def to_xml(self):
        """Serialize as a <simdesc> element wrapping the per-address children."""
        res = '\n\t<simdesc prim_type = \"' + self.prim_type + '\" prim_name = \"' + self.prim_name + '\" inj_case = \"' + self.inj_case.label + '\" >'
        for i in self.address_descriptors:
            res += '\n\t\t' + i.to_xml()
        return(res + '\n\t</simdesc>')

    def get_by_adr(self, iadr):
        """Return the address descriptor matching str(iadr), or None."""
        for i in self.address_descriptors:
            if i.address == str(iadr):
                return(i)
        return(None)


class ProfilingResult:
    """Container of ProfilingDescriptor items for one (config, fault model) pair."""

    def __init__(self, Iconfig, Ifaultmodel):
        self.config = Iconfig
        self.faultmodel = Ifaultmodel
        self.items = []

    def append(self, Iprim_type, Iprim_name, Icase):
        self.items.append(ProfilingDescriptor(Iprim_type, Iprim_name, Icase))

    def get(self, prim_type, prim_name, inj_case_label):
        """Linear lookup by (prim_type, prim_name, case label); None if absent."""
        for i in self.items:
            if(i.prim_type == prim_type and i.prim_name == prim_name and i.inj_case.label == inj_case_label):
                return(i)


def ProfileHdlModels(config, toolconf, datamodel):
    # NOTE(review): apparently an unfinished stub — `ProfilingConfig` is not
    # defined in this module (possibly expected from a star import), and
    # `for i2 in range:` iterates the builtin and would raise if reached.
    for p in ProfilingConfig.items:
        if p.type == ProfilingType.actime:
            if len(p.indexes) > 1:
                for i1 in range(p.indexes[0].low, p.indexes[0].high+1):
                    for i2 in range:
                        pass
            else:
                pass
        else:
            pass


def process_activity_dumps(procid, LutMapList, DAVOS_Config, resdict):
    """Parse the per-LUT trace dumps and compute activity-time statistics.

    Runs as a multiprocessing worker: reads ./Traces/<Label>.lst for each
    LUT record, fills lut['actime']/lut['switchcount'], and stores the flat
    per-bit result list into resdict[procid].
    """
    stat = 0    # NOTE(review): shadows the imported `stat` module within this function
    ProfileRes=[]
    workload_t = float(DAVOS_Config.SBFIConfig.genconf.std_workload_time)
    print('Starting thread: {0} to {1}'.format(LutMapList[0]['Label'], LutMapList[-1]['Label']))
    for lut in LutMapList:
        # Skip cells that were never matched to a simulation node.
        if lut['node_main'] == None:
            continue
        inj_dump = simDump()
        # combining: complementary cell traced together with this one.
        combining = (lut['node_compl'] != None and len(lut['cbelinputs']) > 0)
        # dual_output: LUT6 with both outputs used but no combined inputs.
        dual_output = (lut['celltype']=='LUT6' and lut['node_compl'] != None and len(lut['cbelinputs']) == 0)
        # paired_shadow_cell: paired cell exists in bitstream but not in the netlist.
        paired_shadow_cell = len(lut['cbelinputs']) > 0 and lut['node_compl']==None
        inj_dump.internal_labels = ['Item', 'Compl'] if combining else ['Item']
        inj_dump.build_vectors_from_file("./Traces/{0}.lst".format(lut['Label']))
        #inj_dump.normalize(True)
        (lut['actime'], lut['switchcount']) = inj_dump.get_activity_time(0, workload_t, 1 if combining else None)
        if dual_output:
            print('info: replicating DUAL OUTPUT LUT6 activity for O5: {0}'.format(lut['name']))
            # Mirror O6 activity onto the O5 half (lower 5 address bits).
            for k, v in lut['actime'].items():
                if k&0x1F not in lut['actime']:
                    lut['actime'][k&0x1F]=v
                    lut['switchcount'][k&0x1F] = lut['switchcount'][k]
        # Fill gaps: unseen addresses get 0.0; observed-but-zero become 1.0
        # (distinguishes "never reached" from "reached, minimal activity").
        for i in range(2**len(lut['connections'])):
            if not combining:
                if i not in lut['actime']:
                    lut['actime'][i]=0.0
                    lut['switchcount'][i] = 0
                elif lut['actime'][i]==0.0:
                    lut['actime'][i] = 1.0
            else:
                for j in range(2**len(lut['cbelinputs'])):
                    if (i, j) not in lut['actime']:
                        lut['actime'][(i, j)] = 0.0
                        lut['switchcount'][(i, j)] = 0
                    elif lut['actime'][(i, j)] == 0.0:
                        lut['actime'][(i, j)] = 1.0
        # Paired cell not represented in the simulation netist (Pathhrough, constant, etc)
        if paired_shadow_cell:
            # Re-key scalar entries to (i, j) tuples, replicating over j.
            for i in range(2**len(lut['connections'])):
                v, c = lut['actime'][i], lut['switchcount'][i]
                del lut['actime'][i]
                del lut['switchcount'][i]
                lut['actime'][(i, 0)] = v
                lut['switchcount'][(i, 0)] = c
                for j in range(1, 2**len(lut['cbelinputs'])):
                    lut['actime'][(i, j)] = v if v > 0 else 1.0    # assume that paired cmem cell has non-zero activity time
                    lut['switchcount'][(i, j)] = c if c > 0 else 1
        # Flatten into per-bit records with activity as percent of workload time.
        for i in range(2**len(lut['connections'])):
            for j in range(2**len(lut['cbelinputs'])) if (combining or paired_shadow_cell) else range(1):
                x = {'Label': lut['Label'],
                     'LutBit': i,
                     'ComplBit': j,
                     'BitstreamCoordinates': lut['globalmap'][i][j],
                     'Actime': 100.0*float(lut['actime'][(i, j)])/workload_t if (combining or paired_shadow_cell) else 100.0*float(lut['actime'][i])/workload_t,
                     'SwitchCount': lut['switchcount'][(i, j)] if (combining or paired_shadow_cell) else lut['switchcount'][i]}
                ProfileRes.append(x)
                #print(str(x))
        stat += 1
        if stat % 100 == 0:
            print('Profiling: processed {0} items'.format(lut['Label']))
    resdict[procid]=ProfileRes


# update LutMapList records with switching_activity [Address/ComplementaryAddress:AcTime]
def Estimate_LUT_switching_activity(LutMapList, DAVOS_Config):
    """Profile LUT input switching activity via ModelSim trace dumps.

    Phases: (1) match each LUT record to a simulation node and build a
    ModelSim trace script, (2) run `vsim` in parallel to dump traces,
    (3) parse the dumps in worker processes, (4) de-duplicate by bitstream
    coordinates and write 'Profiled.csv'. Returns the flat result list.
    """
    #raw_input('Starting profiling')
    #InitializeHDLModels(DAVOS_Config.SBFIConfig, DAVOS_Config.toolconf)
    CellTypes = list(set([i['celltype'].lower() for i in LutMapList]))
    nodetree = ET.parse(os.path.join(DAVOS_Config.parconf[0].work_dir, DAVOS_Config.toolconf.injnode_list)).getroot()
    inj_nodes = ConfigNodes(DAVOS_Config.parconf[0].label, nodetree)
    nodelist = inj_nodes.get_all_by_typelist(CellTypes)
    #f = open('Log.txt','w')
    index = 0
    stdenv = DAVOS_Config.ExperimentalDesignConfig.design_genconf.uut_root
    if not stdenv.endswith('/'):
        stdenv += '/'
    for lut in LutMapList:
        index+=1
        lut['Label'] = 'CELL_{0:05d}'.format(index)
        # Match netlist cell names to simulation nodes by suffix comparison.
        for node in nodelist:
            if lut['name'].endswith(node.name.replace(' ', '').replace(stdenv, '').replace('\\', '')):
                lut['node_main'] = node
                break
        if lut['combcell']!=None:
            for node in nodelist:
                if lut['combcell']['name'].endswith(node.name.replace(' ', '').replace(stdenv, '').replace('\\', '')):
                    lut['node_compl'] = node
                    break
        #f.write('{0} : {1} : {2}\n'.format(lut['name'], '' if node_main == None else node_main.name, '==' if node_compl==None else node_compl.name))
        if lut['node_main'] != None:
            # Build the ModelSim (Tcl) snippet that traces this cell's inputs
            # as one virtual bus, listed into ./Traces/<Label>.lst.
            lut['trace_script'] = "\nquietly virtual signal -env {0} -install {0} {{ ((concat_range ({1:d} downto 0)) ({2}) )}} {3}".format(stdenv, len(lut['connections'])-1, ' & '.join(['{0}/{1}'.format(lut['node_main'].name.replace(stdenv, ''), port) for port in sorted(lut['connections'].keys(), reverse=True)]), lut['Label'])
            if lut['node_compl'] != None and len(lut['cbelinputs']) > 0:
                combcell_I = []
                for a in lut['cbelinputs']:
                    for k, v in lut['combcell']['connections'].iteritems():
                        if a==v:
                            combcell_I.append(k)
                lut['trace_script'] += "\nquietly virtual signal -env {0} -install {0} {{ ((concat_range ({1:d} downto 0)) ({2}) )}} {3}_Compl".format(stdenv, len(combcell_I)-1, ' & '.join(['{0}/{1}'.format(lut['node_compl'].name.replace(stdenv, ''), port) for port in combcell_I[::-1]]), lut['Label'])
            lut['trace_script'] += "\nset {0} [view list -new -title {0}]".format(lut['Label'])
            lut['trace_script'] += "\nradix bin"
            lut['trace_script'] += "\nadd list {0}/{1} -window ${2}".format(stdenv, lut['Label'], lut['Label'])
            if lut['node_compl'] != None and len(lut['cbelinputs']) > 0:
                lut['trace_script'] += "\nadd list {0}/{1}_Compl -window ${2}".format(stdenv, lut['Label'], lut['Label'])
        else:
            print('No mathing simulation node for {0}'.format(lut['name']))
    os.chdir(DAVOS_Config.parconf[0].work_dir)
    create_restricted_file('vsim.wlf')
    threadnum = DAVOS_Config.SBFIConfig.injector.maxproc
    script_list = ["#Profiling script\ndo {}".format(DAVOS_Config.SBFIConfig.genconf.run_script)]*threadnum
    # Python-2 integer division: ceil(len/threadnum).
    chunksize = len(LutMapList)/threadnum + (1 if len(LutMapList)%threadnum > 0 else 0)
    # NOTE(review): raw_input blocks here until the operator presses Enter.
    raw_input('chunksize: {0}, LutMapList: {1}, threadnum: {2}'.format(chunksize, len(LutMapList), threadnum))
    for i in range(threadnum):
        for lut in LutMapList[i*chunksize : (i+1)*chunksize]:
            if lut['node_main'] != None:
                script_list[i] += lut['trace_script']
        script_list[i] += "\n\n\nrun {0:d} ns\n".format(DAVOS_Config.SBFIConfig.genconf.std_workload_time)
        for lut in LutMapList[i*chunksize : (i+1)*chunksize]:
            if lut['node_main'] != None:
                script_list[i] += "\nview list -window ${0}".format(lut['Label'])
                script_list[i] += "\nwrite list -window ${0} ./Traces/{0}.lst".format(lut['Label'])
        script_list[i] += "\nquit\n"
        with open('Profiling_Thread_{0:03d}.do'.format(i), 'w') as f:
            f.write(script_list[i])
    proclist = []
    for i in range(threadnum):
        # One vsim instance per chunk; output redirected to a per-thread log.
        proc = subprocess.Popen("vsim -c -do \"Profiling_Thread_{0:03d}.do\" > Profiling_Thread_{0:03d}.log".format(i, i), shell=True)
        proclist.append(proc)
    while get_active_proc_number(proclist) > 0:
        tracenum = len(os.listdir(os.path.join(DAVOS_Config.parconf[0].work_dir, 'Traces')))
        console_message("Running Processes: {0}, Traces stored: {1}\r".format(len(get_active_proc_indexes(proclist)), tracenum), ConsoleColors.Green, True)
        time.sleep(5)
    proclist = []
    procnum = DAVOS_Config.ExperimentalDesignConfig.max_proc
    step = len(LutMapList)/procnum
    res = [[] for i in range(procnum)]
    manager=multiprocessing.Manager()
    return_dict = manager.dict()
    for i in range(procnum):
        t = multiprocessing.Process(target = process_activity_dumps, args = (i, LutMapList[i*step:(i+1)*step], DAVOS_Config, return_dict))
        proclist.append(t)
    for t in proclist:
        t.start()
    for t in proclist:
        t.join()
    print('Aggregating ')
    NonfilteredRes = []
    for k, v in return_dict.items():
        NonfilteredRes+=v
    # De-duplicate by bitstream coordinates, keeping the max-activity record.
    unique_res = dict()
    for item in NonfilteredRes:
        if not item['BitstreamCoordinates'] in unique_res:
            unique_res[item['BitstreamCoordinates']] = item
        elif unique_res[item['BitstreamCoordinates']]['Actime'] < item['Actime']:
            unique_res[item['BitstreamCoordinates']] = item
    ProfileRes = [unique_res[k] for k in sorted(unique_res.keys(), reverse=False)]
    res = Table('Actime')
    for lbl in ['Label', 'LutBit', 'BitstreamCoordinates', 'Actime', 'SwitchCount']:
        res.add_column(lbl, map(str, [ProfileRes[i][lbl] for i in range(len(ProfileRes))]))
    # Back-annotate per-cell activity matrices aligned with lut['globalmap'];
    # -1.0 / 0 mark coordinates that produced no profiling record.
    for lut in LutMapList:
        combining = len(lut['cbelinputs']) > 0 and lut['node_compl'] != None
        paired_shadow_cell = len(lut['cbelinputs']) > 0 and lut['node_compl'] == None
        lut['Actime'] = []
        lut['SwitchCount'] = []
        for i in range(len(lut['globalmap'])):
            lut['Actime'].append([])
            lut['SwitchCount'].append([])
            for j in range(len(lut['globalmap'][i])):
                lut['Actime'][i].append( unique_res[ lut['globalmap'][i][j] ]['Actime'] if lut['globalmap'][i][j] in unique_res else float(-1.0))
                lut['SwitchCount'][i].append( unique_res[ lut['globalmap'][i][j] ]['SwitchCount'] if lut['globalmap'][i][j] in unique_res else int(0))
    #for lut in LutMapList:
    #    x = [i for i in ProfileRes if lut['Label'] == i['Label']]
    #    combining = len(lut['cbelinputs']) > 0 and lut['node_compl'] != None
    #    paired_shadow_cell = len(lut['cbelinputs']) > 0 and lut['node_compl'] == None
    #    buf = [y['Actime'] for y in sorted(x, key=lambda i: (i['LutBit'], i['ComplBit']))]
    #    chunksize = (2**len(lut['cbelinputs'])) if (combining or paired_shadow_cell) else 1
    #    lut['Actime'] = [buf[i:i+chunksize] for i in range(0, len(buf), chunksize)]
    with open('Profiled.csv', 'w') as f:
        f.write(res.to_csv())
    #with open('Temp1.txt','w') as f:
    #    f.write( '\n'.join(i.type + ' : ' + i.name.replace(DAVOS_Config.ExperimentalDesignConfig.design_genconf.uut_root,'').replace('\\','') for i in nodelist))
    # Build Trace/List script for ModelSim
    return(ProfileRes)


if __name__ == "__main__":
    # Standalone analysis: bucket previously profiled cells into N groups by
    # Actime*SwitchCount and print per-group mean failure statistics.
    path = "C:/Projects/Profiling/Models/MC8051_ZC"
    T=Table('ProflingResult')
    T.build_from_csv(os.path.join(path, 'LutMapList_Upd_ext.csv'))
    items = []
    Actime_ind, SWcount, Fmode_ind = T.labels.index('Actime'), T.labels.index('SwitchCount'), T.labels.index('FailureModeEmul')
    for row in range(T.rownum()):
        # (Actime, SwitchCount, FailureModeEmul, Actime*SwitchCount)
        item = (float(T.get(row, Actime_ind)), int(T.get(row, SWcount)), int(T.get(row, Fmode_ind)), float(T.get(row, Actime_ind))*int(T.get(row, SWcount)))
        if item[0] > 0 and item[1] > 0 and item[2] >= 0:
            items.append(item)
    items.sort(key=lambda i: i[3])
    N = 20
    S = len(items)/N
    for i in range(N):
        actime = [items[j][0] for j in range(S*i, S*(i+1))]
        swcount = map(float, [items[j][1] for j in range(S*i, S*(i+1))])
        fmode = map(float, [items[j][2] for j in range(S*i, S*(i+1))])
        sortfunc = map(float, [items[j][3] for j in range(S*i, S*(i+1))])
        print('Group {0:3d} : Mean_Actime {1:10.5f} : Mean_SwitchCount: {2:10.0f} : FailureRate : {3:6.2f} : SortFunc : {4:10.0f}'.format(i, sum(actime)/len(actime), sum(swcount)/len(swcount), 100*sum(fmode)/len(fmode), sum(sortfunc)/len(sortfunc) ))
video_utils.py
import cv2
import time
import threading


class VideoStream:
    # Opens a video with OpenCV from file in a thread
    def __init__(self, src, name="VideoStream", real_time=True):
        """Initialize the video stream from a video file.

        Args:
            src (str): Video file to process.
            name (str, default='VideoStream'): Name for the reader thread.
            real_time (bool, default=True): If True, frames are read at the
                file's original frame rate instead of at full speed.

        Attributes:
            stream (cv2.VideoCapture): Video file stream.
            frame_rate (float): Frame rate reported by the container
                (may be 0.0 for some files).
            grabbed (bool): Whether the current frame was correctly read.
            frame (ndarray): OpenCV image containing the current frame.
            lock (threading.Lock): Guards release of the capture.
            _stop_event (threading.Event): Signals the thread to exit.
        """
        self.name = name
        self.stream = cv2.VideoCapture(src)
        self.real_time = real_time
        self.frame_rate = self.stream.get(cv2.CAP_PROP_FPS)
        self.grabbed, self.frame = self.stream.read()
        self.lock = threading.Lock()
        self._stop_event = threading.Event()

    def start(self):
        """Start the daemon reader thread and return self (fluent API)."""
        threading.Thread(target=self.update, daemon=True, name=self.name).start()
        return self

    def update(self):
        """Continuously read frames until the stream is stopped or exhausted."""
        # BUG FIX: guard against containers that report FPS of 0 — the
        # original `1.0/self.frame_rate` raised ZeroDivisionError there.
        delay = 1.0 / self.frame_rate if self.real_time and self.frame_rate > 0 else 0.0
        while self.stream.isOpened():
            if self.stopped():
                # stop() already released the capture; nothing left to do.
                return
            self.grabbed, self.frame = self.stream.read()
            if delay:
                # Wait to match the original video frame rate.
                time.sleep(delay)

    def read(self):
        """Return the most recently grabbed frame."""
        if self.stopped():
            print("Video ended")
        return self.frame

    def stop(self):
        """Release the capture and signal the reader thread to exit."""
        with self.lock:
            self.stream.release()
            self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.is_set()


class WebcamVideoStream:
    # Opens a video stream with OpenCV from a wired webcam in a thread
    def __init__(self, src, shape=None, name="WebcamVideoStream"):
        """Initialize the video stream from a webcam.

        Args:
            src (int): ID of the camera to use. From 0 to N.
            shape (tuple, optional): (width, height) capture size to request.
            name (str, default='WebcamVideoStream'): Name for the thread.

        Attributes:
            stream (cv2.VideoCapture): Webcam video stream.
            grabbed (bool): Whether the current frame was correctly read.
            frame (ndarray): OpenCV image containing the current frame.
            lock (threading.Lock): Guards release of the capture.
            _stop_event (threading.Event): Signals the thread to exit.
        """
        self.name = name
        self.stream = cv2.VideoCapture(src)
        self.shape = shape
        if self.shape is not None:
            # Named properties instead of the magic indices 3 and 4.
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, shape[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, shape[1])
        self.grabbed, self.frame = self.stream.read()
        self.lock = threading.Lock()
        self._stop_event = threading.Event()

    def start(self):
        """Start the daemon reader thread and return self (fluent API)."""
        threading.Thread(target=self.update, daemon=True, name=self.name).start()
        return self

    def update(self):
        """Continuously read frames until the stream is stopped."""
        while self.stream.isOpened():
            if self.stopped():
                # BUG FIX: the original `return self.stopped` returned the
                # bound method object without calling it; just exit.
                return
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        """Return the most recently grabbed frame."""
        return self.frame

    def stop(self):
        """Release the capture and signal the reader thread to exit."""
        with self.lock:
            self.stream.release()
            self._stop_event.set()

    def stopped(self):
        """Return True once stop() has been called."""
        return self._stop_event.is_set()
transfer.py
# Chunked/compressed file upload via Fabric.
# NOTE(review): Python-2 code (print statements, `Queue` module, `file()`
# builtin, Condition.notifyAll) — verify interpreter before reuse.
import os
import gzip
from operator import itemgetter
from sys import exit
from threading import Thread
from threading import Condition
from Queue import Queue
from fabric.api import local, put, sudo, cd
from fabric.colors import red


class FileSplitter:
    """
    Works like the UNIX split command break up a file
    into parts like:
    filename_aaaaaaaaa
    filename_aaaaaaaab
    etc...
    """

    def __init__(self, chunk_size, destination_directory, callback):
        # chunk_size is given in MiB; stored in bytes.
        self.chunk_size = chunk_size * 1024 * 1024
        self.destination_directory = destination_directory
        # callback must expose handle_chunk(chunk_path, transfer_target).
        self.chunk_callback = callback

    def split_file(self, path, compress, transfer_target):
        """Split `path` into chunks, optionally gzipping each, and hand every
        written chunk to the callback."""
        basename = os.path.basename(path)
        file_size = os.path.getsize(path)
        total_bytes = 0
        chunk_num = 0
        suffix = ''
        if compress:
            suffix = '.gz'
        # NOTE(review): `input` shadows the builtin and is never closed.
        input = open(path, 'rb')
        while True:
            chunk_name = "%s_part%08d%s" % (basename, chunk_num, suffix)
            chunk_path = os.path.join(self.destination_directory, chunk_name)
            this_chunk_size = min(self.chunk_size, file_size - total_bytes)
            if this_chunk_size <= 0:
                break
            chunk = input.read(this_chunk_size)
            total_bytes += len(chunk)
            if compress:
                chunk_output = gzip.open(chunk_path, 'wb')
            else:
                # Python-2 `file()` builtin.
                chunk_output = file(chunk_path, 'wb')
            chunk_output.write(chunk)
            chunk_output.close()
            self.chunk_callback.handle_chunk(chunk_path, transfer_target)
            chunk_num += 1


class TransferTarget:
    """One file to transfer, plus its compression/splitting policy."""

    def __init__(self, file, precompressed, transfer_manager):
        self.file = file
        self.precompressed = precompressed
        self.do_compress = transfer_manager.compress
        self.do_split = transfer_manager.chunk_size > 0
        self.local_temp = transfer_manager.local_temp
        basename = os.path.basename(file)
        if len(basename) < 1:
            # Hard exit: a trailing-slash path has no basename to transfer.
            print red(Exception("Invalid file specified - %s" % file))
            exit(-1)
        self.basename = basename

    def should_compress(self):
        # Only compress files that are not already compressed.
        return not self.precompressed and self.do_compress

    def split_up(self):
        return self.do_split

    def clean(self):
        """Remove the local temporary compressed copy, if one was made."""
        if self.should_compress():
            local("rm -rf '%s'" % self.compressed_file())

    def compressed_basename(self):
        """Basename as it appears on the remote side after upload."""
        if not self.precompressed:
            compressed_basename = "%s.gz" % self.basename
        else:
            compressed_basename = self.basename
        return compressed_basename

    def decompressed_basename(self):
        """Basename after remote decompression (strips a .gz suffix)."""
        basename = self.basename
        if basename.endswith(".gz"):
            decompressed_basename = basename[:-len(".gz")]
        else:
            decompressed_basename = basename
        return decompressed_basename

    def compressed_file(self):
        """Path of the local temporary gzip copy."""
        compressed_file = "%s/%s.gz" % (self.local_temp, self.basename)
        return compressed_file

    def build_simple_chunk(self):
        """Produce the single TransferChunk for the unsplit case,
        gzipping into local_temp first when required."""
        if self.should_compress():
            compressed_file = self.compressed_file()
            local("gzip -f -9 '%s' -c > '%s'" % (self.file, compressed_file))
            return TransferChunk(compressed_file, self)
        else:
            return TransferChunk(self.file, self)


class TransferChunk:
    """A single uploadable piece (whole file or one split part)."""

    def __init__(self, chunk_path, transfer_target):
        self.chunk_path = chunk_path
        self.transfer_target = transfer_target

    def clean_up(self):
        """Delete the local chunk unless it is the original source file."""
        was_split = self.transfer_target.split_up()
        was_compressed = self.transfer_target.should_compress()
        if was_split or was_compressed:
            local("rm '%s'" % self.chunk_path)


class FileTransferManager:
    """Pipelines compress -> upload -> remote decompress across thread pools.

    Three queues drive the pipeline: compress_queue (TransferTarget),
    transfer_queue (TransferChunk), decompress_queue (TransferTarget).
    """

    def __init__(self, compress=True, num_compress_threads=1,
                 num_transfer_threads=1, num_decompress_threads=1,
                 chunk_size=0, transfer_retries=3, destination="/tmp",
                 transfer_as="root", local_temp=None):
        self.compress = compress
        self.num_compress_threads = num_compress_threads
        self.num_transfer_threads = num_transfer_threads
        self.num_decompress_threads = num_decompress_threads
        # chunk_size in MiB; 0 disables splitting.
        self.chunk_size = chunk_size
        self.transfer_retries = transfer_retries
        self.destination = destination
        self.transfer_as = transfer_as
        self.local_temp = local_temp
        if not self.local_temp:
            self.local_temp = "/tmp"
        local("mkdir -p '%s'" % self.local_temp)
        self.file_splitter = FileSplitter(self.chunk_size, self.local_temp, self)

    def handle_chunk(self, chunk, transfer_target):
        # FileSplitter callback: queue each freshly written chunk for upload.
        self._enqueue_chunk(TransferChunk(chunk, transfer_target))

    def transfer_files(self, files=[], compressed_files=[]):
        """Transfer plain and pre-compressed files; blocks until all done.
        NOTE(review): mutable default arguments — safe only because the
        lists are never mutated here."""
        self.transfer_complete = False
        self.transfer_complete_condition = Condition()
        self._setup_destination_directory()
        self._setup_workers()
        self._enqueue_files(files, compressed_files)
        self._wait_for_completion()

    def _setup_workers(self):
        self._setup_compress_threads()
        self._setup_transfer_threads()
        self._setup_decompress_threads()

    def _setup_destination_directory(self):
        sudo("mkdir -p %s" % self.destination)
        self._chown(self.destination)

    def _setup_compress_threads(self):
        self.compress_queue = Queue()
        self._launch_threads(self.num_compress_threads, self._compress_files)

    def _setup_decompress_threads(self):
        self.decompress_queue = Queue()
        self._launch_threads(self.num_decompress_threads, self._decompress_files)

    def _setup_transfer_threads(self):
        self.transfer_queue = Queue()
        # For now just transfer one file at a time
        self._launch_threads(self.num_transfer_threads, self._put_files)

    def _launch_threads(self, num_threads, func):
        # Daemon threads: they loop forever and die with the process.
        for thread_index in range(num_threads):
            t = Thread(target=func)
            t.daemon = True
            t.start()

    def _enqueue_files(self, files, compressed_files):
        transfer_targets = []
        for file in files:
            transfer_target = TransferTarget(file, False, self)
            transfer_targets.append(transfer_target)
        for compressed_file in compressed_files:
            transfer_target = TransferTarget(compressed_file, True, self)
            transfer_targets.append(transfer_target)
        transfer_targets = self._sort_transfer_targets(transfer_targets)
        for transfer_target in transfer_targets:
            self.compress_queue.put(transfer_target)

    def _sort_transfer_targets(self, transfer_targets):
        """Order targets largest-first so big files start early."""
        for i in range(len(transfer_targets)):
            transfer_target = transfer_targets[i]
            transfer_targets[i] = transfer_target, os.stat(transfer_target.file).st_size
        transfer_targets.sort(key=itemgetter(1), reverse=True)
        return [transfer_target[0] for transfer_target in transfer_targets]

    def _wait_for_completion(self):
        # Decompression of split files must only start once every chunk is
        # uploaded, hence the condition broadcast between the joins.
        self.compress_queue.join()
        self.transfer_queue.join()
        self.transfer_complete_condition.acquire()
        self.transfer_complete = True
        self.transfer_complete_condition.notifyAll()
        self.transfer_complete_condition.release()
        self.decompress_queue.join()

    def _compress_files(self):
        # Worker loop: split-and-queue chunks, or build one simple chunk.
        while True:
            try:
                transfer_target = self.compress_queue.get()
                file = transfer_target.file
                if self.chunk_size > 0:
                    should_compress = transfer_target.should_compress()
                    self.file_splitter.split_file(file, should_compress, transfer_target)
                    # Split targets go straight to the decompress queue; its
                    # workers wait for transfer_complete before acting.
                    self.decompress_queue.put(transfer_target)
                else:
                    simple_chunk = transfer_target.build_simple_chunk()
                    self._enqueue_chunk(simple_chunk)
            except Exception as e:
                print red("Failed to compress a file to transfer")
                print red(e)
            finally:
                self.compress_queue.task_done()

    def _decompress_files(self):
        # For split transfers, block until every chunk has been uploaded.
        if self.chunk_size > 0:
            self.transfer_complete_condition.acquire()
            while not self.transfer_complete:
                self.transfer_complete_condition.wait()
            self.transfer_complete_condition.release()
        while True:
            try:
                transfer_target = self.decompress_queue.get()
                basename = transfer_target.basename
                chunked = transfer_target.split_up()
                compressed = transfer_target.do_compress or transfer_target.precompressed
                with cd(self.destination):
                    if compressed and chunked:
                        destination = transfer_target.decompressed_basename()
                        if transfer_target.precompressed:
                            # One gzip stream split into raw parts.
                            sudo("cat '%s_part'* | gunzip -c > %s" % (basename, destination), user=self.transfer_as)
                        else:
                            # Each part is an independent gzip member.
                            sudo("zcat '%s_part'* > %s" % (basename, destination), user=self.transfer_as)
                        sudo("rm '%s_part'*" % (basename), user=self.transfer_as)
                    elif compressed:
                        sudo("gunzip -f '%s'" % transfer_target.compressed_basename(), user=self.transfer_as)
                    elif chunked:
                        sudo("cat '%s'_part* > '%s'" % (basename, basename), user=self.transfer_as)
                        sudo("rm '%s_part'*" % (basename), user=self.transfer_as)
            except Exception as e:
                print red("Failed to decompress or unsplit a transfered file.")
                print red(e)
            finally:
                self.decompress_queue.task_done()

    def _put_files(self):
        # Worker loop: upload each chunk, then trigger remote decompression
        # for unsplit targets (split targets are queued by _compress_files).
        while True:
            try:
                transfer_chunk = self.transfer_queue.get()
                transfer_target = transfer_chunk.transfer_target
                compressed_file = transfer_chunk.chunk_path
                basename = os.path.basename(compressed_file)
                self._put_as_user(compressed_file, "%s/%s" % (self.destination, basename))
                if not transfer_target.split_up():
                    self.decompress_queue.put(transfer_target)
            except Exception as e:
                print red("Failed to upload a file.")
                print red(e)
            finally:
                transfer_chunk.clean_up()
                self.transfer_queue.task_done()

    def _chown(self, destination):
        sudo("chown %s:%s '%s'" % (self.transfer_as, self.transfer_as, destination))

    def _put_as_user(self, source, destination):
        """Upload with retries; exits the process after transfer_retries
        consecutive failures."""
        for attempt in range(self.transfer_retries):
            retry = False
            try:
                put(source, destination, use_sudo=True)
                self._chown(destination)
            except BaseException as e:
                retry = True
                print red(e)
                print red("Failed to upload %s on attempt %d" % (source, attempt + 1))
            except:
                # Should never get here, delete this block when more confident
                retry = True
                print red("Failed to upload %s on attempt %d" % (source, attempt + 1))
            finally:
                if not retry:
                    return
        print red("Failed to transfer file %s, exiting..." % source)
        exit(-1)

    def _enqueue_chunk(self, transfer_chunk):
        self.transfer_queue.put(transfer_chunk)
pod.py
""" Pod related functionalities and context info Each pod in the openshift cluster will have a corresponding pod object """ import logging import os import re import yaml import tempfile import time import calendar from threading import Thread import base64 from ocs_ci.ocs.ocp import OCP, verify_images_upgraded from tests import helpers from ocs_ci.ocs import workload from ocs_ci.ocs import constants, defaults, node from ocs_ci.framework import config from ocs_ci.ocs.exceptions import CommandFailed, NonUpgradedImagesFoundError, UnavailableResourceException from ocs_ci.ocs.utils import setup_ceph_toolbox from ocs_ci.ocs.resources.ocs import OCS from ocs_ci.utility import templating from ocs_ci.utility.utils import run_cmd, check_timeout_reached from ocs_ci.utility.utils import check_if_executable_in_path from ocs_ci.utility.retry import retry logger = logging.getLogger(__name__) FIO_TIMEOUT = 600 TEXT_CONTENT = ( "Lorem ipsum dolor sit amet, consectetur adipiscing elit, " "sed do eiusmod tempor incididunt ut labore et dolore magna " "aliqua. Ut enim ad minim veniam, quis nostrud exercitation " "ullamco laboris nisi ut aliquip ex ea commodo consequat. " "Duis aute irure dolor in reprehenderit in voluptate velit " "esse cillum dolore eu fugiat nulla pariatur. Excepteur sint " "occaecat cupidatat non proident, sunt in culpa qui officia " "deserunt mollit anim id est laborum." 
) TEST_FILE = '/var/lib/www/html/test' FEDORA_TEST_FILE = '/mnt/test' class Pod(OCS): """ Handles per pod related context """ def __init__(self, **kwargs): """ Initializer function kwargs: Copy of ocs/defaults.py::<some pod> dictionary """ self.pod_data = kwargs super(Pod, self).__init__(**kwargs) self.temp_yaml = tempfile.NamedTemporaryFile( mode='w+', prefix='POD_', delete=False ) self._name = self.pod_data.get('metadata').get('name') self._labels = self.get_labels() self._roles = [] self.ocp = OCP( api_version=defaults.API_VERSION, kind=constants.POD, namespace=self.namespace ) self.fio_thread = None # TODO: get backend config !! self.wl_obj = None self.wl_setup_done = False @property def name(self): return self._name @property def namespace(self): return self._namespace @property def roles(self): return self._roles @property def labels(self): return self._labels @property def restart_count(self): return self.get().get('status').get('containerStatuses')[0].get('restartCount') def __setattr__(self, key, val): self.__dict__[key] = val def add_role(self, role): """ Adds a new role for this pod Args: role (str): New role to be assigned for this pod """ self._roles.append(role) def get_fio_results(self): """ Get FIO execution results Returns: dict: Dictionary represents the FIO execution results Raises: Exception: In case of exception from FIO """ logger.info(f"Waiting for FIO results from pod {self.name}") try: result = self.fio_thread.result(FIO_TIMEOUT) if result: return yaml.safe_load(result) raise CommandFailed(f"FIO execution results: {result}.") except CommandFailed as ex: logger.exception(f"FIO failed: {ex}") raise except Exception as ex: logger.exception(f"Found Exception: {ex}") raise def exec_cmd_on_pod( self, command, out_yaml_format=True, secrets=None, timeout=600, **kwargs ): """ Execute a command on a pod (e.g. 
oc rsh) Args: command (str): The command to execute on the given pod out_yaml_format (bool): whether to return yaml loaded python object OR to return raw output secrets (list): A list of secrets to be masked with asterisks This kwarg is popped in order to not interfere with subprocess.run(``**kwargs``) timeout (int): timeout for the exec_oc_cmd, defaults to 600 seconds Returns: Munch Obj: This object represents a returned yaml file """ rsh_cmd = f"rsh {self.name} " rsh_cmd += command return self.ocp.exec_oc_cmd( rsh_cmd, out_yaml_format, secrets=secrets, timeout=timeout, **kwargs ) def exec_sh_cmd_on_pod(self, command, sh="bash"): """ Execute a pure bash command on a pod via oc exec where you can use bash syntaxt like &&, ||, ;, for loop and so on. Args: command (str): The command to execute on the given pod Returns: str: stdout of the command """ cmd = f'exec {self.name} -- {sh} -c "{command}"' return self.ocp.exec_oc_cmd(cmd, out_yaml_format=False) def get_labels(self): """ Get labels from pod Raises: NotFoundError: If resource not found Returns: dict: All the openshift labels on a given pod """ return self.pod_data.get('metadata').get('labels') def exec_ceph_cmd(self, ceph_cmd, format='json-pretty'): """ Execute a Ceph command on the Ceph tools pod Args: ceph_cmd (str): The Ceph command to execute on the Ceph tools pod format (str): The returning output format of the Ceph command Returns: dict: Ceph command output Raises: CommandFailed: In case the pod is not a toolbox pod """ if 'rook-ceph-tools' not in self.labels.values(): raise CommandFailed( "Ceph commands can be executed only on toolbox pod" ) ceph_cmd = ceph_cmd if format: ceph_cmd += f" --format {format}" out = self.exec_cmd_on_pod(ceph_cmd) # For some commands, like "ceph fs ls", the returned output is a list if isinstance(out, list): return [item for item in out if item] return out def get_storage_path(self, storage_type='fs'): """ Get the pod volume mount path or device path Returns: str: The mount 
path of the volume on the pod (e.g. /var/lib/www/html/) if storage_type is fs else device path of raw block pv """ # TODO: Allow returning a path of a specified volume of a specified # container if storage_type == 'block': return self.pod_data.get('spec').get('containers')[0].get( 'volumeDevices')[0].get('devicePath') return ( self.pod_data.get( 'spec' ).get('containers')[0].get('volumeMounts')[0].get('mountPath') ) def workload_setup(self, storage_type, jobs=1): """ Do setup on pod for running FIO Args: storage_type (str): 'fs' or 'block' jobs (int): Number of jobs to execute FIO """ work_load = 'fio' name = f'test_workload_{work_load}' path = self.get_storage_path(storage_type) # few io parameters for Fio self.wl_obj = workload.WorkLoad( name, path, work_load, storage_type, self, jobs ) assert self.wl_obj.setup(), f"Setup for FIO failed on pod {self.name}" self.wl_setup_done = True def run_io( self, storage_type, size, io_direction='rw', rw_ratio=75, jobs=1, runtime=60, depth=4, rate='1m', rate_process='poisson', fio_filename=None ): """ Execute FIO on a pod This operation will run in background and will store the results in 'self.thread.result()'. In order to wait for the output and not continue with the test until FIO is done, call self.thread.result() right after calling run_io. See tests/manage/test_pvc_deletion_during_io.py::test_run_io for usage of FIO Args: storage_type (str): 'fs' or 'block' size (str): Size in MB, e.g. '200M' io_direction (str): Determines the operation: 'ro', 'wo', 'rw' (default: 'rw') rw_ratio (int): Determines the reads and writes using a <rw_ratio>%/100-<rw_ratio>% (e.g. the default is 75 which means it is 75%/25% which equivalent to 3 reads are performed for every 1 write) jobs (int): Number of jobs to execute FIO runtime (int): Number of seconds IO should run for depth (int): IO depth rate (str): rate of IO default 1m, e.g. 16k rate_process (str): kind of rate process default poisson, e.g. 
def install_packages(self, packages):
    """
    Install packages in a Pod via yum.

    Args:
        packages (list): List of packages to install (a plain string with
            one or more space-separated package names is accepted too)
    """
    # Normalize a list of package names into the single space-separated
    # argument string that yum expects.
    pkg_spec = ' '.join(packages) if isinstance(packages, list) else packages
    self.exec_cmd_on_pod(f"yum install {pkg_spec} -y", out_yaml_format=False)
def get_all_pods(
    namespace=None, selector=None, selector_label='app',
    exclude_selector=False, wait=False
):
    """
    Get all pods in a namespace.

    Args:
        namespace (str): Name of the namespace
            If namespace is None - get all pods
        selector (list): List of the resource selector to search with.
            Example: ['alertmanager','prometheus']
        selector_label (str): Label of selector (default: app).
        exclude_selector (bool): If True, return pods whose label value is
            NOT in ``selector`` instead of those that are.
        wait (bool): If True, sleep before listing to let pods stabilize.

    Returns:
        list: List of Pod objects
    """
    ocp_pod_obj = OCP(kind=constants.POD, namespace=namespace)
    # In case of >4 worker nodes node failures automatic failover of pods
    # to other nodes will happen, so give the pods time to come up on the
    # new node before listing them.
    if wait:
        wait_time = 180
        logger.info(f"Waiting for {wait_time}s for the pods to stabilize")
        time.sleep(wait_time)
    pods = ocp_pod_obj.get()['items']
    if selector:
        def _label_matches(pod):
            # Compare the pod's value for selector_label against the
            # requested selector values.
            return pod['metadata']['labels'].get(selector_label) in selector

        if exclude_selector:
            pods = [pod for pod in pods if not _label_matches(pod)]
        else:
            pods = [pod for pod in pods if _label_matches(pod)]
    return [Pod(**pod) for pod in pods]
def get_rgw_pod(rgw_label=constants.RGW_APP_LABEL, namespace=None):
    """
    Fetches info about the rgw pod in the cluster.

    Args:
        rgw_label (str): label associated with rgw pods
            (default: constants.RGW_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        Pod object: rgw pod object (the first matching pod)
    """
    lookup_namespace = namespace or config.ENV_DATA['cluster_namespace']
    rgw_items = get_pods_having_label(rgw_label, lookup_namespace)
    return Pod(**rgw_items[0])
def get_file_path(pod_obj, file_name):
    """
    Build the full path of a file under the pod's first volume mount.

    Args:
        pod_obj (Pod): The object of the pod
        file_name (str): The name of the file for which path to get

    Returns:
        str: The full path of the file
    """
    pod_dict = pod_obj.get()
    # Only the first container's first volume mount is considered.
    first_container = pod_dict.get('spec').get('containers')[0]
    mount_path = first_container.get('volumeMounts')[0].get('mountPath')
    return os.path.join(mount_path, file_name)
def get_fio_rw_iops(pod_obj):
    """
    Log read/write IOPs reported by the last FIO run on a pod.

    Args:
        pod_obj (Pod): The object of the pod
    """
    fio_result = pod_obj.get_fio_results()
    logging.info(f"FIO output: {fio_result}")
    logging.info("IOPs after FIO:")
    # Only the first FIO job's numbers are reported.
    first_job = fio_result.get('jobs')[0]
    logging.info(f"Read: {first_job.get('read').get('iops')}")
    logging.info(f"Write: {first_job.get('write').get('iops')}")
def run_io_and_verify_mount_point(pod_obj, bs='10M', count='950'):
    """
    Run I/O on mount point and report its used capacity.

    Args:
        pod_obj (Pod): The object of the pod
        bs (str): Read and write up to bytes at a time
        count (str): Copy only N input blocks

    Returns:
        used_percentage (str): Used percentage on mount point
    """
    dd_cmd = f"dd if=/dev/urandom of=/var/lib/www/html/dd_a bs={bs} count={count}"
    pod_obj.exec_cmd_on_pod(command=dd_cmd)
    # Verify data was written: tokenize "df -kh" output — the used%
    # column immediately precedes the mount path token.
    df_tokens = pod_obj.exec_cmd_on_pod(command="df -kh").split()
    return df_tokens[df_tokens.index('/var/lib/www/html') - 1]
def get_mds_pods(mds_label=constants.MDS_APP_LABEL, namespace=None):
    """
    Fetches info about mds pods in the cluster.

    Args:
        mds_label (str): label associated with mds pods
            (default: constants.MDS_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list : of mds pod objects
    """
    target_namespace = namespace or config.ENV_DATA['cluster_namespace']
    return [
        Pod(**item)
        for item in get_pods_having_label(mds_label, target_namespace)
    ]
def get_osd_prepare_pods(
    osd_prepare_label=constants.OSD_PREPARE_APP_LABEL,
    namespace=None
):
    """
    Fetches info about osd prepare pods in the cluster.

    Args:
        osd_prepare_label (str): label associated with osd prepare pods
            (default: constants.OSD_PREPARE_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list: OSD prepare pod objects
    """
    # Default to None and fall back to the configured cluster namespace,
    # matching the other get_*_pods helpers; the previous hard-coded
    # defaults.ROOK_CLUSTER_NAMESPACE default made this fallback dead code.
    namespace = namespace or config.ENV_DATA['cluster_namespace']
    osds = get_pods_having_label(osd_prepare_label, namespace)
    osd_pods = [Pod(**osd) for osd in osds]
    return osd_pods
def get_rbdfsplugin_provisioner_pods(
    rbdplugin_provisioner_label=constants.CSI_RBDPLUGIN_PROVISIONER_LABEL,
    namespace=None
):
    """
    Fetches info about CSI RBD plugin provisioner pods in the cluster.

    (Docstring previously said "Cephfs" — a copy-paste error; this helper
    selects the RBD provisioner pods.)

    Args:
        rbdplugin_provisioner_label (str): label associated with RBD
            provisioner pods
            (default: constants.CSI_RBDPLUGIN_PROVISIONER_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list : csi-rbdplugin-provisioner Pod objects
    """
    namespace = namespace or config.ENV_DATA['cluster_namespace']
    pods = get_pods_having_label(rbdplugin_provisioner_label, namespace)
    rbd_plugin_pods = [Pod(**pod) for pod in pods]
    return rbd_plugin_pods
def delete_pods(pod_objs, wait=True):
    """
    Deletes list of the pod objects.

    Args:
        pod_objs (list): List of the pod objects to be deleted
        wait (bool): Determines if the delete command should wait for
            completion
    """
    # Delete sequentially; each pod's delete() honours the wait flag.
    for pod_obj in pod_objs:
        pod_obj.delete(wait=wait)
def get_pvc_name(pod_obj):
    """
    Get the PVC name attached to a pod.

    Args:
        pod_obj (Pod): The pod object

    Returns:
        str: The pvc name of a given pod_obj

    Raises:
        UnavailableResourceException: If no pvc attached
    """
    # Only the first volume of the pod spec is inspected.
    first_volume = pod_obj.get().get('spec').get('volumes')[0]
    pvc_ref = first_volume.get('persistentVolumeClaim')
    if not pvc_ref:
        raise UnavailableResourceException
    return pvc_ref.get('claimName')
eg: CephBlockPool, CephFileSystem namespace (str): Name of cluster namespace Returns: Pod: csi-cephfsplugin-provisioner or csi-rbdplugin-provisioner leader pod """ non_leader_msg = 'failed to acquire lease' lease_acq_msg = 'successfully acquired lease' lease_renew_msg = 'successfully renewed lease' leader_pod = '' if interface == constants.CEPHBLOCKPOOL: pods = get_rbdfsplugin_provisioner_pods(namespace=namespace) if interface == constants.CEPHFILESYSTEM: pods = get_cephfsplugin_provisioner_pods(namespace=namespace) pods_log = {} for pod in pods: pods_log[pod] = get_pod_logs( pod_name=pod.name, container='csi-provisioner' ).split('\n') for pod, log_list in pods_log.items(): # Reverse the list to find last occurrence of message without # iterating over all elements log_list.reverse() for log_msg in log_list: # Check for last occurrence of leader messages. # This will be the first occurrence in reversed list. if (lease_renew_msg in log_msg) or (lease_acq_msg in log_msg): curr_index = log_list.index(log_msg) # Ensure that there is no non leader message logged after # the last occurrence of leader message if not any( non_leader_msg in msg for msg in log_list[:curr_index] ): assert not leader_pod, ( "Couldn't identify plugin provisioner leader pod by " "analysing the logs. Found more than one match." ) leader_pod = pod break assert leader_pod, "Couldn't identify plugin provisioner leader pod." 
logger.info(f"Plugin provisioner leader pod is {leader_pod.name}") return leader_pod def get_operator_pods(operator_label=constants.OPERATOR_LABEL, namespace=None): """ Fetches info about rook-ceph-operator pods in the cluster Args: operator_label (str): Label associated with rook-ceph-operator pod namespace (str): Namespace in which ceph cluster lives Returns: list : of rook-ceph-operator pod objects """ namespace = namespace or config.ENV_DATA['cluster_namespace'] operators = get_pods_having_label(operator_label, namespace) operator_pods = [Pod(**operator) for operator in operators] return operator_pods def upload(pod_name, localpath, remotepath, namespace=None): """ Upload a file to pod Args: pod_name (str): Name of the pod localpath (str): Local file to upload remotepath (str): Target path on the pod """ namespace = namespace or constants.DEFAULT_NAMESPACE cmd = f"oc -n {namespace} cp {os.path.expanduser(localpath)} {pod_name}:{remotepath}" run_cmd(cmd) def verify_pods_upgraded(old_images, selector, count=1, timeout=720): """ Verify that all pods do not have old image. Args: old_images (set): Set with old images. selector (str): Selector (e.g. app=ocs-osd) count (int): Number of resources for selector. timeout (int): Timeout in seconds to wait for pods to be upgraded. Raises: TimeoutException: If the pods didn't get upgraded till the timeout. """ namespace = config.ENV_DATA['cluster_namespace'] pod = OCP( kind=constants.POD, namespace=namespace, ) info_message = ( f"Waiting for {count} pods with selector: {selector} to be running " f"and upgraded." 
def get_noobaa_pods(noobaa_label=constants.NOOBAA_APP_LABEL, namespace=None):
    """
    Fetches info about noobaa pods in the cluster.

    Args:
        noobaa_label (str): label associated with noobaa pods
            (default: constants.NOOBAA_APP_LABEL)
        namespace (str): Namespace in which ceph cluster lives
            (default: the configured cluster namespace)

    Returns:
        list : of noobaa pod objects
    """
    lookup_ns = namespace or config.ENV_DATA['cluster_namespace']
    return [
        Pod(**item)
        for item in get_pods_having_label(noobaa_label, lookup_ns)
    ]
exclude_state (str): A resource state to ignore """ for pod_obj in dc_pod_obj: name = pod_obj.get_labels().get('name') dpod_list = get_all_pods(selector_label=f"name={name}", wait=True) for dpod in dpod_list: if '-1-deploy' not in dpod.name and dpod.status != exclude_state: helpers.wait_for_resource_state( dpod, constants.STATUS_RUNNING, timeout=timeout )
Active_Directory_Query_test.py
"""Unit tests for the Active_Directory_Query integration (demisto/XSOAR)."""
import demistomock as demisto
from Active_Directory_Query import main, group_dn
import socket
import ssl
from threading import Thread
import time
import os
import pytest
import json
from IAMApiModule import *
from unittest.mock import patch

# Baseline integration parameters; individual tests copy and override them.
BASE_TEST_PARAMS = {
    'server_ip': '127.0.0.1',
    'secure_connection': 'None',
    'page_size': '500',
    'credentials': {'identifier': 'bad', 'password': 'bad'}
}

# Patch target for the integration's return_error helper.
RETURN_ERROR_TARGET = 'Active_Directory_Query.return_error'


def test_bad_host_no_ssl(mocker):
    """main() against an unreachable host must report a short 'Failed to access' error."""
    mocker.patch.object(demisto, 'params', return_value=BASE_TEST_PARAMS)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    # validate our mock of params
    assert demisto.params().get('server_ip') == '127.0.0.1'
    main()
    assert return_error_mock.call_count == 1
    # call_args last call with a tuple of args list and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg


@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_bad_ssl(mocker):
    """Connecting over SSL to a host whose certificate doesn't match must fail with an SSL error."""
    params = BASE_TEST_PARAMS.copy()
    params['server_ip'] = '185.199.108.153'  # disable-secrets-detection
    params['secure_connection'] = 'SSL'
    params['port'] = 443
    mocker.patch.object(demisto, 'params', return_value=params)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    demisto_info_mock = mocker.patch.object(demisto, "info")
    # validate our mock of params
    assert demisto.params().get('secure_connection') == 'SSL'
    main()
    assert return_error_mock.call_count == 1
    # call_args last call with a tuple of args list and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg
    assert 'SSL error' in err_msg
    # call_args_list holds all calls (we need the first) with a tuple of args list and kwargs
    info_msg = demisto_info_mock.call_args_list[0][0][0]
    # ip is not in the certificate. so it should fail on host match
    assert "doesn't match any name" in info_msg


def ssl_bad_socket_server(port):
    """Minimal TLS server (self-signed cert) that answers any request with garbage.

    Run in a background thread by tests that need a reachable-but-broken SSL peer.
    """
    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    # cert and keyfile generated with
    # openssl req -x509 -nodes -days 3000 -newkey rsa:2048 -keyout key.pem -out cert.pem
    try:
        context.load_cert_chain('cert.pem', 'key.pem')
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) as sock:
            sock.bind(('127.0.0.1', port))
            sock.listen(5)
            with context.wrap_socket(sock, server_side=True) as ssock:
                try:
                    conn, addr = ssock.accept()
                except ssl.SSLError as err:
                    if 'TLSV1_ALERT_UNKNOWN_CA' in str(err):
                        # all is ok. client refused our cert
                        return
                    raise
                conn.recv(32)
                # Build a large (~47 KB) nonsense payload by repeated doubling.
                msg = b'THIS IS A TEST SERVER WHICH IGNORES PROTOCOL\n\n'
                for x in range(10):
                    msg += msg
                conn.send(msg)
                conn.shutdown(socket.SHUT_RDWR)
                conn.close()
    except Exception as ex:
        # pytest.fail raises, so the trailing `raise` is effectively unreachable.
        pytest.fail("Failed starting ssl_bad_socket_server: {}".format(ex))
        raise


@pytest.mark.filterwarnings("ignore::ResourceWarning")
def test_faulty_server(mocker):
    """A server that ignores the LDAP protocol must surface as a 'Failed to access' error."""
    port = 9638
    t = Thread(target=ssl_bad_socket_server, args=(port,))
    t.start()
    time.sleep(1)  # wait for socket server to startup
    params = BASE_TEST_PARAMS.copy()
    params['server_ip'] = '127.0.0.1'  # disable-secrets-detection
    params['secure_connection'] = 'SSL'
    params['unsecure'] = True
    params['port'] = port
    mocker.patch.object(demisto, 'params', return_value=params)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    # validate our mock of params
    assert demisto.params().get('secure_connection') == 'SSL'
    main()
    t.join(5)
    assert return_error_mock.call_count == 1
    # call_args last call with a tuple of args list and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg


def test_ssl_custom_cert(mocker, request):
    """With SSL_CERT_FILE pointing at the server's cert, failure must NOT be an SSL error."""
    ENV_KEY = 'SSL_CERT_FILE'
    os.environ[ENV_KEY] = 'cert.pem'

    def cleanup():
        os.environ.pop(ENV_KEY)
    request.addfinalizer(cleanup)
    port = 9637
    t = Thread(target=ssl_bad_socket_server, args=(port,))
    t.start()
    time.sleep(1)  # wait for socket server to startup
    params = BASE_TEST_PARAMS.copy()
    params['server_ip'] = '127.0.0.1'  # disable-secrets-detection
    params['secure_connection'] = 'SSL'
    params['port'] = port
    mocker.patch.object(demisto, 'params', return_value=params)
    return_error_mock = mocker.patch(RETURN_ERROR_TARGET)
    # validate our mock of params
    assert demisto.params().get('secure_connection') == 'SSL'
    main()
    t.join(5)
    assert return_error_mock.call_count == 1
    # call_args last call with a tuple of args list and kwargs
    err_msg = return_error_mock.call_args[0][0]
    assert len(err_msg) < 100
    assert 'Failed to access' in err_msg
    assert 'SSL error' not in err_msg


def test_endpoint_entry():
    """
    Given:
        Custom attributes to filter the computer object entry.
    When:
        The function filters the computer object according to the custom attributes.
    Then:
        The function will return all the computer object entry because custom attributes contain '*'.
    """
    from Active_Directory_Query import endpoint_entry
    custom_attributes_with_asterisk = \
        endpoint_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*'])
    assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'Hostname': 'name', 'ID': 'dn', 'Type': 'AD'}


def get_outputs_from_user_profile(user_profile):
    """Helper: extract the Contents section from an IAMUserProfile entry."""
    entry_context = user_profile.to_entry()
    outputs = entry_context.get('Contents')
    return outputs


def mock_demisto_map_object(object, mapper_name, incident_type):
    """Helper: stand-in for demisto.mapObject — derives AD fields from the email prefix."""
    email = object.get('email')
    email_prefix = email.split('@')[0]
    return {
        'cn': email_prefix,
        'mail': email,
        'sAMAccountName': email_prefix,
        'userPrincipalName': email_prefix,
        "ou": "OU=Americas,OU=Demisto"
    }


def test_get_iam_user_profile(mocker):
    """get_iam_user_profile must map via demisto.mapObject and return the sAMAccountName
    derived from the OLD user data (username change scenario)."""
    from Active_Directory_Query import get_iam_user_profile
    mocker.patch.object(demisto, 'mapObject', side_effect=mock_demisto_map_object)
    user_profile = {"email": "test2@paloaltonetworks.com", "username": "test", "locationregion": "Americas",
                    "olduserdata": {"email": "test@paloaltonetworks.com", "username": "test",
                                    "locationregion": "Americas"}}
    _, ad_user, sam_account_name = get_iam_user_profile(user_profile, 'mock_mapper_out')
    assert sam_account_name == 'test'
    assert ad_user


def test_update_user_iam__username_change(mocker):
    """
    Given:
        A valid user profile with valid mapping
    When:
        Running the `update_user_iam` command with a changed email/username
    Then:
        The user was updated successfully in AD.
    """
    import Active_Directory_Query

    add_args, add_kwargs = [], {}

    class ConnectionMocker:
        # Fakes the ldap3 Connection the integration holds in Active_Directory_Query.conn.
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}

        def search(self, *args, **kwargs):
            return

        def add(self, *args, **kwargs):
            nonlocal add_args, add_kwargs
            add_args, add_kwargs = args, kwargs
            return True

        def modify(self, *args, **kwargs):
            return True

        def modify_dn(self, *args, **kwargs):
            return True

    Active_Directory_Query.conn = ConnectionMocker()
    args = {"user-profile": json.dumps({"email": "test2@paloaltonetworks.com", "username": "test",
                                        "locationregion": "Americas",
                                        "olduserdata": {"email": "test@paloaltonetworks.com",
                                                        "username": "test",
                                                        "locationregion": "Americas"}})}
    mocker.patch.object(demisto, 'mapObject', side_effect=mock_demisto_map_object)
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=True)
    mocker.patch('Active_Directory_Query.get_user_activity_by_samaccountname', return_value=True)
    mocker.patch('Active_Directory_Query.user_dn', return_value='mock_dn')
    user_profile = Active_Directory_Query.update_user_iam(
        default_base_dn='mock_base_dn',
        args=args,
        create_if_not_exists=False,
        mapper_out='mock_mapper_out',
        disabled_users_group_cn='mock_disabled_users_group_cn'
    )
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.UPDATE_USER
    assert outputs.get('success') is True
    assert outputs.get('email') == 'test2@paloaltonetworks.com'
    assert outputs.get('username') == 'test2'


def test_create_user_iam(mocker):
    """
    Given:
        A valid user profile with valid mapping
    When:
        Running the `create_user_iam` command
    Then:
        The user was created successfully in AD.
    """
    import Active_Directory_Query

    add_args, add_kwargs = [], {}

    class ConnectionMocker:
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}

        def search(self, *args, **kwargs):
            return

        def add(self, *args, **kwargs):
            nonlocal add_args, add_kwargs
            add_args, add_kwargs = args, kwargs
            return True

    Active_Directory_Query.conn = ConnectionMocker()
    args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
                                        "locationregion": "Americas"})}
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
    mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test',
                                                                    'mail': 'test@paloaltonetworks.com',
                                                                    'sAMAccountName': 'test',
                                                                    'userPrincipalName': 'test',
                                                                    "ou": "OU=Americas,OU=Demisto"})
    user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.CREATE_USER
    assert outputs.get('success') is True
    assert outputs.get('active') is False
    assert outputs.get('email') == 'test@paloaltonetworks.com'


def test_unseccsseful_create_user_iam_missing_ou(mocker):
    """
    Given:
        A valid user profile with missing ou in the mapping
    When:
        Running the `create_user_iam` command
    Then:
        - The user was not created in AD.
        - An error message was returned.
    """
    import Active_Directory_Query

    add_args, add_kwargs = [], {}

    class ConnectionMocker:
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}

        def search(self, *args, **kwargs):
            return

        def add(self, *args, **kwargs):
            nonlocal add_args, add_kwargs
            add_args, add_kwargs = args, kwargs
            return True

    Active_Directory_Query.conn = ConnectionMocker()
    args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
                                        "locationregion": "Americas"})}
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
    # Mapping deliberately omits 'ou' to trigger the validation error.
    mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test',
                                                                    'mail': 'test@paloaltonetworks.com',
                                                                    'sAMAccountName': 'test',
                                                                    'userPrincipalName': 'test'})
    user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.CREATE_USER
    assert outputs.get('success') is False
    assert outputs.get('email') == 'test@paloaltonetworks.com'
    assert 'User must have an Organizational Unit (OU)' in outputs.get('errorMessage')


def test_unseccsseful_create_user_iam_missing_samaccountname(mocker):
    """
    Given:
        A valid user profile with missing samaccountname in the mapping
    When:
        Running the `create_user_iam` command
    Then:
        - The user was not created in AD.
        - An error message was returned.
    """
    import Active_Directory_Query

    add_args, add_kwargs = [], {}

    class ConnectionMocker:
        entries = []
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}

        def search(self, *args, **kwargs):
            return

        def add(self, *args, **kwargs):
            nonlocal add_args, add_kwargs
            add_args, add_kwargs = args, kwargs
            return True

    Active_Directory_Query.conn = ConnectionMocker()
    args = {"user-profile": json.dumps({"email": "test@paloaltonetworks.com", "username": "test",
                                        "locationregion": "Americas"})}
    mocker.patch('Active_Directory_Query.check_if_user_exists_by_attribute', return_value=False)
    # Mapping deliberately omits 'sAMAccountName' to trigger the validation error.
    mocker.patch.object(IAMUserProfile, 'map_object', return_value={'cn': 'test',
                                                                    'mail': 'test@paloaltonetworks.com',
                                                                    "ou": "OU=Americas,OU=Demisto",
                                                                    'userPrincipalName': 'test'})
    user_profile = Active_Directory_Query.create_user_iam('', args, 'mapper_out', '')
    outputs = get_outputs_from_user_profile(user_profile)
    assert outputs.get('action') == IAMActions.CREATE_USER
    assert outputs.get('success') is False
    assert outputs.get('email') == 'test@paloaltonetworks.com'
    assert 'User must have a sAMAccountName' in outputs.get('errorMessage')


def test_group_entry_no_custom_attributes():
    """
    Given:
        Custom attributes to filter the group object entry.
    When:
        The function filters the group object according to the custom attributes.
    Then:
        The function will return all the group object entry because custom attributes contain '*'.
    """
    from Active_Directory_Query import group_entry
    custom_attributes_with_asterisk = \
        group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf'}, ['*'])
    assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD'}


def test_group_entry():
    """
    Given:
        Custom attributes to filter the group object entry.
    When:
        The function filters the group object according to the custom attributes.
    Then:
        The function will return the group object entry with the requested
        custom attribute ('displayName') included alongside the standard fields.
    """
    from Active_Directory_Query import group_entry
    custom_attributes_with_asterisk = \
        group_entry({'dn': 'dn', 'name': 'name', 'memberOf': 'memberOf', 'displayName': 'display name'},
                    ['displayName'])
    assert custom_attributes_with_asterisk == {'Groups': 'memberOf', 'ID': 'dn', 'Name': 'name', 'Type': 'AD',
                                               'displayName': 'display name'}


def test_search_group_members(mocker):
    """
    sanity test for search_group_members method
    """
    import Active_Directory_Query

    class EntryMocker:
        def entry_to_json(self):
            return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'

    class ConnectionMocker:
        entries = [EntryMocker()]
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}

        def search(self, *args, **kwargs):
            return

    expected_results = {'ContentsFormat': 'json', 'Type': 1,
                        'Contents': [{'dn': 'dn', 'attributes': {'memberOf': ['memberOf'], 'name': ['name']}}],
                        'ReadableContentsFormat': 'markdown',
                        'HumanReadable': '### Active Directory - Get Group Members\n|'
                                         'dn|memberOf|name|\n|---|---|---|\n| dn | memberOf | name |\n',
                        'EntryContext': {'ActiveDirectory.Groups(obj.dn ==dn)': {'dn': 'dn', 'members': [
                            {'dn': 'dn', 'category': 'group'}]},
                            'ActiveDirectory.Groups(obj.dn == val.dn)': [
                                {'dn': 'dn', 'memberOf': ['memberOf'], 'name': ['name']}],
                            'Group': [{'Type': 'AD', 'ID': 'dn', 'Name': ['name'], 'Groups': ['memberOf']}]}}
    # The integration logs results via logging.Logger.info; compare against that string.
    expected_results = f'demisto results: {json.dumps(expected_results, indent=4, sort_keys=True)}'

    mocker.patch.object(demisto, 'args', return_value={'member-type': 'group', 'group-dn': 'dn'})
    Active_Directory_Query.conn = ConnectionMocker()

    with patch('logging.Logger.info') as mock:
        Active_Directory_Query.search_group_members('dc', 1)
        mock.assert_called_with(expected_results)


def test_group_dn_escape_characters():
    """
    Given:
        Group name with parentheses
    When:
        Running the function group_dn
    Then:
        The function search gets the group name after escape special characters.
    """
    import Active_Directory_Query

    class EntryMocker:
        def entry_to_json(self):
            return '{"dn": "dn","attributes": {"memberOf": ["memberOf"], "name": ["name"]}}'

    class ConnectionMocker:
        entries = [EntryMocker()]
        result = {'controls': {'1.2.840.113556.1.4.319': {'value': {'cookie': '<cookie>'}}}}

    Active_Directory_Query.conn = ConnectionMocker()

    with patch('Active_Directory_Query.search', return_value=[EntryMocker()]) as mock:
        group_dn('group(group)', '')
        # Parentheses must be escaped as \28 / \29 in the LDAP filter.
        mock.assert_called_with('(&(objectClass=group)(cn=group\\28group\\29))', '')


def test_search__no_control_exist(mocker):
    """
    Given:
        No control key in the result
    When:
        Run any search query
    Then:
        The result return 'no entries' instead of throw exception
    """
    import Active_Directory_Query

    class ConnectionMocker:
        entries = []
        result = {}

        def search(self, *args, **kwargs):
            return

    mocker.patch.object(demisto, 'results')
    Active_Directory_Query.conn = ConnectionMocker()
    Active_Directory_Query.search_users('dc=test,dc=test_1', page_size=20)
    assert '**No entries.**' in demisto.results.call_args[0][0]['HumanReadable']


def test_user_account_to_boolean_fields():
    """
    Given:
        a userAccountControl value
    When:
        parsing the userAccountControl fields
    Then:
        Only the relevant fields will be marked as true
    """
    import Active_Directory_Query
    # 0x50 = LOCKOUT (0x10) | PASSWD_CANT_CHANGE (0x40)
    fields = Active_Directory_Query.user_account_to_boolean_fields(0x50)
    assert {k for k, v in fields.items() if v} == {'LOCKOUT', 'PASSWD_CANT_CHANGE'}
main.py
import os
import sys
import random
import traceback
import math
import threading
import argparse
import logging

import numpy as np
from scipy.stats import rankdata
from tensorflow.keras.optimizers import RMSprop, Adam
from tqdm import tqdm

random.seed(42)

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")

from utils import normalize, pad, convert, revert
import models, configs, data_loader


class SearchEngine:
    """Train, evaluate, and query a joint code/description embedding model.

    Holds dataset/model configuration and lazy caches for the evaluation set,
    the precomputed code vectors, and the raw codebase used for search.
    """

    def __init__(self, args, conf=None):
        self.data_path = args.data_path + args.dataset + '/'
        # Guard against conf=None so attribute access below cannot crash.
        conf = conf or {}
        self.train_params = conf.get('training_params', dict())
        self.data_params = conf.get('data_params', dict())
        self.model_params = conf.get('model_params', dict())
        self._eval_sets = None       # cached validation data (dict of arrays)
        self._code_reprs = None      # list of code-vector chunks for search
        self._codebase = None        # list of raw-code chunks aligned with _code_reprs
        self._codebase_chunksize = 2000000

    ##### Model Loading / saving #####
    def save_model(self, model, epoch):
        """Persist the code/desc encoder weights for `epoch` under ./output/<model>/models/."""
        model_path = f"./output/{model.__class__.__name__}/models/"
        os.makedirs(model_path, exist_ok=True)
        model.save(model_path + f"epo{epoch}_code.h5", model_path + f"epo{epoch}_desc.h5", overwrite=True)

    def load_model(self, model, epoch):
        """Load the encoder weights saved for `epoch`; raises AssertionError if missing."""
        model_path = f"./output/{model.__class__.__name__}/models/"
        assert os.path.exists(model_path + f"epo{epoch}_code.h5"), f"Weights at epoch {epoch} not found"
        assert os.path.exists(model_path + f"epo{epoch}_desc.h5"), f"Weights at epoch {epoch} not found"
        model.load(model_path + f"epo{epoch}_code.h5", model_path + f"epo{epoch}_desc.h5")

    ##### Training #####
    def train(self, model):
        """Train `model` one data chunk per epoch, periodically saving/validating.

        Negative descriptions are produced by shuffling the positive ones, so
        each (code, good_desc, bad_desc) triple feeds the ranking loss.
        """
        if self.train_params['reload'] > 0:
            self.load_model(model, self.train_params['reload'])
        valid_every = self.train_params.get('valid_every', None)
        save_every = self.train_params.get('save_every', None)
        batch_size = self.train_params.get('batch_size', 128)
        nb_epoch = self.train_params.get('nb_epoch', 1)
        split = self.train_params.get('validation_split', 0)
        val_loss = {'loss': 1., 'epoch': 0}
        chunk_size = self.train_params.get('chunk_size', 100000)

        for i in range(self.train_params['reload'] + 1, nb_epoch):
            print('Epoch %d :: \n' % i, end='')

            logger.debug('loading data chunk..')
            offset = (i - 1) * chunk_size
            names = data_loader.load_hdf5(self.data_path + self.data_params['train_methname'], offset, chunk_size)
            apis = data_loader.load_hdf5(self.data_path + self.data_params['train_apiseq'], offset, chunk_size)
            tokens = data_loader.load_hdf5(self.data_path + self.data_params['train_tokens'], offset, chunk_size)
            descs = data_loader.load_hdf5(self.data_path + self.data_params['train_desc'], offset, chunk_size)

            logger.debug('padding data..')
            methnames = pad(names, self.data_params['methname_len'])
            apiseqs = pad(apis, self.data_params['apiseq_len'])
            tokens = pad(tokens, self.data_params['tokens_len'])
            good_descs = pad(descs, self.data_params['desc_len'])
            # Negative samples: the same descriptions in shuffled order.
            bad_descs = [desc for desc in descs]
            random.shuffle(bad_descs)
            bad_descs = pad(bad_descs, self.data_params['desc_len'])

            hist = model.fit([methnames, apiseqs, tokens, good_descs, bad_descs],
                             epochs=1, batch_size=batch_size, validation_split=split)

            # 'val_loss' only exists when validation_split > 0; guard to avoid KeyError.
            if split > 0 and hist.history['val_loss'][0] < val_loss['loss']:
                val_loss = {'loss': hist.history['val_loss'][0], 'epoch': i}
            print('Best: Loss = {}, Epoch = {}'.format(val_loss['loss'], val_loss['epoch']))

            if save_every is not None and i % save_every == 0:
                self.save_model(model, i)

            if valid_every is not None and i % valid_every == 0:
                acc, mrr, map_, ndcg = self.valid(model, 1000, 1)

    ##### Evaluation in the develop set #####
    def valid(self, model, poolsize, K):
        """Evaluate retrieval quality on the validation set.

        Each description is used as a query against the whole pool; its own
        code snippet is the single relevant item.

        Args:
            model: the joint embedding model to evaluate.
            poolsize (int): size of the code pool; if -1, load the whole test set.
            K (int): cut-off rank for the top-K metrics.

        Returns:
            tuple: mean (ACC, MRR, MAP, nDCG) over all queries.
        """
        def ACC(real, predict):
            # Fraction of relevant items found anywhere in `predict`.
            total = 0.0
            for val in real:
                try:
                    index = predict.index(val)
                except ValueError:
                    index = -1
                if index != -1:
                    total = total + 1
            return total / float(len(real))

        def MAP(real, predict):
            total = 0.0
            for idx, val in enumerate(real):
                try:
                    index = predict.index(val)
                except ValueError:
                    index = -1
                if index != -1:
                    total = total + (idx + 1) / float(index + 1)
            return total / float(len(real))

        def MRR(real, predict):
            total = 0.0
            for val in real:
                try:
                    index = predict.index(val)
                except ValueError:
                    index = -1
                if index != -1:
                    total = total + 1.0 / float(index + 1)
            return total / float(len(real))

        def IDCG(n):
            # Ideal DCG: all n relevant items ranked at the very top.
            idcg = 0
            itemRelevance = 1
            for i in range(n):
                idcg += (math.pow(2, itemRelevance) - 1.0) * (math.log(2) / math.log(i + 2))
            return idcg

        def NDCG(real, predict):
            dcg = 0.0
            idcg = IDCG(len(real))
            for i, predictItem in enumerate(predict):
                if predictItem in real:
                    itemRelevance = 1
                    rank = i + 1
                    dcg += (math.pow(2, itemRelevance) - 1.0) * (math.log(2) / math.log(rank + 1))
            return dcg / float(idcg)

        # Load (and cache) the validation dataset on first use.
        if self._eval_sets is None:
            methnames = data_loader.load_hdf5(self.data_path + self.data_params['valid_methname'], 0, poolsize)
            apiseqs = data_loader.load_hdf5(self.data_path + self.data_params['valid_apiseq'], 0, poolsize)
            tokens = data_loader.load_hdf5(self.data_path + self.data_params['valid_tokens'], 0, poolsize)
            descs = data_loader.load_hdf5(self.data_path + self.data_params['valid_desc'], 0, poolsize)
            self._eval_sets = {'methnames': methnames, 'apiseqs': apiseqs, 'tokens': tokens, 'descs': descs}

        accs, mrrs, maps, ndcgs = [], [], [], []
        data_len = len(self._eval_sets['descs'])
        # The code-side inputs are query-independent; pad them once, not per query.
        methnames = pad(self._eval_sets['methnames'], self.data_params['methname_len'])
        apiseqs = pad(self._eval_sets['apiseqs'], self.data_params['apiseq_len'])
        tokens = pad(self._eval_sets['tokens'], self.data_params['tokens_len'])
        n_results = K
        for i in tqdm(range(data_len)):
            desc = self._eval_sets['descs'][i]  # good desc
            descs = pad([desc] * data_len, self.data_params['desc_len'])
            sims = model.predict([methnames, apiseqs, tokens, descs], batch_size=data_len).flatten()
            # argpartition on negated sims gives the indices of the top-K similarities.
            negsims = np.negative(sims)
            predict = np.argpartition(negsims, kth=n_results - 1)
            predict = predict[:n_results]
            predict = [int(k) for k in predict]
            real = [i]
            accs.append(ACC(real, predict))
            mrrs.append(MRR(real, predict))
            maps.append(MAP(real, predict))
            ndcgs.append(NDCG(real, predict))
        acc, mrr, map_, ndcg = np.mean(accs), np.mean(mrrs), np.mean(maps), np.mean(ndcgs)
        logger.info(f'ACC={acc}, MRR={mrr}, MAP={map_}, nDCG={ndcg}')
        return acc, mrr, map_, ndcg

    ##### Compute Representation #####
    def repr_code(self, model):
        """Encode the whole use-time codebase into L2-normalized vectors."""
        logger.info('Loading the use data ..')
        methnames = data_loader.load_hdf5(self.data_path + self.data_params['use_methname'], 0, -1)
        apiseqs = data_loader.load_hdf5(self.data_path + self.data_params['use_apiseq'], 0, -1)
        tokens = data_loader.load_hdf5(self.data_path + self.data_params['use_tokens'], 0, -1)
        methnames = pad(methnames, self.data_params['methname_len'])
        apiseqs = pad(apiseqs, self.data_params['apiseq_len'])
        tokens = pad(tokens, self.data_params['tokens_len'])

        logger.info('Representing code ..')
        vecs = model.repr_code([methnames, apiseqs, tokens], batch_size=10000)
        # np.float was removed in NumPy 1.20+; float32 also matches the dtype
        # used for description vectors in search().
        vecs = vecs.astype(np.float32)
        vecs = normalize(vecs)
        return vecs

    def search(self, model, vocab, query, n_results=10):
        """Return top candidate code snippets (and similarities) for a text query.

        Each preloaded code-vector chunk is scanned by its own worker thread;
        results are accumulated into the shared `codes`/`sims` lists.
        """
        desc = [convert(vocab, query)]  # convert desc sentence to word indices
        padded_desc = pad(desc, self.data_params['desc_len'])
        desc_repr = model.repr_desc([padded_desc])
        desc_repr = desc_repr.astype(np.float32)
        desc_repr = normalize(desc_repr).T  # [dim x 1]

        codes, sims = [], []
        threads = []
        for i, code_reprs_chunk in enumerate(self._code_reprs):
            t = threading.Thread(target=self.search_thread,
                                 args=(codes, sims, desc_repr, code_reprs_chunk, i, n_results))
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:  # wait until all sub-threads finish
            t.join()
        return codes, sims

    def search_thread(self, codes, sims, desc_repr, code_reprs, i, n_results):
        """Score one code-vector chunk against the query and append its top-K hits."""
        # 1. compute similarity (dot product of normalized vectors = cosine similarity)
        chunk_sims = np.dot(code_reprs, desc_repr)  # [pool_size x 1]
        chunk_sims = np.squeeze(chunk_sims, axis=1)

        # 2. choose top results
        negsims = np.negative(chunk_sims)
        maxinds = np.argpartition(negsims, kth=n_results - 1)
        maxinds = maxinds[:n_results]
        chunk_codes = [self._codebase[i][k] for k in maxinds]
        chunk_sims = chunk_sims[maxinds]
        # list.extend is atomic under the GIL, so concurrent appends are safe here.
        codes.extend(chunk_codes)
        sims.extend(chunk_sims)

    def postproc(self, codes_sims):
        """Drop near-duplicate results (same 80-char prefix and ~equal similarity)."""
        codes_, sims_ = zip(*codes_sims)
        codes = [code for code in codes_]
        sims = [sim for sim in sims_]
        final_codes = []
        final_sims = []
        n = len(codes_sims)
        for i in range(n):
            is_dup = False
            for j in range(i):
                if codes[i][:80] == codes[j][:80] and abs(sims[i] - sims[j]) < 0.01:
                    is_dup = True
            if not is_dup:
                final_codes.append(codes[i])
                final_sims.append(sims[i])
        return zip(final_codes, final_sims)


def parse_args():
    """Parse command-line options for training/evaluating/searching."""
    parser = argparse.ArgumentParser("Train and Test Code Search(Embedding) Model")
    parser.add_argument("--data_path", type=str, default='./data/', help="working directory")
    parser.add_argument("--model", type=str, default="JointEmbeddingModel", help="model name")
    parser.add_argument("--dataset", type=str, default="github", help="dataset name")
    parser.add_argument("--mode", choices=["train", "eval", "repr_code", "search"], default='train',
                        help="The mode to run. The `train` mode trains a model;"
                             " the `eval` mode evaluat models in a test set "
                             " The `repr_code/repr_desc` mode computes vectors"
                             " for a code snippet or a natural language description with a trained model.")
    parser.add_argument("--verbose", action="store_true", default=True, help="Be verbose")
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    config = getattr(configs, 'config_' + args.model)()
    engine = SearchEngine(args, config)

    ##### Define model ######
    logger.info('Build Model')
    model = getattr(models, args.model)(config)  # initialize the model
    model.build()
    model.summary(export_path=f"./output/{args.model}/")

    optimizer = config.get('training_params', dict()).get('optimizer', 'adam')
    model.compile(optimizer=optimizer)

    data_path = args.data_path + args.dataset + '/'

    if args.mode == 'train':
        engine.train(model)

    elif args.mode == 'eval':
        # evaluate for a specific epoch
        assert config['training_params']['reload'] > 0, \
            "please specify the number of epoch of the optimal checkpoint in config.py"
        engine.load_model(model, config['training_params']['reload'])
        engine.valid(model, -1, 10)

    elif args.mode == 'repr_code':
        engine.load_model(model, config['training_params']['reload'])
        vecs = engine.repr_code(model)
        data_loader.save_code_reprs(vecs, data_path + config['data_params']['use_codevecs'])

    elif args.mode == 'search':
        # search code based on a desc
        engine.load_model(model, config['training_params']['reload'])
        engine._code_reprs = data_loader.load_code_reprs(data_path + config['data_params']['use_codevecs'],
                                                         engine._codebase_chunksize)
        engine._codebase = data_loader.load_codebase(data_path + config['data_params']['use_codebase'],
                                                     engine._codebase_chunksize)
        vocab = data_loader.load_pickle(data_path + config['data_params']['vocab_desc'])
        while True:
            try:
                query = input('Input Query: ')
                n_results = int(input('How many results? '))
            except Exception:
                print("Exception while parsing your input:")
                traceback.print_exc()
                break
            # Strip common question phrasing before embedding the query.
            query = query.lower().replace('how to ', '').replace('how do i ', '') \
                         .replace('how can i ', '').replace('?', '').strip()
            codes, sims = engine.search(model, vocab, query, n_results)
            zipped = zip(codes, sims)
            zipped = sorted(zipped, reverse=True, key=lambda x: x[1])
            zipped = engine.postproc(zipped)
            zipped = list(zipped)[:n_results]
            results = '\n\n'.join(map(str, zipped))  # combine the result into a returning string
            print(results)
guiManager.py
import cv2
import tkinter
from PIL import ImageTk, Image
import threading
from settings import *
import sys


class GuiManager:
    """Tkinter preview window for a tracker object; the GUI runs on its own thread."""

    def __init__(self, tracker):
        # Mouse-drawn rectangle state: (ax, ay) anchor corner, (bx, by) moving corner.
        self.drawing = False
        self.ax, self.ay = -1, -1
        self.bx, self.by = -1, -1
        self.tracker = tracker
        # Run the tkinter mainloop on a dedicated thread so the caller is not blocked.
        self.mainWindowThread = threading.Thread(target=self._CreateMainWindow)
        self.mainWindowThread.start()

    def Close(self):
        # Stop the tkinter mainloop and wait for the GUI thread to finish.
        self.mainWindow.quit()
        self.mainWindowThread.join()

    def _Stop(self):
        # Signal the tracker loop to stop; also wired to the window-close button.
        self.tracker.running = False

    # Draw main tkinter window (based on a 12 column x 12 row grid)
    def _CreateMainWindow(self):
        self.mainWindow = tkinter.Tk()
        self.mainWindow.title(self.tracker.settings.mainWindowText)

        # Register Event Handlers
        self.mainWindow.protocol("WM_DELETE_WINDOW", self._Stop)
        self.mainWindow.bind("<KeyPress>", self.HandleKeyDown)

        # Create Controls for Left Frame
        self.previewContainer = tkinter.Label(self.mainWindow, text="Preview", fg="white", bg="black",
                                              image=self.tracker.frameView)
        self.previewContainer.grid(row=0, column=0, columnspan=6, rowspan=12)

        # Create Controls for the Right Frame
        tkinter.Label(self.mainWindow, text="Preview View Mode").grid(row=0, column=6, columnspan=6, rowspan=1)
        tkinter.Button(self.mainWindow, text="Unprocessed",
                       command=self.HandleUnprocBtn).grid(row=1, column=6, columnspan=2, rowspan=1)
        tkinter.Button(self.mainWindow, text="Blurred",
                       command=self.HandleBlurBtn).grid(row=1, column=8, columnspan=2, rowspan=1)
        tkinter.Button(self.mainWindow, text="Thresholded",
                       command=self.HandleThresholdedBtn).grid(row=1, column=10, columnspan=2, rowspan=1)

        # Kick off the self-rescheduling preview refresh, then enter the event loop.
        self.UpdatePreviewFrame()
        self.mainWindow.mainloop()

    def UpdatePreviewFrame(self):
        # NOTE(review): the bare except deliberately swallows conversion errors for
        # frames that are not yet valid images (e.g. before first capture) — confirm.
        try:
            rgbFrame = cv2.cvtColor(self.tracker.frameView, cv2.COLOR_BGR2RGBA)
            img = Image.fromarray(rgbFrame)
            imgtk = ImageTk.PhotoImage(image=img)
            # Keep a reference on the widget so the PhotoImage is not garbage-collected.
            self.previewContainer.imgtk = imgtk
            self.previewContainer.configure(image=imgtk)
        except:
            pass
        # Re-schedule itself (~every 1 ms) so the preview refreshes continuously.
        self.previewContainer.after(1, self.UpdatePreviewFrame)

    def HandleUnprocBtn(self):
        # Chained assignment: update both the tracker's live view and its settings.
        self.tracker.frameView = self.tracker.settings.frameView = FRAME_VIEW_UNPROCESSED

    def HandleBlurBtn(self):
        self.tracker.frameView = self.tracker.settings.frameView = FRAME_VIEW_BLURRED

    def HandleThresholdedBtn(self):
        self.tracker.frameView = self.tracker.settings.frameView = FRAME_VIEW_THRESHOLDED

    def MouseRectHandler(self, event, x, y, flags, param):
        # OpenCV mouse callback: drag with the left button to define a rectangle.
        if event == cv2.EVENT_LBUTTONDOWN:
            self.drawing = True
            self.ax, self.ay = x, y
        elif event == cv2.EVENT_MOUSEMOVE:
            if self.drawing == True:
                self.bx, self.by = x, y
        elif event == cv2.EVENT_LBUTTONUP:
            self.bx, self.by = x, y
            self.drawing = False
        cv2.imshow(self.tracker.settings.mainWindowText, self.tracker.frameView)

    def HandleKeyDown(self, e):
        # Escape key (keycode 27) stops the tracker.
        if e.keycode == 27:
            self._Stop()
app.py
# # from flask import Flask # # from flask import render_template # # app = Flask(__name__) # # @app.route("/") # # def hello_world(): # # return render_template("index.html") # from flask import Flask, request, abort # from urllib.parse import parse_qs # import requests # import json # import time # from flask_cors import CORS # import threading # # Functie nodig om de authservice te starten # def startAuthService(i: str): # getAuthJson(i) # # Authenticatie regelen voor Tribe # def getAuthJson(i: str): # urlToken = "https://auth.tribecrm.nl/oauth2/token/" # payload='grant_type=refresh_token&client_id=c4f13121-c90a-4933-9707-1c0c867aa91a&client_secret=Nz7cpRVVB84r4spxZIK9cdp5hnPWqseYQCnDyE89&redirect_uri=https%3A%2F%2Fwww.vvebeheerwijsamen.nl%2F&refresh_token=' + i # headers = { # 'Content-Type': 'application/x-www-form-urlencoded', # 'Cookie': 'INGRESSCOOKIE=54faaf98337d6f7813532188c573aea2; INGRESSCOOKIE=807a6e8eb3c06330d01786a893d6643b; _csrf=B1q_xCLA5I9wzTOYViitnfht; connect.sid=s%3Ax3mhao7dflalABkV6NlMW-4DMlSjo8ld.PsNG8O8iDycnmcWh8UoXWQg7wshu6uoEpOvKsWq6F8U; oauth2_authentication_csrf=MTYyNzkyNTEzNXxEdi1CQkFFQ180SUFBUkFCRUFBQVB2LUNBQUVHYzNSeWFXNW5EQVlBQkdOemNtWUdjM1J5YVc1bkRDSUFJRFF5T0RKa1pEaGtOR0ppWVRRM09EVmlPR0ZtTmprNVptSXhZalZrWTJKbHzfLLEBKYCmVvrknebFYm7aV-YN-O1aK8CN0slzFtwGHg==' # } # response = requests.request("POST", urlToken, headers=headers, data=payload) # print(response.text) # global y # y = json.loads(response.text) # global url # url = "https://api.tribecrm.nl/v1/mutation?access_token=" + y['access_token'] # def runAuth(): # i = True # while i: # startAuthService(y['refresh_token']) # time.sleep(30) # headers = { # 'Content-Type': 'application/json' # } # # Keys die aanwezig zijn op de site # keys_from_tribe = ['420', '421', '422', '423', '424', '425', '426', '427', '428', '429', '431', '432', '433', '434', '435', '436', '437', '438', '440', '442', '443', '444', '445'] # # Data checken op missende key's/value's, en indien toepasselijk standaard 
values invullen # def check(data): # checked_data: dict = {} # keys_from_request = [] # # Alle keys van request toeveogen aan een list # for k, v in data.items(): # keys_from_request.append(str(k)) # # Keys toevoegen aan een dict, als er een key mist wordt er een lege string toegevoegd # for i in keys_from_tribe: # if i in keys_from_request: # checked_data[i] = data[i][0] # else: # checked_data[i] = "" # # Resultaat terugsturen # return checked_data # def send_data_to_tribe(data: dict): # i = { # "entityType": "Relation.Organization", # "fields": { # "Name": data['420'], # "EmailAddress": data['438'] # } # } # createOrganizationinTribe(json.dumps(i), data) # # Organization aanmaken in Tribe en OrganisationID ophalen # def createOrganizationinTribe(data, data2): # response = requests.request("POST", url, headers=headers, data=data) # print(response.text) # # OrganizationID ophalen en aanpassen # y = json.loads(response.text) # r = { # "entityType": "Relationship.Organization.CommercialRelationship.Prospect", # "fields": { # "AccountManager": "79cdc883-f25a-4d11-80f7-250f2ca3334c", # "Organization": y['data'][0]['entityId'], # "Status": "7dca7da5-0e92-4dfc-8aa2-af8c5496d6e2", # "Group": "a85e1bdb-00ec-42f0-83e1-c08d8d7a6dff", # "6fc4e9b8-c787-4f5f-94ff-918f8643f54d": int(data2['428']), # "e3822d01-22db-47f2-bfc5-298a22eb1019": int(data2['426']), # "7b13c3c8-851d-4979-847c-9d0ef290fd88": str(data2['425']), # "c2270abe-396b-42d3-9966-281fa0cdbaf9": str(data2['444']) # } # } # #Prospectaanmaken in Tribe # createProspectinTribe(json.dumps(r)) # # Prospect aanmaken in Tribe # def createProspectinTribe(data): # response = requests.request("POST", url, headers=headers, data=data) # print(response.text) # app = Flask(__name__) # @app.route('/webhook', methods=['POST', 'GET']) # def webhook(): # if request.method == 'POST': # offerteAanvraag = request.get_data(as_text = True) # #Binnenkomende data converteren naar een dictionary --> key:['value'] # formatted_data_dict: dict = 
from flask import Flask, request, abort
from urllib.parse import parse_qs
import requests
import json
import time
from flask_cors import CORS
import threading


def startAuthService(i: str):
    """Refresh the Tribe OAuth session using the given refresh token.

    Thin wrapper kept for backward compatibility with existing callers.
    """
    return getAuthJson(i)


def getAuthJson(i: str):
    """Exchange a refresh token for a Tribe access token.

    Side effects: sets the module-level globals ``y`` (the parsed token
    response) and ``url`` (the mutation endpoint with the access token
    appended). Returns the parsed token response dict.
    """
    urlToken = "https://auth.tribecrm.nl/oauth2/token/"
    # SECURITY NOTE(review): the OAuth client_secret and session cookies are
    # hard-coded below. They should be moved to environment variables /
    # secret storage and the exposed values rotated.
    payload = 'grant_type=refresh_token&client_id=c4f13121-c90a-4933-9707-1c0c867aa91a&client_secret=Nz7cpRVVB84r4spxZIK9cdp5hnPWqseYQCnDyE89&redirect_uri=https%3A%2F%2Fwww.vvebeheerwijsamen.nl%2F&refresh_token=' + i
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Cookie': 'INGRESSCOOKIE=54faaf98337d6f7813532188c573aea2; INGRESSCOOKIE=807a6e8eb3c06330d01786a893d6643b; _csrf=B1q_xCLA5I9wzTOYViitnfht; connect.sid=s%3Ax3mhao7dflalABkV6NlMW-4DMlSjo8ld.PsNG8O8iDycnmcWh8UoXWQg7wshu6uoEpOvKsWq6F8U; oauth2_authentication_csrf=MTYyNzkyNTEzNXxEdi1CQkFFQ180SUFBUkFCRUFBQVB2LUNBQUVHYzNSeWFXNW5EQVlBQkdOemNtWUdjM1J5YVc1bkRDSUFJRFF5T0RKa1pEaGtOR0ppWVRRM09EVmlPR0ZtTmprNVptSXhZalZrWTJKbHzfLLEBKYCmVvrknebFYm7aV-YN-O1aK8CN0slzFtwGHg=='
    }
    response = requests.request("POST", urlToken, headers=headers, data=payload)
    print(response.text)
    global y
    y = json.loads(response.text)
    global url
    url = "https://api.tribecrm.nl/v1/mutation?access_token=" + y['access_token']
    return y


# Default headers for the Tribe mutation API calls below.
headers = {
    'Content-Type': 'application/json'
}

# Form-field ids expected from the website's quote-request form.
keys_from_tribe = ['420', '421', '422', '423', '424', '425', '426', '427',
                   '428', '429', '431', '432', '433', '434', '435', '436',
                   '437', '438', '440', '442', '443', '444', '445']


def check(data):
    """Normalize a parse_qs() dict against the expected form fields.

    For every expected field id, take the first submitted value; missing
    fields default to an empty string. Returns a plain str->str dict.
    """
    return {key: data[key][0] if key in data else "" for key in keys_from_tribe}


def send_data_to_tribe(data: dict):
    """Build the Organization payload from the form data and create it in Tribe."""
    i = {
        "entityType": "Relation.Organization",
        "fields": {
            "Name": data['420'],
            "EmailAddress": data['438']
        }
    }
    createOrganizationinTribe(json.dumps(i), data)


def createOrganizationinTribe(data, data2):
    """Create an Organization in Tribe, then create the matching Prospect.

    ``data`` is the JSON-encoded Organization payload; ``data2`` is the raw
    (checked) form dict used to fill the Prospect's custom fields.
    NOTE(review): ``url`` is a global set only by getAuthJson(); calling this
    before authentication will raise NameError — confirm intended startup flow.
    """
    response = requests.request("POST", url, headers=headers, data=data)
    print(response.text)
    # Extract the new Organization's entityId from the API response.
    y = json.loads(response.text)
    r = {
        "entityType": "Relationship.Organization.CommercialRelationship.Prospect",
        "fields": {
            "AccountManager": "79cdc883-f25a-4d11-80f7-250f2ca3334c",
            "Organization": y['data'][0]['entityId'],
            "Status": "7dca7da5-0e92-4dfc-8aa2-af8c5496d6e2",
            "Group": "a85e1bdb-00ec-42f0-83e1-c08d8d7a6dff",
            # TODO(review): int('') raises ValueError when the form field is
            # missing — check() defaults missing keys to "". Confirm whether a
            # numeric default should be used instead.
            "6fc4e9b8-c787-4f5f-94ff-918f8643f54d": int(data2['428']),
            "e3822d01-22db-47f2-bfc5-298a22eb1019": int(data2['426']),
            "7b13c3c8-851d-4979-847c-9d0ef290fd88": str(data2['425']),
            "c2270abe-396b-42d3-9966-281fa0cdbaf9": str(data2['444'])
        }
    }
    createProspectinTribe(json.dumps(r))


def createProspectinTribe(data):
    """POST the JSON-encoded Prospect payload to the Tribe mutation endpoint."""
    response = requests.request("POST", url, headers=headers, data=data)
    print(response.text)


app = Flask(__name__)


@app.route('/webhook', methods=['POST', 'GET'])
def webhook():
    """Receive the form POST, normalize it and forward it to Tribe."""
    if request.method == 'POST':
        offerteAanvraag = request.get_data(as_text=True)
        # Incoming urlencoded body -> dict of key: [value] lists.
        formatted_data_dict: dict = parse_qs(offerteAanvraag)
        # Fill in any missing keys/values with defaults.
        checked_data_dict: dict = check(formatted_data_dict)
        # Push the data to the Tribe API helpers.
        send_data_to_tribe(checked_data_dict)
        return 'success', 200
    else:
        abort(400)


@app.route('/first', methods=['GET'])
def index():
    """Bootstrap authentication using a (hard-coded) refresh token.

    SECURITY NOTE(review): this endpoint embeds a refresh token and returns
    the full token response to the caller — it should be protected/removed.
    """
    return str(getAuthJson('PwNXmiKyi0aDWurbLBuMLPaPYprBM99aUFrvUbZYvhA.gsCBQjUpFqFFIw_Cjs5kV_UCjR48xN_VQQn4LCX97rY')), 200


@app.route('/second', methods=['GET'])
def indexThree():
    """Refresh the session using the refresh token from the last auth response."""
    startAuthService(y['refresh_token'])
    return 'success', 200


@app.route('/', methods=['GET', 'POST'])
def indexdom():
    """Health-check / landing page."""
    return "<h1>Welcome to our server mannn !!</h1>"


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
functional_tests.py
#!/usr/bin/env python """ This script cannot be run directly, because it needs to have test/functional/test_toolbox.py in sys.argv in order to run functional tests on repository tools after installation. The install_and_test_tool_shed_repositories.sh will execute this script with the appropriate parameters. """ import httplib import logging import os import random import shutil import socket import sys import tempfile import threading import time galaxy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir)) sys.path[1:1] = [ os.path.join( galaxy_root, "scripts" ), os.path.join( galaxy_root, "lib" ), os.path.join( galaxy_root, 'test' ), os.path.join( galaxy_root, 'scripts', 'api' ) ] from galaxy import eggs eggs.require( "Paste" ) eggs.require( 'mercurial' ) # This should not be required, but it is under certain conditions thanks to this bug: # http://code.google.com/p/python-nose/issues/detail?id=284 eggs.require( "pysqlite" ) from paste import httpserver import install_and_test_tool_shed_repositories.base.util as install_and_test_base_util from functional import database_contexts from functional_tests import generate_config_file from galaxy.app import UniverseApplication from galaxy.util import asbool from galaxy.web import buildapp log = logging.getLogger( 'install_and_test_tool_dependency_definitions' ) assert sys.version_info[ :2 ] >= ( 2, 6 ) test_home_directory = os.path.join( galaxy_root, 'test', 'install_and_test_tool_shed_repositories', 'tool_dependency_definitions' ) # Here's the directory where everything happens. Temporary directories are created within this directory to contain # the database, new repositories, etc. galaxy_test_tmp_dir = os.path.join( test_home_directory, 'tmp' ) # File containing information about problematic repositories to exclude from test runs. 
# Path to an optional XML file listing repositories to skip during this run.
exclude_list_file = os.path.abspath( os.path.join( test_home_directory, 'exclude.xml' ) )

default_galaxy_locales = 'en'
os.environ[ 'GALAXY_INSTALL_TEST_TMP_DIR' ] = galaxy_test_tmp_dir

# Use separate databases for Galaxy and tool shed install info by default,
# set GALAXY_TEST_INSTALL_DB_MERGED to True to revert to merged databases
# behavior.
default_install_db_merged = False

# This script can be run in such a way that no Tool Shed database records should be changed.
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
    can_update_tool_shed = False
else:
    can_update_tool_shed = True

test_framework = install_and_test_base_util.TOOL_DEPENDENCY_DEFINITIONS


def install_and_test_repositories( app, galaxy_shed_tools_dict_file, galaxy_shed_tool_conf_file, galaxy_shed_tool_path ):
    """Install every tool_dependency_definition repository and record results.

    Fetches the list of repositories to test from the Tool Shed, skips any
    listed in the exclude file, installs the rest into the embedded Galaxy
    ``app``, and saves per-revision test results back to the Tool Shed when
    ``can_update_tool_shed`` is True.

    Returns a ``( install_and_test_statistics_dict, error_message )`` tuple;
    the dict is None when the repository list could not be retrieved.
    """
    # Initialize a dictionary for the summary that will be printed to stdout.
    install_and_test_statistics_dict = install_and_test_base_util.initialize_install_and_test_statistics_dict()
    error_message = ''
    repositories_to_install, error_message = \
        install_and_test_base_util.get_repositories_to_install( install_and_test_base_util.galaxy_tool_shed_url, test_framework )
    if error_message:
        return None, error_message
    print 'The exclude list file is defined as %s' % exclude_list_file
    if os.path.exists( exclude_list_file ):
        print 'Loading the list of repositories excluded from testing from the file %s...' % exclude_list_file
        # The following exclude_list will look something like this:
        # [{ 'reason': The default reason or the reason specified in this section,
        #    'repositories': [( name, owner, changeset_revision if changeset_revision else None ),
        #                     ( name, owner, changeset_revision if changeset_revision else None )]}]
        exclude_list_dicts = install_and_test_base_util.parse_exclude_list( exclude_list_file )
    else:
        print 'The exclude list file %s does not exist, so no repositories will be excluded from testing.' % exclude_list_file
        exclude_list_dicts = []
    # Generate a test method that will use Twill to install each repository into the embedded Galaxy application that was
    # started up, installing repository and tool dependencies.  Upon successful installation, generate a test case for each
    # functional test defined for each tool in the repository and execute the test cases.  Record the result of the tests.
    # The traceback and captured output of the tool that was run will be recored for test failures.  After all tests have
    # completed, the repository is uninstalled, so test cases don't interfere with the next repository's functional tests.
    for repository_dict in repositories_to_install:
        encoded_repository_metadata_id = repository_dict.get( 'id', None )
        # Add the URL for the tool shed we're installing from, so the automated installation methods go to the right place.
        repository_dict[ 'tool_shed_url' ] = install_and_test_base_util.galaxy_tool_shed_url
        # Get the name and owner out of the repository info dict.
        name = str( repository_dict.get( 'name', '' ) )
        owner = str( repository_dict.get( 'owner', '' ) )
        changeset_revision = str( repository_dict.get( 'changeset_revision', '' ) )
        print "Processing revision %s of repository %s owned by %s..." % ( changeset_revision, name, owner )
        repository_identifier_tup = ( name, owner, changeset_revision )
        install_and_test_statistics_dict[ 'total_repositories_processed' ] += 1
        # Retrieve the stored list of tool_test_results_dicts.
        tool_test_results_dicts, error_message = \
            install_and_test_base_util.get_tool_test_results_dicts( install_and_test_base_util.galaxy_tool_shed_url,
                                                                    encoded_repository_metadata_id )
        if error_message:
            print 'Cannot install version %s of repository %s owned by %s due to the following error getting tool_test_results:\n%s' % \
                ( changeset_revision, name, owner, str( error_message ) )
        else:
            tool_test_results_dict = install_and_test_base_util.get_tool_test_results_dict( tool_test_results_dicts )
            is_excluded, reason = install_and_test_base_util.is_excluded( exclude_list_dicts,
                                                                          name,
                                                                          owner,
                                                                          changeset_revision,
                                                                          encoded_repository_metadata_id )
            if is_excluded:
                # If this repository is being skipped, register the reason.
                print "Not testing revision %s of repository %s owned by %s because it is in the exclude list for this test run." % \
                    ( changeset_revision, name, owner )
                tool_test_results_dict[ 'not_tested' ] = dict( reason=reason )
                params = dict( do_not_test=False )
                install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
                                                                                     tool_test_results_dicts,
                                                                                     tool_test_results_dict,
                                                                                     repository_dict,
                                                                                     params,
                                                                                     can_update_tool_shed )
            else:
                # See if the repository was installed in a previous test.
                repository = install_and_test_base_util.get_repository( name, owner, changeset_revision )
                if repository is None:
                    # The repository was not previously installed, so install it now.
                    start_time = time.time()
                    tool_test_results_dict = install_and_test_base_util.initialize_tool_tests_results_dict( app, tool_test_results_dict )
                    repository, error_message = install_and_test_base_util.install_repository( app, repository_dict )
                    if error_message:
                        # The repository installation failed.
                        print 'Installation failed for revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
                        processed_repositories_with_installation_error = \
                            install_and_test_statistics_dict.get( 'repositories_with_installation_error', [] )
                        if repository_identifier_tup not in processed_repositories_with_installation_error:
                            install_and_test_statistics_dict[ 'repositories_with_installation_error' ].append( repository_identifier_tup )
                        current_repository_installation_error_dict = dict( tool_shed=install_and_test_base_util.galaxy_tool_shed_url,
                                                                           name=name,
                                                                           owner=owner,
                                                                           changeset_revision=changeset_revision,
                                                                           error_message=error_message )
                        tool_test_results_dict[ 'installation_errors' ][ 'current_repository' ].append( current_repository_installation_error_dict )
                        params = dict( test_install_error=True,
                                       do_not_test=False )
                        install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
                                                                                             tool_test_results_dicts,
                                                                                             tool_test_results_dict,
                                                                                             repository_dict,
                                                                                             params,
                                                                                             can_update_tool_shed )
                    else:
                        # The repository was successfully installed.
                        print 'Installation succeeded for revision %s of repository %s owned by %s.' % \
                            ( changeset_revision, name, owner )
                        # Populate the installation containers (success and error) for the repository's immediate dependencies
                        # (the entire dependency tree is not handled here).
                        params, install_and_test_statistics_dict, tool_test_results_dict = \
                            install_and_test_base_util.populate_dependency_install_containers( app,
                                                                                               repository,
                                                                                               repository_identifier_tup,
                                                                                               install_and_test_statistics_dict,
                                                                                               tool_test_results_dict )
                        install_and_test_base_util.save_test_results_for_changeset_revision( install_and_test_base_util.galaxy_tool_shed_url,
                                                                                             tool_test_results_dicts,
                                                                                             tool_test_results_dict,
                                                                                             repository_dict,
                                                                                             params,
                                                                                             can_update_tool_shed )
                        # Populate the installation containers (success or error) for the repository's immediate repository
                        # dependencies whose containers are not yet populated.
                        install_and_test_base_util.populate_install_containers_for_repository_dependencies( app,
                                                                                                            repository,
                                                                                                            encoded_repository_metadata_id,
                                                                                                            install_and_test_statistics_dict,
                                                                                                            can_update_tool_shed )
                    print '\nAttempting to install revision %s of repository %s owned by %s took %s seconds.\n' % \
                        ( changeset_revision, name, owner, str( time.time() - start_time ) )
                else:
                    print 'Skipped attempt to install revision %s of repository %s owned by %s because ' % \
                        ( changeset_revision, name, owner )
                    print 'it was previously installed and currently has status %s' % repository.status
    return install_and_test_statistics_dict, error_message


def main():
    """Start an embedded Galaxy instance and run the installation tests.

    Validates required environment configuration, builds all temporary config
    files and databases, boots a Galaxy UniverseApplication plus a Paste web
    server, runs install_and_test_repositories(), then shuts everything down
    and cleans up.  Returns 0 on success, 1 when required environment
    variables are missing.
    """
    if install_and_test_base_util.tool_shed_api_key is None:
        # If the tool shed URL specified in any dict is not present in the tool_sheds_conf.xml, the installation will fail.
        log.debug( 'Cannot proceed without a valid tool shed API key set in the enviroment variable GALAXY_INSTALL_TEST_TOOL_SHED_API_KEY.' )
        return 1
    if install_and_test_base_util.galaxy_tool_shed_url is None:
        log.debug( 'Cannot proceed without a valid Tool Shed base URL set in the environment variable GALAXY_INSTALL_TEST_TOOL_SHED_URL.' )
        return 1
    # ---- Configuration ------------------------------------------------------
    galaxy_test_host = os.environ.get( 'GALAXY_INSTALL_TEST_HOST', install_and_test_base_util.default_galaxy_test_host )
    # Set the GALAXY_INSTALL_TEST_HOST variable so that Twill will have the Galaxy url to which to
    # install repositories.
    os.environ[ 'GALAXY_INSTALL_TEST_HOST' ] = galaxy_test_host
    # Set the GALAXY_TEST_HOST environment variable so that the toolbox tests will have the Galaxy url
    # on which to to run tool functional tests.
    os.environ[ 'GALAXY_TEST_HOST' ] = galaxy_test_host
    galaxy_test_port = os.environ.get( 'GALAXY_INSTALL_TEST_PORT', str( install_and_test_base_util.default_galaxy_test_port_max ) )
    os.environ[ 'GALAXY_TEST_PORT' ] = galaxy_test_port
    tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_PATH', 'tools' )
    if 'HTTP_ACCEPT_LANGUAGE' not in os.environ:
        os.environ[ 'HTTP_ACCEPT_LANGUAGE' ] = default_galaxy_locales
    if not os.path.isdir( galaxy_test_tmp_dir ):
        os.mkdir( galaxy_test_tmp_dir )
    # Set up the configuration files for the Galaxy instance.
    galaxy_shed_tool_path = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_PATH',
                                            tempfile.mkdtemp( dir=galaxy_test_tmp_dir, prefix='shed_tools' ) )
    shed_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF',
                                                     os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_data_table_conf.xml' ) )
    galaxy_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_TABLE_CONF',
                                                       install_and_test_base_util.tool_data_table_conf )
    galaxy_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_CONF',
                                            os.path.join( galaxy_test_tmp_dir, 'test_tool_conf.xml' ) )
    galaxy_job_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_JOB_CONF',
                                           os.path.join( galaxy_test_tmp_dir, 'test_job_conf.xml' ) )
    galaxy_shed_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF',
                                                 os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_conf.xml' ) )
    galaxy_migrated_tool_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_MIGRATED_TOOL_CONF',
                                                     os.path.join( galaxy_test_tmp_dir, 'test_migrated_tool_conf.xml' ) )
    galaxy_tool_sheds_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF',
                                                  os.path.join( galaxy_test_tmp_dir, 'test_tool_sheds_conf.xml' ) )
    galaxy_shed_tools_dict_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DICT_FILE',
                                                  os.path.join( galaxy_test_tmp_dir, 'shed_tool_dict' ) )
    install_and_test_base_util.populate_galaxy_shed_tools_dict_file( galaxy_shed_tools_dict_file,
                                                                     shed_tools_dict=None )
    # Set the GALAXY_TOOL_SHED_TEST_FILE environment variable to the path of the shed_tools_dict file so that
    # test.base.twilltestcase.setUp will find and parse it properly.
    os.environ[ 'GALAXY_TOOL_SHED_TEST_FILE' ] = galaxy_shed_tools_dict_file
    if 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' in os.environ:
        tool_data_path = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' )
    else:
        tool_data_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
        os.environ[ 'GALAXY_INSTALL_TEST_TOOL_DATA_PATH' ] = tool_data_path
    # Configure the database connection and path.
    if 'GALAXY_INSTALL_TEST_DBPATH' in os.environ:
        galaxy_db_path = os.environ[ 'GALAXY_INSTALL_TEST_DBPATH' ]
    else:
        tempdir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
        galaxy_db_path = os.path.join( tempdir, 'database' )
    # Checks that galaxy_db_path exists and if not, create it.
    if not os.path.exists(galaxy_db_path):
        os.makedirs(galaxy_db_path)
    # Configure the paths Galaxy needs to install and test tools.
    galaxy_file_path = os.path.join( galaxy_db_path, 'files' )
    galaxy_tempfiles = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
    galaxy_migrated_tool_path = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
    # Set up the tool dependency path for the Galaxy instance.
    tool_dependency_dir = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR', None )
    if tool_dependency_dir is None:
        tool_dependency_dir = tempfile.mkdtemp( dir=galaxy_test_tmp_dir )
        os.environ[ 'GALAXY_INSTALL_TEST_TOOL_DEPENDENCY_DIR' ] = tool_dependency_dir
    os.environ[ 'GALAXY_TOOL_DEPENDENCY_DIR' ] = tool_dependency_dir
    if 'GALAXY_INSTALL_TEST_DBURI' in os.environ:
        database_connection = os.environ[ 'GALAXY_INSTALL_TEST_DBURI' ]
    else:
        database_connection = 'sqlite:///' + os.path.join( galaxy_db_path, 'install_and_test_repositories.sqlite' )
    if 'GALAXY_INSTALL_TEST_INSTALL_DBURI' in os.environ:
        install_database_connection = os.environ[ 'GALAXY_INSTALL_TEST_INSTALL_DBURI' ]
    elif asbool( os.environ.get( 'GALAXY_TEST_INSTALL_DB_MERGED', default_install_db_merged ) ):
        install_database_connection = database_connection
    else:
        install_galaxy_db_path = os.path.join( galaxy_db_path, 'install.sqlite' )
        install_database_connection = 'sqlite:///%s' % install_galaxy_db_path
    kwargs = {}
    for dir in [ galaxy_test_tmp_dir ]:
        try:
            os.makedirs( dir )
        except OSError:
            pass
    print "Database connection: ", database_connection
    print "Install database connection: ", install_database_connection
    # Generate the shed_tool_data_table_conf.xml file.
    file( shed_tool_data_table_conf_file, 'w' ).write( install_and_test_base_util.tool_data_table_conf_xml_template )
    os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF' ] = shed_tool_data_table_conf_file
    # ---- Start up a Galaxy instance ------------------------------------------------------
    # Generate the tool_conf.xml file.
    file( galaxy_tool_conf_file, 'w' ).write( install_and_test_base_util.tool_conf_xml )
    # Generate the job_conf.xml file.
    file( galaxy_job_conf_file, 'w' ).write( install_and_test_base_util.job_conf_xml )
    # Generate the tool_sheds_conf.xml file, but only if a the user has not specified an existing one in the environment.
    if 'GALAXY_INSTALL_TEST_TOOL_SHEDS_CONF' not in os.environ:
        file( galaxy_tool_sheds_conf_file, 'w' ).write( install_and_test_base_util.tool_sheds_conf_xml )
    # Generate the shed_tool_conf.xml file.
    install_and_test_base_util.populate_shed_conf_file( galaxy_shed_tool_conf_file, galaxy_shed_tool_path, xml_elems=None )
    os.environ[ 'GALAXY_INSTALL_TEST_SHED_TOOL_CONF' ] = galaxy_shed_tool_conf_file
    # Generate the migrated_tool_conf.xml file.
    install_and_test_base_util.populate_shed_conf_file( galaxy_migrated_tool_conf_file, galaxy_migrated_tool_path, xml_elems=None )
    # Write the embedded web application's specific configuration to a temporary file.  This is necessary in order for
    # the external metadata script to find the right datasets.
    kwargs = dict( admin_users='test@bx.psu.edu',
                   master_api_key=install_and_test_base_util.default_galaxy_master_api_key,
                   allow_user_creation=True,
                   allow_user_deletion=True,
                   allow_library_path_paste=True,
                   database_connection=database_connection,
                   datatype_converters_config_file="datatype_converters_conf.xml.sample",
                   file_path=galaxy_file_path,
                   id_secret=install_and_test_base_util.galaxy_encode_secret,
                   install_database_connection=install_database_connection,
                   job_config_file=galaxy_job_conf_file,
                   job_queue_workers=5,
                   log_destination="stdout",
                   migrated_tools_config=galaxy_migrated_tool_conf_file,
                   new_file_path=galaxy_tempfiles,
                   running_functional_tests=True,
                   shed_tool_data_table_config=shed_tool_data_table_conf_file,
                   shed_tool_path=galaxy_shed_tool_path,
                   template_path="templates",
                   tool_config_file=','.join( [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] ),
                   tool_data_path=tool_data_path,
                   tool_dependency_dir=tool_dependency_dir,
                   tool_path=tool_path,
                   tool_parse_help=False,
                   tool_sheds_config_file=galaxy_tool_sheds_conf_file,
                   update_integrated_tool_panel=False,
                   use_heartbeat=False )
    if os.path.exists( galaxy_tool_data_table_conf_file ):
        kwargs[ 'tool_data_table_config_path' ] = galaxy_tool_data_table_conf_file
    galaxy_config_file = os.environ.get( 'GALAXY_INSTALL_TEST_INI_FILE', None )
    # If the user has passed in a path for the .ini file, do not overwrite it.
    if not galaxy_config_file:
        galaxy_config_file = os.path.join( galaxy_test_tmp_dir, 'install_test_tool_shed_repositories_wsgi.ini' )
        config_items = []
        for label in kwargs:
            config_tuple = label, kwargs[ label ]
            config_items.append( config_tuple )
        # Write a temporary file, based on galaxy.ini.sample, using the configuration options defined above.
        generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items )
    kwargs[ 'tool_config_file' ] = [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ]
    # Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh.
    kwargs[ 'global_conf' ] = install_and_test_base_util.get_webapp_global_conf()
    kwargs[ 'global_conf' ][ '__file__' ] = galaxy_config_file
    # ---- Build Galaxy Application --------------------------------------------------
    if not database_connection.startswith( 'sqlite://' ):
        kwargs[ 'database_engine_option_max_overflow' ] = '20'
        kwargs[ 'database_engine_option_pool_size' ] = '10'
    kwargs[ 'config_file' ] = galaxy_config_file
    app = UniverseApplication( **kwargs )
    database_contexts.galaxy_context = app.model.context
    database_contexts.install_context = app.install_model.context
    log.debug( "Embedded Galaxy application started..." )
    # ---- Run galaxy webserver ------------------------------------------------------
    server = None
    global_conf = install_and_test_base_util.get_webapp_global_conf()
    global_conf[ 'database_file' ] = database_connection
    webapp = buildapp.app_factory( global_conf,
                                   use_translogger=False,
                                   static_enabled=install_and_test_base_util.STATIC_ENABLED,
                                   app=app )
    # Serve the app on a specified or random port.
    if galaxy_test_port is not None:
        server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
    else:
        random.seed()
        for i in range( 0, 9 ):
            try:
                galaxy_test_port = str( random.randint( install_and_test_base_util.default_galaxy_test_port_min,
                                                        install_and_test_base_util.default_galaxy_test_port_max ) )
                log.debug( "Attempting to serve app on randomly chosen port: %s", galaxy_test_port )
                server = httpserver.serve( webapp, host=galaxy_test_host, port=galaxy_test_port, start_loop=False )
                break
            # NOTE: Python 2 except syntax; errno 98 is EADDRINUSE, so retry
            # with a different random port.
            except socket.error, e:
                if e[0] == 98:
                    continue
                raise
        else:
            message = "Unable to open a port between %s and %s to start Galaxy server" % \
                ( install_and_test_base_util.default_galaxy_test_port_min, install_and_test_base_util.default_galaxy_test_port_max )
            raise Exception( message )
    os.environ[ 'GALAXY_INSTALL_TEST_PORT' ] = galaxy_test_port
    # Start the server.
    t = threading.Thread( target=server.serve_forever )
    t.start()
    # Test if the server is up.
    for i in range( 10 ):
        # Directly test the app, not the proxy.
        conn = httplib.HTTPConnection( galaxy_test_host, galaxy_test_port )
        conn.request( "GET", "/" )
        if conn.getresponse().status == 200:
            break
        time.sleep( 0.1 )
    else:
        raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
    print "Embedded galaxy web server started..."
    print "The embedded Galaxy application is running on %s:%s" % ( galaxy_test_host, galaxy_test_port )
    print "Repositories will be installed from the tool shed at %s" % install_and_test_base_util.galaxy_tool_shed_url
    # If a tool_data_table_conf.test.xml file was found, add the entries from it into the app's tool data tables.
    if install_and_test_base_util.additional_tool_data_tables:
        app.tool_data_tables.add_new_entries_from_config_file( config_filename=install_and_test_base_util.additional_tool_data_tables,
                                                               tool_data_path=install_and_test_base_util.additional_tool_data_path,
                                                               shed_tool_data_table_config=None,
                                                               persist=False )
    now = time.strftime( "%Y-%m-%d %H:%M:%S" )
    print "####################################################################################"
    print "# %s - installation script for repositories of type tool_dependency_definition started." % now
    if not can_update_tool_shed:
        print "# This run will not update the Tool Shed database."
    print "####################################################################################"
    install_and_test_statistics_dict, error_message = install_and_test_repositories( app,
                                                                                     galaxy_shed_tools_dict_file,
                                                                                     galaxy_shed_tool_conf_file,
                                                                                     galaxy_shed_tool_path )
    try:
        install_and_test_base_util.print_install_and_test_results( 'tool dependency definitions',
                                                                   install_and_test_statistics_dict,
                                                                   error_message )
    except Exception, e:
        message = 'Attempting to print the following dictionary...\n\n%s\n\n...threw the following exception...\n\n%s\n\n' % \
            ( str( install_and_test_statistics_dict ), str( e ) )
        log.exception( message )
    log.debug( "Shutting down..." )
    # Gracefully shut down the embedded web server and UniverseApplication.
    if server:
        log.debug( "Shutting down embedded galaxy web server..." )
        server.server_close()
        server = None
        log.debug( "Embedded galaxy server stopped..." )
    if app:
        log.debug( "Shutting down galaxy application..." )
        app.shutdown()
        app = None
        log.debug( "Embedded galaxy application stopped..." )
    # Clean up test files unless otherwise specified.
    if 'GALAXY_INSTALL_TEST_NO_CLEANUP' not in os.environ:
        for dir in [ galaxy_test_tmp_dir ]:
            if os.path.exists( dir ):
                try:
                    shutil.rmtree( dir )
                    log.debug( "Cleaned up temporary files in %s", str( dir ) )
                except:
                    pass
    else:
        log.debug( 'GALAXY_INSTALL_TEST_NO_CLEANUP set, not cleaning up.' )
    # Return a "successful" response to buildbot.
    return 0


if __name__ == "__main__":
    # The tool_test_results_dict saved per changeset revision has this shape:
    # {
    #   "test_environment": { galaxy/tool shed revisions and database versions,
    #                         python version, architecture, system },
    #   "successful_installation": {
    #       'tool_dependencies':      [ { 'type', 'name', 'version', 'installation_directory' }, ... ],
    #       'repository_dependencies': [ { 'tool_shed', 'name', 'owner', 'changeset_revision' }, ... ],
    #       'current_repository':      [ { 'tool_shed', 'name', 'owner', 'changeset_revision' }, ... ] },
    #   "installation_errors": {
    #       'tool_dependencies':      [ { 'type', 'name', 'version', 'error_message' }, ... ],
    #       'repository_dependencies': [ { 'tool_shed', 'name', 'owner', 'changeset_revision', 'error_message' }, ... ],
    #       'current_repository':      [ { 'tool_shed', 'name', 'owner', 'changeset_revision', 'error_message' }, ... ] }
    # }
    sys.exit( main() )
eventlet.py
"""A eventlet based handler.""" from __future__ import absolute_import import contextlib import logging import eventlet from eventlet.green import socket as green_socket from eventlet.green import time as green_time from eventlet.green import threading as green_threading from eventlet.green import selectors as green_selectors from eventlet import queue as green_queue from kazoo.handlers import utils import kazoo.python2atexit as python2atexit from kazoo.handlers.utils import selector_select LOG = logging.getLogger(__name__) # sentinel objects _STOP = object() @contextlib.contextmanager def _yield_before_after(): # Yield to any other co-routines... # # See: http://eventlet.net/doc/modules/greenthread.html # for how this zero sleep is really a cooperative yield to other potential # co-routines... eventlet.sleep(0) try: yield finally: eventlet.sleep(0) class TimeoutError(Exception): pass class AsyncResult(utils.AsyncResult): """A one-time event that stores a value or an exception""" def __init__(self, handler): super(AsyncResult, self).__init__(handler, green_threading.Condition, TimeoutError) class SequentialEventletHandler(object): """Eventlet handler for sequentially executing callbacks. This handler executes callbacks in a sequential manner. A queue is created for each of the callback events, so that each type of event has its callback type run sequentially. These are split into two queues, one for watch events and one for async result completion callbacks. Each queue type has a greenthread worker that pulls the callback event off the queue and runs it in the order the client sees it. This split helps ensure that watch callbacks won't block session re-establishment should the connection be lost during a Zookeeper client call. Watch and completion callbacks should avoid blocking behavior as the next callback of that type won't be run until it completes. If you need to block, spawn a new greenthread and return immediately so callbacks can proceed. .. 
    note:: Completion callbacks can block to wait on Zookeeper calls, but
        no other completion callbacks will execute until the callback
        returns.
    """

    # Handler identity and the (eventlet-aware) queue implementation used
    # for both the callback and completion work queues.
    name = "sequential_eventlet_handler"
    queue_impl = green_queue.LightQueue
    queue_empty = green_queue.Empty

    def __init__(self):
        """Create a :class:`SequentialEventletHandler` instance"""
        # Separate queues so watch callbacks and completion callbacks are
        # each processed sequentially by their own worker greenlet.
        self.callback_queue = self.queue_impl()
        self.completion_queue = self.queue_impl()
        self._workers = []
        self._started = False

    @staticmethod
    def sleep_func(wait):
        # Cooperative sleep so other greenlets can run while we wait.
        green_time.sleep(wait)

    @property
    def running(self):
        # True between start() and stop().
        return self._started

    # Exception type raised by this handler on timeouts.
    timeout_exception = TimeoutError

    def _process_completion_queue(self):
        # Worker loop: drain completion callbacks one at a time until the
        # _STOP sentinel is received.
        while True:
            cb = self.completion_queue.get()
            if cb is _STOP:
                break
            try:
                # Yield to the hub before/after so one long callback cannot
                # starve other greenlets.
                with _yield_before_after():
                    cb()
            except Exception:
                # Never let a user callback kill the worker greenlet.
                LOG.warning("Exception in worker completion queue greenlet",
                            exc_info=True)
            finally:
                del cb  # release before possible idle

    def _process_callback_queue(self):
        # Worker loop for watch/event callbacks; mirrors the completion loop.
        while True:
            cb = self.callback_queue.get()
            if cb is _STOP:
                break
            try:
                with _yield_before_after():
                    cb()
            except Exception:
                LOG.warning("Exception in worker callback queue greenlet",
                            exc_info=True)
            finally:
                del cb  # release before possible idle

    def start(self):
        """Spawn the worker greenlets, once, and register atexit cleanup."""
        if not self._started:
            # Spawn our worker threads, we have
            #  - A callback worker for watch events to be called
            #  - A completion worker for completion events to be called
            w = eventlet.spawn(self._process_completion_queue)
            self._workers.append((w, self.completion_queue))
            w = eventlet.spawn(self._process_callback_queue)
            self._workers.append((w, self.callback_queue))
            self._started = True
            python2atexit.register(self.stop)

    def stop(self):
        """Signal each worker with the _STOP sentinel and wait for it to exit."""
        while self._workers:
            w, q = self._workers.pop()
            q.put(_STOP)
            w.wait()
        self._started = False
        python2atexit.unregister(self.stop)

    def socket(self, *args, **kwargs):
        # Green (non-blocking) TCP socket.
        return utils.create_tcp_socket(green_socket)

    def create_socket_pair(self):
        # Connected pair of green sockets (used for wakeup signalling).
        return utils.create_socket_pair(green_socket)

    def event_object(self):
        # Event primitive compatible with eventlet scheduling.
        return green_threading.Event()

    def lock_object(self):
        return green_threading.Lock()

    def rlock_object(self):
        return green_threading.RLock()

    def create_connection(self, *args, **kwargs):
        # Green TCP client connection; args forwarded to the helper.
        return utils.create_tcp_connection(green_socket, *args, **kwargs)

    def select(self, *args, **kwargs):
        # Run select() through the green selectors module, yielding around it.
        with _yield_before_after():
            return selector_select(*args,
                                   selectors_module=green_selectors,
                                   **kwargs)

    def async_result(self):
        # Handler-aware future object.
        return AsyncResult(self)

    def spawn(self, func, *args, **kwargs):
        # Daemonized green thread so it never blocks interpreter shutdown.
        t = green_threading.Thread(target=func, args=args, kwargs=kwargs)
        t.daemon = True
        t.start()
        return t

    def dispatch_callback(self, callback):
        # Enqueue the watch callback; it runs sequentially on the worker.
        self.callback_queue.put(lambda: callback.func(*callback.args))
test_end_to_end.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
#   Copyright 2018-2019 Fetch.AI Limited
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains end to end tests for the protocol generator."""
import logging
import os
import shutil
import tempfile
import time
from pathlib import Path
from threading import Thread
from typing import Optional, cast

from aea.aea_builder import AEABuilder
from aea.configurations.base import ComponentType, PublicId, SkillConfig
from aea.configurations.constants import DEFAULT_LEDGER, DEFAULT_PRIVATE_KEY_FILE
from aea.crypto.helpers import create_private_key
from aea.protocols.base import Address, Message
from aea.protocols.dialogue.base import Dialogue as BaseDialogue
from aea.skills.base import Handler, Skill, SkillContext
from aea.test_tools.test_cases import UseOef

from packages.fetchai.connections.oef.connection import (
    PUBLIC_ID as OEF_CONNECTION_PUBLIC_ID,
)

from tests.conftest import ROOT_DIR
from tests.data.generator.t_protocol.dialogues import (
    TProtocolDialogue,
    TProtocolDialogues,
)
from tests.data.generator.t_protocol.message import TProtocolMessage  # type: ignore
from tests.test_protocols.test_generator.common import PATH_TO_T_PROTOCOL

logger = logging.getLogger("aea")
logging.basicConfig(level=logging.INFO)


class TestEndToEndGenerator(UseOef):
    """
    Test that the generating a protocol works correctly in correct preconditions.

    Note: Types involving Floats seem to lose some precision when serialised
    then deserialised using protobuf. So tests for these types are commented
    out throughout for now.
    """

    @classmethod
    def setup_class(cls):
        """Set the test up: work in a temp dir with a copy of `packages` and two fresh keys."""
        cls.cwd = os.getcwd()
        cls.t = tempfile.mkdtemp()
        shutil.copytree(Path(ROOT_DIR, "packages"), Path(cls.t, "packages"))
        os.chdir(cls.t)
        cls.private_key_path_1 = os.path.join(cls.t, DEFAULT_PRIVATE_KEY_FILE + "_1")
        cls.private_key_path_2 = os.path.join(cls.t, DEFAULT_PRIVATE_KEY_FILE + "_2")
        create_private_key(DEFAULT_LEDGER, cls.private_key_path_1)
        create_private_key(DEFAULT_LEDGER, cls.private_key_path_2)

    def test_generated_protocol_end_to_end(self):
        """Test that a generated protocol could be used in exchanging messages between two agents."""
        agent_name_1 = "my_aea_1"
        agent_name_2 = "my_aea_2"

        # Build agent 1 with the generated t_protocol and the OEF connection.
        builder_1 = AEABuilder()
        builder_1.set_name(agent_name_1)
        builder_1.add_private_key(DEFAULT_LEDGER, self.private_key_path_1)
        builder_1.set_default_ledger(DEFAULT_LEDGER)
        builder_1.set_default_connection(OEF_CONNECTION_PUBLIC_ID)
        builder_1.add_protocol(
            Path(ROOT_DIR, "packages", "fetchai", "protocols", "fipa")
        )
        builder_1.add_protocol(
            Path(ROOT_DIR, "packages", "fetchai", "protocols", "oef_search")
        )
        # skip_consistency_check: the generated protocol is not a packaged release
        builder_1.add_component(
            ComponentType.PROTOCOL,
            Path(PATH_TO_T_PROTOCOL),
            skip_consistency_check=True,
        )
        builder_1.add_connection(
            Path(ROOT_DIR, "packages", "fetchai", "connections", "oef")
        )

        # Build agent 2 symmetrically.
        builder_2 = AEABuilder()
        builder_2.set_name(agent_name_2)
        builder_2.add_private_key(DEFAULT_LEDGER, self.private_key_path_2)
        builder_2.set_default_ledger(DEFAULT_LEDGER)
        builder_2.add_protocol(
            Path(ROOT_DIR, "packages", "fetchai", "protocols", "fipa")
        )
        builder_2.add_protocol(
            Path(ROOT_DIR, "packages", "fetchai", "protocols", "oef_search")
        )
        builder_2.set_default_connection(OEF_CONNECTION_PUBLIC_ID)
        builder_2.add_component(
            ComponentType.PROTOCOL,
            Path(PATH_TO_T_PROTOCOL),
            skip_consistency_check=True,
        )
        builder_2.add_connection(
            Path(ROOT_DIR, "packages", "fetchai", "connections", "oef")
        )

        # create AEAs
        aea_1 = builder_1.build(connection_ids=[OEF_CONNECTION_PUBLIC_ID])
        aea_2 = builder_2.build(connection_ids=[OEF_CONNECTION_PUBLIC_ID])

        # dialogues
        def role_from_first_message_1(
            message: Message, receiver_address: Address
        ) -> BaseDialogue.Role:
            """Infer the role of the agent from an incoming/outgoing first message

            :param message: an incoming/outgoing first message
            :param receiver_address: the address of the receiving agent
            :return: The role of the agent
            """
            return TProtocolDialogue.Role.ROLE_1

        agent_1_dialogues = TProtocolDialogues(
            self_address=aea_1.identity.address,
            role_from_first_message=role_from_first_message_1,
        )

        # Fix: this was a duplicate definition of `role_from_first_message_1`,
        # silently shadowing the first one; give agent 2 its own function.
        def role_from_first_message_2(
            message: Message, receiver_address: Address
        ) -> BaseDialogue.Role:
            """Infer the role of the agent from an incoming/outgoing first message

            :param message: an incoming/outgoing first message
            :param receiver_address: the address of the receiving agent
            :return: The role of the agent
            """
            return TProtocolDialogue.Role.ROLE_2

        agent_2_dialogues = TProtocolDialogues(
            self_address=aea_2.identity.address,
            role_from_first_message=role_from_first_message_2,
        )

        # messages
        message_1, aea_1_dialogue = agent_1_dialogues.create(
            counterparty=aea_2.identity.address,
            performative=TProtocolMessage.Performative.PERFORMATIVE_PT,
            content_bytes=b"some bytes",
            content_int=42,
            content_float=42.7,
            content_bool=True,
            content_str="some string",
        )
        message_1 = cast(TProtocolMessage, message_1)

        message_2, aea_2_dialogue = agent_2_dialogues.create(
            counterparty=aea_1.identity.address,
            performative=TProtocolMessage.Performative.PERFORMATIVE_PT,
            content_bytes=b"some other bytes",
            content_int=43,
            content_float=43.7,
            content_bool=False,
            content_str="some other string",
        )
        message_2 = cast(TProtocolMessage, message_2)

        # add handlers to AEA resources
        skill_context_1 = SkillContext(aea_1.context)
        skill_1 = Skill(SkillConfig("fake_skill", "fetchai", "0.1.0"), skill_context_1)
        skill_context_1._skill = skill_1

        agent_1_handler = Agent1Handler(
            skill_context=skill_context_1,
            name="fake_handler_1",
            dialogues=agent_1_dialogues,
        )
        aea_1.resources._handler_registry.register(
            (
                PublicId.from_str("fetchai/fake_skill:0.1.0"),
                TProtocolMessage.protocol_id,
            ),
            agent_1_handler,
        )
        skill_context_2 = SkillContext(aea_2.context)
        skill_2 = Skill(SkillConfig("fake_skill", "fetchai", "0.1.0"), skill_context_2)
        skill_context_2._skill = skill_2

        agent_2_handler = Agent2Handler(
            message=message_2,
            dialogues=agent_2_dialogues,
            skill_context=skill_context_2,
            name="fake_handler_2",
        )
        aea_2.resources._handler_registry.register(
            (
                PublicId.from_str("fetchai/fake_skill:0.1.0"),
                TProtocolMessage.protocol_id,
            ),
            agent_2_handler,
        )

        # Start threads
        t_1 = Thread(target=aea_1.start)
        t_2 = Thread(target=aea_2.start)
        try:
            t_1.start()
            t_2.start()
            time.sleep(1.0)
            aea_1.outbox.put_message(message_1)
            # Allow the message round-trip (1 -> 2, reply 2 -> 1) to complete.
            time.sleep(5.0)

            # Agent 1 -> Agent 2
            assert (
                agent_2_handler.handled_message.message_id == message_1.message_id
            ), "Message from Agent 1 to 2: message ids do not match"
            assert (
                agent_2_handler.handled_message.dialogue_reference
                == message_1.dialogue_reference
            ), "Message from Agent 1 to 2: dialogue references do not match"
            assert (
                agent_2_handler.handled_message.dialogue_reference[0]
                == message_1.dialogue_reference[0]
            ), "Message from Agent 1 to 2: dialogue reference[0]s do not match"
            assert (
                agent_2_handler.handled_message.dialogue_reference[1]
                == message_1.dialogue_reference[1]
            ), "Message from Agent 1 to 2: dialogue reference[1]s do not match"
            assert (
                agent_2_handler.handled_message.target == message_1.target
            ), "Message from Agent 1 to 2: targets do not match"
            assert (
                agent_2_handler.handled_message.performative == message_1.performative
            ), "Message from Agent 1 to 2: performatives do not match"
            assert (
                agent_2_handler.handled_message.content_bytes
                == message_1.content_bytes
            ), "Message from Agent 1 to 2: content_bytes do not match"
            assert (
                agent_2_handler.handled_message.content_int == message_1.content_int
            ), "Message from Agent 1 to 2: content_int do not match"
            # assert (
            #     agent_2_handler.handled_message.content_float == message_1.content_float  # noqa: E800
            # ), "Message from Agent 1 to 2: content_float do not match"
            assert (
                agent_2_handler.handled_message.content_bool == message_1.content_bool
            ), "Message from Agent 1 to 2: content_bool do not match"
            assert (
                agent_2_handler.handled_message.content_str == message_1.content_str
            ), "Message from Agent 1 to 2: content_str do not match"

            # Agent 2 -> Agent 1 (the automatic reply sent by Agent2Handler)
            # Fix: the original assertion message wrongly said
            # "Agent 1 to 2: dialogue references" on this message-id check.
            assert (
                agent_1_handler.handled_message.message_id == message_2.message_id
            ), "Message from Agent 2 to 1: message ids do not match"
            assert (
                agent_1_handler.handled_message.dialogue_reference
                == message_2.dialogue_reference
            ), "Message from Agent 2 to 1: dialogue references do not match"
            assert (
                agent_1_handler.handled_message.dialogue_reference[0]
                == message_2.dialogue_reference[0]
            ), "Message from Agent 2 to 1: dialogue reference[0]s do not match"
            assert (
                agent_1_handler.handled_message.dialogue_reference[1]
                == message_2.dialogue_reference[1]
            ), "Message from Agent 2 to 1: dialogue reference[1]s do not match"
            assert (
                agent_1_handler.handled_message.target == message_2.target
            ), "Message from Agent 2 to 1: targets do not match"
            assert (
                agent_1_handler.handled_message.performative == message_2.performative
            ), "Message from Agent 2 to 1: performatives do not match"
            assert (
                agent_1_handler.handled_message.content_bytes
                == message_2.content_bytes
            ), "Message from Agent 2 to 1: content_bytes do not match"
            assert (
                agent_1_handler.handled_message.content_int == message_2.content_int
            ), "Message from Agent 2 to 1: content_int do not match"
            # assert (
            #     agent_1_handler.handled_message.content_float == message_2.content_float  # noqa: E800
            # ), "Message from Agent 2 to 1: content_float do not match"
            assert (
                agent_1_handler.handled_message.content_bool == message_2.content_bool
            ), "Message from Agent 2 to 1: content_bool do not match"
            assert (
                agent_1_handler.handled_message.content_str == message_2.content_str
            ), "Message from Agent 2 to 1: content_str do not match"
            time.sleep(2.0)
        finally:
            aea_1.stop()
            aea_2.stop()
            t_1.join()
            t_2.join()

    @classmethod
    def teardown_class(cls):
        """Tear the test down: restore cwd and remove the temp dir (best effort)."""
        os.chdir(cls.cwd)
        try:
            shutil.rmtree(cls.t)
        except (OSError, IOError):
            pass


class Agent1Handler(Handler):
    """The handler for agent 1: records the last handled message."""

    SUPPORTED_PROTOCOL = TProtocolMessage.protocol_id  # type: Optional[PublicId]

    def __init__(self, dialogues: TProtocolDialogues, **kwargs):
        """Initialize the handler."""
        super().__init__(**kwargs)
        self.kwargs = kwargs
        self.handled_message = None  # type: Optional[TProtocolMessage]
        self.dialogues = dialogues

    def setup(self) -> None:
        """Implement the setup for the handler."""
        pass

    def handle(self, message: Message) -> None:
        """
        Implement the reaction to a message.

        :param message: the message
        :return: None
        """
        message = cast(TProtocolMessage, message)
        self.dialogues.update(message)
        self.handled_message = message

    def teardown(self) -> None:
        """
        Implement the handler teardown.

        :return: None
        """


class Agent2Handler(Handler):
    """The handler for agent 2: records the handled message and replies with a canned one."""

    SUPPORTED_PROTOCOL = TProtocolMessage.protocol_id  # type: Optional[PublicId]

    def __init__(
        self, message: TProtocolMessage, dialogues: TProtocolDialogues, **kwargs
    ):
        """Initialize the handler."""
        print("inside handler's initialisation method for agent 2")
        super().__init__(**kwargs)
        self.kwargs = kwargs
        self.handled_message = None  # type: Optional[TProtocolMessage]
        self.message_2 = message
        self.dialogues = dialogues

    def setup(self) -> None:
        """Implement the setup for the handler."""
        pass

    def handle(self, message: Message) -> None:
        """
        Implement the reaction to a message.

        :param message: the message
        :return: None
        """
        message = cast(TProtocolMessage, message)
        dialogue = self.dialogues.update(message)
        self.handled_message = message
        assert (
            dialogue is not None
        ), "Agent 2 didn't update dialogue with incoming message {}".format(
            str(message)
        )
        # Reply in-dialogue with the prepared message content.
        dialogue.reply(
            target_message=message,
            performative=self.message_2.performative,
            content_bytes=self.message_2.content_bytes,
            content_int=self.message_2.content_int,
            content_float=self.message_2.content_float,
            content_bool=self.message_2.content_bool,
            content_str=self.message_2.content_str,
        )
        self.context.outbox.put_message(self.message_2)

    def teardown(self) -> None:
        """
        Implement the handler teardown.

        :return: None
        """
util.py
import ctypes
import os
import re
import shutil
import sys
from pathlib import Path

from colorama import Back, Fore, Style

from .settings import *

# Hard requirement: Python 3.6+.
if sys.version_info[0] < 3 or sys.version_info[1] <= 5:
    raise RuntimeError(
        "\nPlease restart with Python 3.6+\n" + "Current Python version:",
        sys.version_info)

# Populated by import_ti_core() at module import time (see bottom of file).
ti_core = None


def in_docker():
    """Return True when running inside a Docker container (TI_IN_DOCKER env set)."""
    if os.environ.get("TI_IN_DOCKER", "") == "":
        return False
    else:
        return True


def import_ti_core(tmp_dir=None):
    """Import the taichi_core native extension and bind it to the global `ti_core`.

    :param tmp_dir: optional temporary directory to register with the core.
    """
    global ti_core
    if get_os_name() != 'win':
        # Temporarily switch dlopen flags so the shared object's symbols
        # are bound eagerly and kept local to it.
        old_flags = sys.getdlopenflags()
        sys.setdlopenflags(2 | 8)  # RTLD_NOW | RTLD_DEEPBIND
    else:  # Windows
        # Make the bundled DLLs findable via PATH.
        pyddir = os.path.join(package_root(), 'lib')
        os.environ['PATH'] += ';' + pyddir
    try:
        import taichi_core as core
    except Exception as e:
        if isinstance(e, ImportError):
            # Append OS-specific installation hints to the error message.
            print(Fore.YELLOW + "Share object taichi_core import failed, "
                  "check this page for possible solutions:\n"
                  "https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
                  + Fore.RESET)
            if get_os_name() == 'win':
                e.msg += '\nConsider installing Microsoft Visual C++ Redistributable: https://aka.ms/vs/16/release/vc_redist.x64.exe'
            elif get_os_name() == 'linux':
                e.msg += '\nConsider installing libtinfo5: sudo apt-get install libtinfo5'
        raise e from None
    ti_core = core
    if get_os_name() != 'win':
        # Restore the original dlopen flags.
        sys.setdlopenflags(old_flags)
    lib_dir = os.path.join(package_root(), 'lib')
    core.set_lib_dir(locale_encode(lib_dir))
    if tmp_dir is not None:
        core.set_tmp_dir(locale_encode(tmp_dir))


def locale_encode(path):
    """Encode a path to bytes, trying locale, then filesystem encoding, then default."""
    try:
        import locale
        return path.encode(locale.getdefaultlocale()[1])
    except:
        try:
            import sys
            return path.encode(sys.getfilesystemencoding())
        except:
            try:
                return path.encode()
            except:
                # Last resort: hand back the str unchanged.
                return path


def is_ci():
    """Return True when running under CI (TI_CI env var)."""
    return os.environ.get('TI_CI', '') == '1'


def package_root():
    """Return the parent directory of this package."""
    return os.path.join(os.path.dirname(os.path.realpath(__file__)), '../')


def is_release():
    """Release mode == no TAICHI_REPO_DIR set (i.e. not a source checkout)."""
    return os.environ.get('TAICHI_REPO_DIR', '') == ''


def get_core_shared_object():
    """Return the path to libtaichi_core.so for the current install mode."""
    if is_release():
        directory = os.path.join(package_root(), 'lib')
    else:
        directory = get_bin_directory()
    return os.path.join(directory, 'libtaichi_core.so')


def get_repo():
    """Return a GitPython Repo for the source checkout (dev mode only)."""
    from git import Repo
    repo = Repo(get_repo_directory())
    return repo


def print_red_bold(*args, **kwargs):
    """print() wrapper that renders the output in bold red."""
    print(Fore.RED + Style.BRIGHT, end='')
    print(*args, **kwargs)
    print(Style.RESET_ALL, end='')


create_sand_box_on_windows = True


def build():
    """Build taichi from source (msbuild on Windows, make elsewhere); exits on failure."""
    tmp_cwd = os.getcwd()
    bin_dir = get_build_directory()

    try:
        os.mkdir(bin_dir)
    except:
        # Directory may already exist.
        pass
    os.chdir(bin_dir)

    import multiprocessing
    print('Building taichi...')
    num_make_threads = min(20, multiprocessing.cpu_count())
    if get_os_name() == 'win':
        make_ret = os.system(
            "msbuild /p:Configuration=Release /p:Platform=x64 /m taichi.sln")
    else:
        make_ret = os.system('make -j {}'.format(num_make_threads))
    if make_ret != 0:
        print('  Error: Build failed.')
        exit(-1)

    os.chdir(tmp_cwd)


def check_exists(src):
    """Raise FileNotFoundError with a helpful message if `src` does not exist."""
    if not os.path.exists(src):
        raise FileNotFoundError(
            f'File "{src}" not exist. Installation corrupted or build incomplete?'
        )


def prepare_sandbox():
    '''
    Returns a temporary directory, which will be automatically deleted on exit.
    It may contain the taichi_core shared object or some misc. files.
    '''
    import atexit
    import shutil
    from tempfile import mkdtemp
    tmp_dir = mkdtemp(prefix='taichi-')
    # Clean up the sandbox when the interpreter exits.
    atexit.register(shutil.rmtree, tmp_dir)
    print(f'[Taichi] preparing sandbox at {tmp_dir}')
    os.mkdir(os.path.join(tmp_dir, 'runtime/'))
    return tmp_dir


def get_unique_task_id():
    """Return a timestamped, randomized task id string."""
    import datetime
    import random
    return datetime.datetime.now().strftime('task-%Y-%m-%d-%H-%M-%S-r') + (
        '%05d' % random.randint(0, 10000))


# ---------------------------------------------------------------------------
# Module import-time setup: locate/copy the native core library and import it.
# The branches below are strictly order-dependent (sys.path, PATH, cwd).
# ---------------------------------------------------------------------------
if is_release():
    print("[Taichi] mode=release")
    sys.path.append(os.path.join(package_root(), 'lib'))
    if get_os_name() != 'win':
        link_src = os.path.join(package_root(), 'lib', 'taichi_core.so')
        link_dst = os.path.join(package_root(), 'lib', 'libtaichi_core.so')
        # For llvm jit to find the runtime symbols
        if not os.path.exists(link_dst):
            os.symlink(link_src, link_dst)
    import_ti_core()
    if get_os_name() != 'win':
        dll = ctypes.CDLL(get_core_shared_object(), mode=ctypes.RTLD_LOCAL)

    # The C backend needs a temporary directory for the generated .c and compiled .so files:
    ti_core.set_tmp_dir(locale_encode(prepare_sandbox(
    )))  # TODO: always allocate a tmp_dir for all situations

    ti_core.set_python_package_dir(package_root())
    os.makedirs(ti_core.get_repo_dir(), exist_ok=True)
else:
    print("[Taichi] mode=development")
    if get_os_name() == 'osx':
        bin_dir = get_bin_directory()
        os.environ['DYLD_FALLBACK_LIBRARY_PATH'] = get_runtime_directory()
        lib_path = os.path.join(bin_dir, 'libtaichi_core.dylib')
        tmp_cwd = os.getcwd()
        tmp_dir = prepare_sandbox()
        check_exists(lib_path)
        shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
        os.chdir(tmp_dir)
        sys.path.append(tmp_dir)
        import taichi_core as ti_core
        os.chdir(tmp_cwd)

    # TODO: unify importing infrastructure:
    elif get_os_name() == 'linux':
        bin_dir = get_bin_directory()
        if 'LD_LIBRARY_PATH' in os.environ:
            os.environ['LD_LIBRARY_PATH'] += ':/usr/lib64/'
        else:
            os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/'
        lib_path = os.path.join(bin_dir, 'libtaichi_core.so')
        check_exists(lib_path)
        tmp_cwd = os.getcwd()
        tmp_dir = prepare_sandbox()
        check_exists(lib_path)
        shutil.copy(lib_path, os.path.join(tmp_dir, 'taichi_core.so'))
        os.chdir(tmp_dir)
        sys.path.append(tmp_dir)
        try:
            import_ti_core(tmp_dir)
        except Exception as e:
            print_red_bold("Taichi core import failed: ", end='')
            print(e)
            print(
                Fore.YELLOW + "check this page for possible solutions:\n"
                "https://taichi.readthedocs.io/en/stable/install.html#troubleshooting"
                + Fore.RESET)
            raise e from None
        os.chdir(tmp_cwd)

    elif get_os_name() == 'win':
        bin_dir = get_bin_directory()
        # libtaichi_core.dll is the wrong artifact name on Windows.
        dll_path_invalid = os.path.join(bin_dir, 'libtaichi_core.dll')
        assert not os.path.exists(dll_path_invalid)

        possible_folders = ['Debug', 'RelWithDebInfo', 'Release']
        detected_dlls = []
        for folder in possible_folders:
            dll_path = os.path.join(bin_dir, folder, 'taichi_core.dll')
            if os.path.exists(dll_path):
                detected_dlls.append(dll_path)

        if len(detected_dlls) == 0:
            raise FileNotFoundError(
                f'Cannot find Taichi core dll under {get_bin_directory()}/{possible_folders}'
            )
        elif len(detected_dlls) != 1:
            print('Warning: multiple Taichi core dlls found:')
            for dll in detected_dlls:
                print(' ', dll)
            print(f'Using {detected_dlls[0]}')

        dll_path = detected_dlls[0]

        # On windows when an dll/pyd is loaded, we cannot write to it any more
        old_wd = os.getcwd()
        os.chdir(bin_dir)

        if create_sand_box_on_windows:
            # Create a sandbox for separated core lib development and loading
            folder = os.path.join(get_output_directory(), 'tmp',
                                  get_unique_task_id())

            lib_dir = os.path.join(get_repo_directory(), 'external', 'lib')
            os.environ['PATH'] += ';' + lib_dir

            os.makedirs(folder)
            shutil.copy(dll_path, os.path.join(folder, 'taichi_core.pyd'))
            os.environ['PATH'] += ';' + folder
            sys.path.append(folder)
        else:
            shutil.copy(dll_path, os.path.join(bin_dir, 'taichi_core.pyd'))
            sys.path.append(bin_dir)
        try:
            import taichi_core as ti_core
        except Exception as e:
            print(e)
            print()
            print(
                'Hint: please make sure the major and minor versions of the Python executable is correct.'
            )
            print()
            raise e

        os.chdir(old_wd)

# Forward TI_LOG_LEVEL to the native core, if set.
log_level = os.environ.get('TI_LOG_LEVEL', '')
if log_level:
    ti_core.set_logging_level(log_level)


def get_dll_name(name):
    """Return the platform-specific shared-library filename for module `name`."""
    if get_os_name() == 'linux':
        return 'libtaichi_%s.so' % name
    elif get_os_name() == 'osx':
        return 'libtaichi_%s.dylib' % name
    elif get_os_name() == 'win':
        return 'taichi_%s.dll' % name
    else:
        raise Exception(f"Unknown OS: {get_os_name()}")


def load_module(name, verbose=True):
    """Load an auxiliary taichi native module by name or path (warns on failure)."""
    if verbose:
        print('Loading module', name)
    try:
        if get_os_name() == 'osx':
            mode = ctypes.RTLD_LOCAL
        else:
            mode = ctypes.RTLD_GLOBAL
        if '.so' in name:
            # `name` is already a path to a shared object.
            ctypes.PyDLL(name, mode=mode)
        else:
            ctypes.PyDLL(os.path.join(get_repo_directory(), 'build',
                                      get_dll_name(name)),
                         mode=mode)
    except Exception as e:
        print(Fore.YELLOW +
              "Warning: module [{}] loading failed: {}".format(name, e) +
              Style.RESET_ALL)


def at_startup():
    """Dev-mode startup hook: ensure the output directory exists, mark Python imported."""
    if not is_release():
        output_dir = get_output_directory()
        if not os.path.exists(output_dir):
            print('Making output directory')
            os.mkdir(output_dir)

    ti_core.set_core_state_python_imported(True)


def start_memory_monitoring(output_fn, pid=-1, interval=1):
    # removing dependency on psutil
    # NOTE(review): this early return deliberately disables the function;
    # everything below is dead code kept for reference.
    return
    import os
    import time

    import psutil
    if pid == -1:
        pid = os.getpid()
    import multiprocessing

    def task():
        with open(output_fn, 'w') as f:
            process = psutil.Process(pid)
            while True:
                try:
                    mem = process.memory_info().rss
                except:
                    mem = -1
                time.sleep(interval)
                print(time.time(), mem, file=f)
                f.flush()

    proc = multiprocessing.Process(target=task, daemon=True)
    proc.start()


def require_version(major, minor=None, patch=None):
    """Raise unless the installed taichi core version is >= major.minor.patch.

    NOTE(review): calling this with only `major` (minor/patch left as None)
    raises TypeError on the `minor < versions[1]` comparison — confirm
    whether minor/patch are meant to be optional.
    """
    versions = [
        int(ti_core.get_version_major()),
        int(ti_core.get_version_minor()),
        int(ti_core.get_version_patch()),
    ]
    match = major == versions[0] and (
        minor < versions[1] or minor == versions[1] and patch <= versions[2])
    if match:
        return
    else:
        print("Taichi version mismatch. required >= {}.{}.{}".format(
            major, minor, patch))
        print("Installed =", ti_core.get_version_string())
        raise Exception("Taichi version mismatch")


at_startup()


def _print_taichi_header():
    """Print the one-line [Taichi] banner with version/llvm/commit/platform info."""
    dev_mode = not is_release()

    header = '[Taichi] '
    if dev_mode:
        header += '<dev mode>, '
    else:
        header += f'version {ti_core.get_version_string()}, '

    llvm_version = ti_core.get_llvm_version_string()
    header += f'llvm {llvm_version}, '

    commit_hash = ti_core.get_commit_hash()
    commit_hash = commit_hash[:8]
    header += f'commit {commit_hash}, '

    header += f'{get_os_name()}, '

    py_ver = '.'.join(str(x) for x in sys.version_info[:3])
    header += f'python {py_ver}'

    print(header)


_print_taichi_header()

__all__ = [
    'ti_core',
    'build',
    'load_module',
    'start_memory_monitoring',
    'is_release',
    'package_root',
    'require_version',
]
myo_genData.py
import myo
import sys
from threading import Thread
import time
import numpy as np
import os
import matplotlib
matplotlib.use('GTKAgg')
import scipy
from math import ceil, floor

# Shared buffers written by the Myo EMG callback and read by the IMU callback.
e = []
emg_correctmean = []
emg_filtered = []
emg_rectified = []
emg_envelope = []
normalized = []
import config
count = 0
lastLine = ""
import scipy as sp
from scipy import signal

# Envelope low-pass cutoff (Hz) and sampling frequency (Hz).
low_pass = 4
sfreq = 1000
# delete training data from last run
from sklearn.preprocessing import MinMaxScaler
start_time = time.time()


def buildData(name):
    """Record `config.samples` filtered EMG envelope samples to tra/sub1/<name>.txt."""
    m = myo.Myo(sys.argv[1] if len(sys.argv) >= 2 else None)
    _dir = os.path.join('/home/venkatesh/Desktop', 'sub1')
    if not os.path.exists(_dir):
        os.makedirs(_dir)
    f = open("tra/sub1/" + name + ".txt", "a")

    # Callback for EMG data from Myo (8 words)
    def proc_emg(emg, moving, times=[]):
        global e, emg_correctmean, emg_filtered, emg_rectified, low_pass, sfreq, emg_envelope
        e = emg
        # Remove the DC offset / linear trend.
        emg_correctmean = scipy.signal.detrend(e)
        # 20–450 Hz band-pass (normalized to Nyquist = 500 Hz).
        high = 20 / (1000 / 2)
        low = 450 / (1000 / 2)
        b, a = sp.signal.butter(4, [high, low], btype='bandpass')
        emg_filtered = sp.signal.filtfilt(b, a, e, method='gust')
        # Rectify, then low-pass to obtain the linear envelope.
        emg_rectified = abs(emg_filtered)
        l = float(low_pass / (sfreq / 2))
        b2, a2 = sp.signal.butter(4, l, btype='lowpass')
        emg_envelope = sp.signal.filtfilt(b2, a2, emg_rectified, method='gust')
        emg_envelope = emg_envelope * 100

    # Callback for other motion data, including accelerometer and gyroscope.
    # Samples are written here (not in proc_emg) so EMG and IMU stay in step.
    def proc_imu(quat, acc, gyro, times=[]):
        global q, a, g, b, t, c, count
        q = quat
        a = acc
        g = gyro
        if count < config.samples:
            if len(emg_envelope) > 0:
                c = list(emg_envelope)
                print(str(c) + "\n")
                f.write(str(c) + "\n")
            count = count + 1

    m.connect()
    m.add_emg_handler(proc_emg)
    m.add_imu_handler(proc_imu)
    for x in range(0, config.samples):
        m.run()
    m.disconnect()
    # Fix: the sample file was previously never closed/flushed.
    f.close()


# One recording session per gesture; the worker thread is joined before the
# next gesture, so recordings never overlap.
for i in range(0, config.numGestures):
    thread = Thread(target=buildData, args=("s" + str(i + 9),))
    # Fix: was `thread.deamon = True` (typo) — the daemon flag was never set.
    thread.daemon = True
    thread.start()
    thread.join()
    print("Next attempt")
    input("Press Enter to continue...")
    # Reset the shared sample counter for the next gesture.
    count = 0
test_connection_pool.py
# Tests for redis-py connection pools: plain and blocking pools, URL parsing
# (tcp / unix-socket / ssl schemes), connection error handling, and the
# periodic health-check machinery.  Many tests rely on the live-server
# fixtures from conftest (`r`, `_get_client`).
import os
import mock
import pytest
import re
import redis
import time
from threading import Thread

from redis.connection import ssl_available, to_bool
from .conftest import skip_if_server_version_lt, _get_client, REDIS_6_VERSION
from .test_pubsub import wait_for_message


class DummyConnection(object):
    """Minimal stand-in for redis.Connection: records kwargs, never does I/O."""
    description_format = "DummyConnection<>"

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.pid = os.getpid()

    def connect(self):
        pass

    def can_read(self):
        return False


class TestConnectionPool(object):
    """Behaviour of the default (non-blocking) ConnectionPool."""

    def get_pool(self, connection_kwargs=None, max_connections=None,
                 connection_class=redis.Connection):
        # Helper: build a pool with the given limits and connection class.
        connection_kwargs = connection_kwargs or {}
        pool = redis.ConnectionPool(
            connection_class=connection_class,
            max_connections=max_connections,
            **connection_kwargs)
        return pool

    def test_connection_creation(self):
        connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
        pool = self.get_pool(connection_kwargs=connection_kwargs,
                             connection_class=DummyConnection)
        connection = pool.get_connection('_')
        assert isinstance(connection, DummyConnection)
        assert connection.kwargs == connection_kwargs

    def test_multiple_connections(self):
        pool = self.get_pool()
        c1 = pool.get_connection('_')
        c2 = pool.get_connection('_')
        assert c1 != c2

    def test_max_connections(self):
        pool = self.get_pool(max_connections=2)
        pool.get_connection('_')
        pool.get_connection('_')
        # third checkout exceeds the limit and must raise immediately
        with pytest.raises(redis.ConnectionError):
            pool.get_connection('_')

    def test_reuse_previously_released_connection(self):
        pool = self.get_pool()
        c1 = pool.get_connection('_')
        pool.release(c1)
        c2 = pool.get_connection('_')
        assert c1 == c2

    def test_repr_contains_db_info_tcp(self):
        connection_kwargs = {
            'host': 'localhost',
            'port': 6379,
            'db': 1,
            'client_name': 'test-client'
        }
        pool = self.get_pool(connection_kwargs=connection_kwargs,
                             connection_class=redis.Connection)
        expected = ('ConnectionPool<Connection<'
                    'host=localhost,port=6379,db=1,client_name=test-client>>')
        assert repr(pool) == expected

    def test_repr_contains_db_info_unix(self):
        connection_kwargs = {
            'path': '/abc',
            'db': 1,
            'client_name': 'test-client'
        }
        pool = self.get_pool(connection_kwargs=connection_kwargs,
                             connection_class=redis.UnixDomainSocketConnection)
        expected = ('ConnectionPool<UnixDomainSocketConnection<'
                    'path=/abc,db=1,client_name=test-client>>')
        assert repr(pool) == expected


class TestBlockingConnectionPool(object):
    """Behaviour of BlockingConnectionPool (waits instead of raising)."""

    def get_pool(self, connection_kwargs=None, max_connections=10, timeout=20):
        # Helper: build a blocking pool backed by DummyConnection.
        connection_kwargs = connection_kwargs or {}
        pool = redis.BlockingConnectionPool(connection_class=DummyConnection,
                                            max_connections=max_connections,
                                            timeout=timeout,
                                            **connection_kwargs)
        return pool

    def test_connection_creation(self):
        connection_kwargs = {'foo': 'bar', 'biz': 'baz'}
        pool = self.get_pool(connection_kwargs=connection_kwargs)
        connection = pool.get_connection('_')
        assert isinstance(connection, DummyConnection)
        assert connection.kwargs == connection_kwargs

    def test_multiple_connections(self):
        pool = self.get_pool()
        c1 = pool.get_connection('_')
        c2 = pool.get_connection('_')
        assert c1 != c2

    def test_connection_pool_blocks_until_timeout(self):
        "When out of connections, block for timeout seconds, then raise"
        pool = self.get_pool(max_connections=1, timeout=0.1)
        pool.get_connection('_')

        start = time.time()
        with pytest.raises(redis.ConnectionError):
            pool.get_connection('_')
        # we should have waited at least 0.1 seconds
        assert time.time() - start >= 0.1

    # NOTE(review): this method is missing the 'test_' prefix, so pytest
    # never collects or runs it -- presumably it should be renamed to
    # test_connection_pool_blocks_until_another_connection_released.
    def connection_pool_blocks_until_another_connection_released(self):
        """
        When out of connections, block until another connection is
        released to the pool
        """
        pool = self.get_pool(max_connections=1, timeout=2)
        c1 = pool.get_connection('_')

        def target():
            time.sleep(0.1)
            pool.release(c1)

        Thread(target=target).start()
        start = time.time()
        pool.get_connection('_')
        assert time.time() - start >= 0.1

    def test_reuse_previously_released_connection(self):
        pool = self.get_pool()
        c1 = pool.get_connection('_')
        pool.release(c1)
        c2 = pool.get_connection('_')
        assert c1 == c2

    def test_repr_contains_db_info_tcp(self):
        pool = redis.ConnectionPool(
            host='localhost',
            port=6379,
            db=0,
            client_name='test-client'
        )
        expected = ('ConnectionPool<Connection<'
                    'host=localhost,port=6379,db=0,client_name=test-client>>')
        assert repr(pool) == expected

    def test_repr_contains_db_info_unix(self):
        pool = redis.ConnectionPool(
            connection_class=redis.UnixDomainSocketConnection,
            path='abc',
            db=0,
            client_name='test-client'
        )
        expected = ('ConnectionPool<UnixDomainSocketConnection<'
                    'path=abc,db=0,client_name=test-client>>')
        assert repr(pool) == expected


class TestConnectionPoolURLParsing(object):
    """ConnectionPool.from_url parsing for the redis:// scheme."""

    def test_defaults(self):
        pool = redis.ConnectionPool.from_url('redis://localhost')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': None,
        }

    def test_hostname(self):
        pool = redis.ConnectionPool.from_url('redis://myhost')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'myhost',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': None,
        }

    def test_quoted_hostname(self):
        # decode_components=True percent-decodes each URL component
        pool = redis.ConnectionPool.from_url('redis://my %2F host %2B%3D+',
                                             decode_components=True)
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'my / host +=+',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': None,
        }

    def test_port(self):
        pool = redis.ConnectionPool.from_url('redis://localhost:6380')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6380,
            'db': 0,
            'username': None,
            'password': None,
        }

    @skip_if_server_version_lt(REDIS_6_VERSION)
    def test_username(self):
        pool = redis.ConnectionPool.from_url('redis://myuser:@localhost')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': 'myuser',
            'password': None,
        }

    @skip_if_server_version_lt(REDIS_6_VERSION)
    def test_quoted_username(self):
        pool = redis.ConnectionPool.from_url(
            'redis://%2Fmyuser%2F%2B name%3D%24+:@localhost',
            decode_components=True)
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': '/myuser/+ name=$+',
            'password': None,
        }

    def test_password(self):
        pool = redis.ConnectionPool.from_url('redis://:mypassword@localhost')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': 'mypassword',
        }

    def test_quoted_password(self):
        pool = redis.ConnectionPool.from_url(
            'redis://:%2Fmypass%2F%2B word%3D%24+@localhost',
            decode_components=True)
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': '/mypass/+ word=$+',
        }

    @skip_if_server_version_lt(REDIS_6_VERSION)
    def test_username_and_password(self):
        pool = redis.ConnectionPool.from_url('redis://myuser:mypass@localhost')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': 'myuser',
            'password': 'mypass',
        }

    def test_db_as_argument(self):
        pool = redis.ConnectionPool.from_url('redis://localhost', db='1')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 1,
            'username': None,
            'password': None,
        }

    def test_db_in_path(self):
        # the URL path overrides the db keyword argument
        pool = redis.ConnectionPool.from_url('redis://localhost/2', db='1')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 2,
            'username': None,
            'password': None,
        }

    def test_db_in_querystring(self):
        # the querystring overrides both the path and the keyword argument
        pool = redis.ConnectionPool.from_url('redis://localhost/2?db=3',
                                             db='1')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 3,
            'username': None,
            'password': None,
        }

    def test_extra_typed_querystring_options(self):
        # known options are coerced to their proper types; empty values
        # (socket_keepalive=) are dropped
        pool = redis.ConnectionPool.from_url(
            'redis://localhost/2?socket_timeout=20&socket_connect_timeout=10'
            '&socket_keepalive=&retry_on_timeout=Yes&max_connections=10'
        )
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 2,
            'socket_timeout': 20.0,
            'socket_connect_timeout': 10.0,
            'retry_on_timeout': True,
            'username': None,
            'password': None,
        }
        assert pool.max_connections == 10

    def test_boolean_parsing(self):
        # to_bool maps the URL spellings of booleans onto None/False/True
        for expected, value in (
                (None, None),
                (None, ''),
                (False, 0), (False, '0'),
                (False, 'f'), (False, 'F'), (False, 'False'),
                (False, 'n'), (False, 'N'), (False, 'No'),
                (True, 1), (True, '1'),
                (True, 'y'), (True, 'Y'), (True, 'Yes'),
        ):
            assert expected is to_bool(value)

    def test_client_name_in_querystring(self):
        pool = redis.ConnectionPool.from_url(
            'redis://location?client_name=test-client'
        )
        assert pool.connection_kwargs['client_name'] == 'test-client'

    def test_invalid_extra_typed_querystring_options(self):
        import warnings
        with warnings.catch_warnings(record=True) as warning_log:
            redis.ConnectionPool.from_url(
                'redis://localhost/2?socket_timeout=_&'
                'socket_connect_timeout=abc'
            )
        # Compare the message values
        assert [
            str(m.message) for m in
            sorted(warning_log, key=lambda l: str(l.message))
        ] == [
            'Invalid value for `socket_connect_timeout` in connection URL.',
            'Invalid value for `socket_timeout` in connection URL.',
        ]

    def test_extra_querystring_options(self):
        # unknown querystring options are passed through as strings
        pool = redis.ConnectionPool.from_url('redis://localhost?a=1&b=2')
        assert pool.connection_class == redis.Connection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': None,
            'a': '1',
            'b': '2'
        }

    def test_calling_from_subclass_returns_correct_instance(self):
        pool = redis.BlockingConnectionPool.from_url('redis://localhost')
        assert isinstance(pool, redis.BlockingConnectionPool)

    def test_client_creates_connection_pool(self):
        r = redis.Redis.from_url('redis://myhost')
        assert r.connection_pool.connection_class == redis.Connection
        assert r.connection_pool.connection_kwargs == {
            'host': 'myhost',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': None,
        }

    def test_invalid_scheme_raises_error(self):
        with pytest.raises(ValueError) as cm:
            redis.ConnectionPool.from_url('localhost')
        assert str(cm.value) == (
            'Redis URL must specify one of the following schemes '
            '(redis://, rediss://, unix://)'
        )


class TestConnectionPoolUnixSocketURLParsing(object):
    """ConnectionPool.from_url parsing for the unix:// scheme."""

    def test_defaults(self):
        pool = redis.ConnectionPool.from_url('unix:///socket')
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 0,
            'username': None,
            'password': None,
        }

    @skip_if_server_version_lt(REDIS_6_VERSION)
    def test_username(self):
        pool = redis.ConnectionPool.from_url('unix://myuser:@/socket')
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 0,
            'username': 'myuser',
            'password': None,
        }

    @skip_if_server_version_lt(REDIS_6_VERSION)
    def test_quoted_username(self):
        pool = redis.ConnectionPool.from_url(
            'unix://%2Fmyuser%2F%2B name%3D%24+:@/socket',
            decode_components=True)
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 0,
            'username': '/myuser/+ name=$+',
            'password': None,
        }

    def test_password(self):
        pool = redis.ConnectionPool.from_url('unix://:mypassword@/socket')
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 0,
            'username': None,
            'password': 'mypassword',
        }

    def test_quoted_password(self):
        pool = redis.ConnectionPool.from_url(
            'unix://:%2Fmypass%2F%2B word%3D%24+@/socket',
            decode_components=True)
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 0,
            'username': None,
            'password': '/mypass/+ word=$+',
        }

    def test_quoted_path(self):
        pool = redis.ConnectionPool.from_url(
            'unix://:mypassword@/my%2Fpath%2Fto%2F..%2F+_%2B%3D%24ocket',
            decode_components=True)
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/my/path/to/../+_+=$ocket',
            'db': 0,
            'username': None,
            'password': 'mypassword',
        }

    def test_db_as_argument(self):
        pool = redis.ConnectionPool.from_url('unix:///socket', db=1)
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 1,
            'username': None,
            'password': None,
        }

    def test_db_in_querystring(self):
        pool = redis.ConnectionPool.from_url('unix:///socket?db=2', db=1)
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 2,
            'username': None,
            'password': None,
        }

    def test_client_name_in_querystring(self):
        pool = redis.ConnectionPool.from_url(
            'redis://location?client_name=test-client'
        )
        assert pool.connection_kwargs['client_name'] == 'test-client'

    def test_extra_querystring_options(self):
        pool = redis.ConnectionPool.from_url('unix:///socket?a=1&b=2')
        assert pool.connection_class == redis.UnixDomainSocketConnection
        assert pool.connection_kwargs == {
            'path': '/socket',
            'db': 0,
            'username': None,
            'password': None,
            'a': '1',
            'b': '2'
        }


class TestSSLConnectionURLParsing(object):
    """ConnectionPool.from_url parsing for the rediss:// (TLS) scheme."""

    @pytest.mark.skipif(not ssl_available, reason="SSL not installed")
    def test_defaults(self):
        pool = redis.ConnectionPool.from_url('rediss://localhost')
        assert pool.connection_class == redis.SSLConnection
        assert pool.connection_kwargs == {
            'host': 'localhost',
            'port': 6379,
            'db': 0,
            'username': None,
            'password': None,
        }

    @pytest.mark.skipif(not ssl_available, reason="SSL not installed")
    def test_cert_reqs_options(self):
        import ssl

        # pool subclass that hands back a raw connection object so the
        # ssl attributes can be inspected without touching the network
        class DummyConnectionPool(redis.ConnectionPool):
            def get_connection(self, *args, **kwargs):
                return self.make_connection()

        pool = DummyConnectionPool.from_url(
            'rediss://?ssl_cert_reqs=none')
        assert pool.get_connection('_').cert_reqs == ssl.CERT_NONE

        pool = DummyConnectionPool.from_url(
            'rediss://?ssl_cert_reqs=optional')
        assert pool.get_connection('_').cert_reqs == ssl.CERT_OPTIONAL

        pool = DummyConnectionPool.from_url(
            'rediss://?ssl_cert_reqs=required')
        assert pool.get_connection('_').cert_reqs == ssl.CERT_REQUIRED

        pool = DummyConnectionPool.from_url(
            'rediss://?ssl_check_hostname=False')
        assert pool.get_connection('_').check_hostname is False

        pool = DummyConnectionPool.from_url(
            'rediss://?ssl_check_hostname=True')
        assert pool.get_connection('_').check_hostname is True


class TestConnection(object):
    """Connection-level error handling against a live server fixture."""

    def test_on_connect_error(self):
        """
        An error in Connection.on_connect should disconnect from the server
        see for details: https://github.com/andymccurdy/redis-py/issues/368
        """
        # this assumes the Redis server being tested against doesn't have
        # 9999 databases ;)
        bad_connection = redis.Redis(db=9999)
        # an error should be raised on connect
        with pytest.raises(redis.RedisError):
            bad_connection.info()
        pool = bad_connection.connection_pool
        assert len(pool._available_connections) == 1
        assert not pool._available_connections[0]._sock

    @skip_if_server_version_lt('2.8.8')
    def test_busy_loading_disconnects_socket(self, r):
        """
        If Redis raises a LOADING error, the connection should be
        disconnected and a BusyLoadingError raised
        """
        with pytest.raises(redis.BusyLoadingError):
            r.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
        assert not r.connection._sock

    @skip_if_server_version_lt('2.8.8')
    def test_busy_loading_from_pipeline_immediate_command(self, r):
        """
        BusyLoadingErrors should raise from Pipelines that execute a
        command immediately, like WATCH does.
        """
        pipe = r.pipeline()
        with pytest.raises(redis.BusyLoadingError):
            pipe.immediate_execute_command('DEBUG', 'ERROR',
                                           'LOADING fake message')
        pool = r.connection_pool
        assert not pipe.connection
        assert len(pool._available_connections) == 1
        assert not pool._available_connections[0]._sock

    @skip_if_server_version_lt('2.8.8')
    def test_busy_loading_from_pipeline(self, r):
        """
        BusyLoadingErrors should be raised from a pipeline execution
        regardless of the raise_on_error flag.
        """
        pipe = r.pipeline()
        pipe.execute_command('DEBUG', 'ERROR', 'LOADING fake message')
        with pytest.raises(redis.BusyLoadingError):
            pipe.execute()
        pool = r.connection_pool
        assert not pipe.connection
        assert len(pool._available_connections) == 1
        assert not pool._available_connections[0]._sock

    @skip_if_server_version_lt('2.8.8')
    def test_read_only_error(self, r):
        "READONLY errors get turned in ReadOnlyError exceptions"
        with pytest.raises(redis.ReadOnlyError):
            r.execute_command('DEBUG', 'ERROR', 'READONLY blah blah')

    def test_connect_from_url_tcp(self):
        connection = redis.Redis.from_url('redis://localhost')
        pool = connection.connection_pool

        assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
            'ConnectionPool',
            'Connection',
            'host=localhost,port=6379,db=0',
        )

    def test_connect_from_url_unix(self):
        connection = redis.Redis.from_url('unix:///path/to/socket')
        pool = connection.connection_pool

        assert re.match('(.*)<(.*)<(.*)>>', repr(pool)).groups() == (
            'ConnectionPool',
            'UnixDomainSocketConnection',
            'path=/path/to/socket,db=0',
        )

    def test_connect_no_auth_supplied_when_required(self, r):
        """
        AuthenticationError should be raised when the server requires a
        password but one isn't supplied.
        """
        with pytest.raises(redis.AuthenticationError):
            r.execute_command('DEBUG', 'ERROR',
                              'ERR Client sent AUTH, but no password is set')

    def test_connect_invalid_password_supplied(self, r):
        "AuthenticationError should be raised when sending the wrong password"
        with pytest.raises(redis.AuthenticationError):
            r.execute_command('DEBUG', 'ERROR', 'ERR invalid password')


class TestMultiConnectionClient(object):
    """A client created with single_connection_client=False checks out a
    connection per command instead of pinning one."""

    @pytest.fixture()
    def r(self, request):
        return _get_client(redis.Redis,
                           request,
                           single_connection_client=False)

    def test_multi_connection_command(self, r):
        assert not r.connection
        assert r.set('a', '123')
        assert r.get('a') == b'123'


class TestHealthCheck(object):
    """Periodic PING health checks on pooled, pipelined and pubsub
    connections (health_check_interval)."""

    interval = 60

    @pytest.fixture()
    def r(self, request):
        return _get_client(redis.Redis, request,
                           health_check_interval=self.interval)

    def assert_interval_advanced(self, connection):
        # the next check must be scheduled ~interval seconds in the future
        diff = connection.next_health_check - time.time()
        assert self.interval > diff > (self.interval - 1)

    def test_health_check_runs(self, r):
        r.connection.next_health_check = time.time() - 1
        r.connection.check_health()
        self.assert_interval_advanced(r.connection)

    def test_arbitrary_command_invokes_health_check(self, r):
        # invoke a command to make sure the connection is entirely setup
        r.get('foo')
        r.connection.next_health_check = time.time()
        with mock.patch.object(r.connection, 'send_command',
                               wraps=r.connection.send_command) as m:
            r.get('foo')
            m.assert_called_with('PING', check_health=False)

        self.assert_interval_advanced(r.connection)

    def test_arbitrary_command_advances_next_health_check(self, r):
        r.get('foo')
        next_health_check = r.connection.next_health_check
        r.get('foo')
        assert next_health_check < r.connection.next_health_check

    def test_health_check_not_invoked_within_interval(self, r):
        r.get('foo')
        with mock.patch.object(r.connection, 'send_command',
                               wraps=r.connection.send_command) as m:
            r.get('foo')
            ping_call_spec = (('PING',), {'check_health': False})
            assert ping_call_spec not in m.call_args_list

    def test_health_check_in_pipeline(self, r):
        with r.pipeline(transaction=False) as pipe:
            pipe.connection = pipe.connection_pool.get_connection('_')
            pipe.connection.next_health_check = 0
            with mock.patch.object(pipe.connection, 'send_command',
                                   wraps=pipe.connection.send_command) as m:
                responses = pipe.set('foo', 'bar').get('foo').execute()
                m.assert_any_call('PING', check_health=False)
                assert responses == [True, b'bar']

    def test_health_check_in_transaction(self, r):
        with r.pipeline(transaction=True) as pipe:
            pipe.connection = pipe.connection_pool.get_connection('_')
            pipe.connection.next_health_check = 0
            with mock.patch.object(pipe.connection, 'send_command',
                                   wraps=pipe.connection.send_command) as m:
                responses = pipe.set('foo', 'bar').get('foo').execute()
                m.assert_any_call('PING', check_health=False)
                assert responses == [True, b'bar']

    def test_health_check_in_watched_pipeline(self, r):
        r.set('foo', 'bar')
        with r.pipeline(transaction=False) as pipe:
            pipe.connection = pipe.connection_pool.get_connection('_')
            pipe.connection.next_health_check = 0
            with mock.patch.object(pipe.connection, 'send_command',
                                   wraps=pipe.connection.send_command) as m:
                pipe.watch('foo')
                # the health check should be called when watching
                m.assert_called_with('PING', check_health=False)
                self.assert_interval_advanced(pipe.connection)
                assert pipe.get('foo') == b'bar'

                # reset the mock to clear the call list and schedule another
                # health check
                m.reset_mock()
                pipe.connection.next_health_check = 0

                pipe.multi()
                responses = pipe.set('foo', 'not-bar').get('foo').execute()
                assert responses == [True, b'not-bar']
                m.assert_any_call('PING', check_health=False)

    def test_health_check_in_pubsub_before_subscribe(self, r):
        "A health check happens before the first [p]subscribe"
        p = r.pubsub()
        p.connection = p.connection_pool.get_connection('_')
        p.connection.next_health_check = 0
        with mock.patch.object(p.connection, 'send_command',
                               wraps=p.connection.send_command) as m:
            assert not p.subscribed
            p.subscribe('foo')
            # the connection is not yet in pubsub mode, so the normal
            # ping/pong within connection.send_command should check
            # the health of the connection
            m.assert_any_call('PING', check_health=False)
            self.assert_interval_advanced(p.connection)

            subscribe_message = wait_for_message(p)
            assert subscribe_message['type'] == 'subscribe'

    def test_health_check_in_pubsub_after_subscribed(self, r):
        """
        Pubsub can handle a new subscribe when it's time to check the
        connection health
        """
        p = r.pubsub()
        p.connection = p.connection_pool.get_connection('_')
        p.connection.next_health_check = 0
        with mock.patch.object(p.connection, 'send_command',
                               wraps=p.connection.send_command) as m:
            p.subscribe('foo')
            subscribe_message = wait_for_message(p)
            assert subscribe_message['type'] == 'subscribe'
            self.assert_interval_advanced(p.connection)
            # because we weren't subscribed when sending the subscribe
            # message to 'foo', the connection's standard check_health ran
            # prior to subscribing.
            m.assert_any_call('PING', check_health=False)

            p.connection.next_health_check = 0
            m.reset_mock()
            p.subscribe('bar')
            # the second subscribe issues exactly only command (the subscribe)
            # and the health check is not invoked
            m.assert_called_once_with('SUBSCRIBE', 'bar', check_health=False)

            # since no message has been read since the health check was
            # reset, it should still be 0
            assert p.connection.next_health_check == 0

            subscribe_message = wait_for_message(p)
            assert subscribe_message['type'] == 'subscribe'
            assert wait_for_message(p) is None
            # now that the connection is subscribed, the pubsub health
            # check should have taken over and include the HEALTH_CHECK_MESSAGE
            m.assert_any_call('PING', p.HEALTH_CHECK_MESSAGE,
                              check_health=False)
            self.assert_interval_advanced(p.connection)

    def test_health_check_in_pubsub_poll(self, r):
        """
        Polling a pubsub connection that's subscribed will regularly
        check the connection's health.
        """
        p = r.pubsub()
        p.connection = p.connection_pool.get_connection('_')
        with mock.patch.object(p.connection, 'send_command',
                               wraps=p.connection.send_command) as m:
            p.subscribe('foo')
            subscribe_message = wait_for_message(p)
            assert subscribe_message['type'] == 'subscribe'
            self.assert_interval_advanced(p.connection)

            # polling the connection before the health check interval
            # doesn't result in another health check
            m.reset_mock()
            next_health_check = p.connection.next_health_check
            assert wait_for_message(p) is None
            assert p.connection.next_health_check == next_health_check
            m.assert_not_called()

            # reset the health check and poll again
            # we should not receive a pong message, but the next_health_check
            # should be advanced
            p.connection.next_health_check = 0
            assert wait_for_message(p) is None
            m.assert_called_with('PING', p.HEALTH_CHECK_MESSAGE,
                                 check_health=False)
            self.assert_interval_advanced(p.connection)
# ===== File: app.py (touchscreen ingredients-decoder UI) =====
# Touchscreen app preamble: imports, shared UI constants, and the globals /
# queues used to talk to the separate camera process.
import json
import tkinter as tk
import requests
from PIL import ImageTk, Image
import os
import interface            # project module -- presumably UI helpers; verify usage
import renderingUtil        # project helpers (resizeImage, refresh) used below
import database             # project DB access layer (personal/harmful lists)
import multiprocessing as mp
from functools import partial
import ctypes
import testyboi as testy    # project module -- role unclear from here; verify
import time
import Camera.camera as camera  # camera-process code (see protocol note below)
import cv2

# import functions and classes
import googleVision         # wrappers around the OCR / image-recognition APIs

# current working directory
workingDir = os.path.dirname(os.path.abspath(__file__))
backgroundColour = "#263D42"  # shared background colour for the main canvas

# variables for communicating between processes
"""
Protocol:
Camera process polls sonar, and takes picture.
writes picture to directory.
puts notification in queue
This process checks queue, if set reads image and puts acknowledgement in queue
once acknowledgement read, Camera process may take a new picture.
"""
pictureExists = False     # a picture has been written to disk
newPicture = False        # camera has announced a new picture via imageQueue
acceptNextImage = True    # UI is ready for the camera to capture again
objectImg = "/images/download.jpg"  # path of the most recent object image
buffer = None

# queues to pass messages between processes
imageQueue = mp.Queue()   # camera -> UI: "new image available" (per protocol above)
ackQueue = mp.Queue()     # UI -> camera: acknowledgement (per protocol above)

# class app is an instantiation of the touchscreen app.
# It contains several pages including Login Page, Landing Page, Regular Items Page and Custom Items page class App(tk.Tk): def __init__(self, *args, **kwargs): tk.Tk.__init__(self, *args, **kwargs) self.attributes('-fullscreen', True) self.canvas = tk.Canvas(self, bg=backgroundColour) self.canvas.pack(fill=tk.BOTH, expand=True) # Set up Menu MainMenu(self) # Set up Frames for different pages container = tk.Frame(self.canvas) container.place(relwidth=0.75, relheight=0.85, relx=0.1, rely=0.1) container.grid_rowconfigure(0, weight=1) container.grid_columnconfigure(0, weight=1) self.frames = {} # switch between different pages for F in (LandingPage, RegularItems, CustomItems, LoginPage): frame = F(container, self) self.frames[F] = frame frame.grid(row=0, column=0, sticky="nsew") self.show_frame(LoginPage) def show_frame(self, context): frame = self.frames[context] frame.tkraise() # class LoginPage is the login page of the touchscreen app # from this page you can log in as a guest or log in using bluetooth functionality class LoginPage(tk.Frame): def __init__(self, parent, controller): tk.Frame.__init__(self, parent) label = tk.Label(self, text="Welcome to Ingredients Decoder") label.config(font=('helvetica', 30)) label.pack(padx=10, pady=10) guest_login = tk.Button(self, text="Log in as a guest", height = 2, font=('helvetica', 15), command= lambda: self.loginAsGuest()) guest_login.pack() bt_login = tk.Button(self, text="Log in using bluetooth", height = 2, font=('helvetica', 15), command=lambda: self.loginBT()) bt_login.pack() self.loginStatus = tk.Label(self, text="Please log in to continue", height = 2, font=('helvetica', 15)) self.loginStatus.pack() self.continue_button = tk.Button() self.controllor = controller # log in as a guest def loginAsGuest(self): renderingUtil.refresh(self.loginStatus) self.loginStatus = tk.Label(self, text="You have successfully loged in as guest", height = 2, font=('helvetica', 15)) self.loginStatus.pack() 
renderingUtil.refresh(self.continue_button) self.continue_button = tk.Button(self, text="Continue", height = 2, font=('helvetica', 15), command= lambda: self.continueToLanding()) self.continue_button.pack() # start browsing for a bluetooth login def loginBT(self): textInput = "Browsing for a bluetooth login.... " renderingUtil.refresh(self.loginStatus) self.loginStatus = tk.Label(self, text=textInput, height = 2, font=('helvetica', 15)) self.loginStatus.pack() renderingUtil.refresh(self.continue_button) # go to Landing page, reset the login status and delete the continue button def continueToLanding(self): self.controllor.show_frame(LandingPage) renderingUtil.refresh(self.loginStatus) self.loginStatus = tk.Label(self, text="Please log in to continue", height = 2, font=('helvetica', 15)) self.loginStatus.pack() renderingUtil.refresh(self.continue_button) # class LandingPage is the landing page of the touchscreen app after you're signed in # from this page you can proceed to scan Regular Items, scan Custom Items or check out Personalized List class LandingPage(tk.Frame): def __init__(self, parent, controller): tk.Frame.__init__(self, parent) # welcome labels, buttons to navigate to different pages of the app label = tk.Label(self, text="You have successfully loged in") label.config(font=('helvetica', 30)) label.pack(padx=10, pady=10) regular_page = tk.Button(self, text="Regular Items", height = 2, font=('helvetica', 15), command=lambda: controller.show_frame(RegularItems)) regular_page.pack() custom_page = tk.Button(self, text="Custom Items", height = 2, font=('helvetica', 15), command=lambda: controller.show_frame(CustomItems)) custom_page.pack() user_list = tk.Button(self, text="View personalized list", height = 2, font=('helvetica', 15), command=lambda: self.show_plist(LandingPage, controller)) user_list.pack() login_page = tk.Button(self, text="Log out", height = 2, font=('helvetica', 15), command=lambda: controller.show_frame(LoginPage)) login_page.pack() 
        # Placeholder label; replaced by show_plist() once the backend responds.
        self.user_list = tk.Label()
        # welcome image of a cute cat
        readImg = renderingUtil.resizeImage("/images/cat.gif")
        self.img = ImageTk.PhotoImage(readImg)  # keep a reference so Tk does not garbage-collect the image
        welcomeImg = tk.Label(self, image=self.img)
        welcomeImg.pack()

    # API call to the backend to retrieve the personalized list of the customer.
    # NOTE(review): userName is assigned but unused; PARAMS hard-codes the same
    # 'customer1' value, so the method ignores any logged-in user — confirm intent.
    def show_plist(self, context, controller):
        URL = "http://52.138.39.36:3000/plist"
        userName = 'customer1'
        PARAMS = {'username': 'customer1'}
        response = requests.post(url=URL, json=PARAMS)
        resJson = response.json()
        userList = []
        # Drop the previously rendered label before building the new one.
        renderingUtil.refresh(self.user_list)
        # presumably each element of resJson['message'] is a dict whose "p" key
        # holds one product/ingredient name — TODO confirm against the backend schema
        for element in resJson['message']:
            userList.append(element["p"])
        str1 = ""
        for element in userList:
            str1 += element.lower()
            str1 += " "
        self.user_list = tk.Label(controller.frames[context], text='Here is your list: ' + str1, font=('helvetica', 15))
        self.user_list.pack(padx=10, pady=10)


"""
class CommonDisplay is a common generic page
both RegularItems and CustomItems classes would inherit from this class.
it contains several buttons and labels displayed back to the users
they differ in which recognition function they call:
thus this function is passed in instantiation of sub-classes
"""


class CommonDisplay:
    # Mixin-style base: `self` is expected to also be a tk.Frame (see RegularItems /
    # CustomItems below, which inherit from both tk.Frame and CommonDisplay), which is
    # why widgets below use `self` as their master.
    def __init__(self, controller, parent, message, scanFunction, *args, **kwargs):
        self.infoButtonList = []
        self.counter = 0
        self.itemList = [None]*20  # 20 items max
        self.ingredientsList = [None]*20
        # subcanvas for rendering ingredients list
        self.subcanvas = tk.Canvas()
        readImg = renderingUtil.resizeImage("/images/Capture.jpg")
        self.img = ImageTk.PhotoImage(readImg)
        self.alert = tk.Label()
        # buttons to check ingredients, go back to home page and instructions labels for the users
        label = tk.Label(self, text=message)
        label.config(font=('helvetica', 30))
        label.pack(padx=10, pady=10)
        # scanFunction is injected by the subclass (OCR vs Recognition flavour).
        scan_items = tk.Button(self, text="Check Ingredients", height = 2, font=('helvetica', 15), command=lambda: scanFunction("customer1"))
        scan_items.pack()
        start_page = tk.Button(self, text="Back to Home Page", height = 2, font=('helvetica', 15),
                               command=lambda: self.backToHomePage(controller))
        start_page.pack()
        self.instruction = tk.Label(self, text="Place item inside box with ingredients list facing camera", font=('helvetica', 15))
        self.instruction.pack()
        self.promptLabel = tk.Label(self, image=self.img)
        self.promptLabel.pack()

    # navigate back to the homepage and clear the existing alerts
    def backToHomePage(self, controller):
        for i in self.itemList:
            if i != None:
                renderingUtil.refresh(i)
        for j in self.ingredientsList:
            if j != None:
                renderingUtil.refresh(j)
        renderingUtil.refresh(self.subcanvas)
        renderingUtil.refresh(self.alert)
        controller.show_frame(LandingPage)

    # print out the intersection between the ingredients received from google API and user's personal list
    # accounts for special cases: matchingArr may also be one of the sentinel
    # strings "notOCR" / "notRecognition" instead of a list.
    def printIntersection(self, warning, matchingArr):
        renderingUtil.refresh(self.alert)
        # tried to scan non-existent words
        if matchingArr == "notOCR":
            self.alert = tk.Label(self, text="No ingredients text detected", font=('helvetica', 15))
            self.alert.pack()
            return
        # item does not exist as a custom item. Prompted to scan printed ingredients
        if matchingArr == "notRecognition":
            self.alert = tk.Label(self, text="Not recognized as a store custom item. Maybe try regular item instead?", font=('helvetica', 15))
            self.alert.pack()
            return
        # no match: no harmful ingredients
        if not matchingArr:
            self.alert = tk.Label(self, text="No harmful ingredients detected", font=('helvetica', 15))
            self.alert.pack()
        # found harmful ingredients
        else:
            warning = "We found the following " + warning + " that you might not want: \n "
            for element in matchingArr:
                warning += element + ", "
            warning = warning[:-2]  # strip the trailing ", "
            self.alert = tk.Label(self, text=warning, font=('helvetica', 15))
            self.alert.pack()

    # get the text from google OCR API and match it against users' lists
    def CheckIngredientsOCR(self, username):
        if self.noImg():
            return
        responseOCR = googleVision.requestOCR(objectImg)
        userList = database.Get_Personal_List(username)
        matchingArr = googleVision.getMatchingArr(responseOCR, userList)
        self.printIntersection("ingredients matching your personal list", matchingArr)

    # print out the ingredients of the corresponding custom item
    # may have to render lists for multiple potential items (if recognized as several custom items)
    def printIngredients(self, subcanvas, itemIngredients, i):
        self.ingredientsList[i] = tk.Label(subcanvas, text=itemIngredients, borderwidth=2, relief="solid", height=2, font=('helvetica', 15))
        self.ingredientsList[i].grid(row=i, column=1)

    # get the tags array from google Recognition API and match it against the store custom items
    def CheckIngredientsRecognition(self, username):
        if self.noImg():
            return
        tags_array = googleVision.requestRecognition(objectImg)
        ingredients_array = database.Get_Custom_Ingredients(tags_array)
        self.subcanvas = tk.Canvas(app.canvas, height=100000000)
        self.subcanvas.pack(padx=(50, 50), pady=(550, 0))
        # create list of all the custom items relevant to the item
        # create buttons that can display these lists with printIngredients()
        # NOTE(review): `max` shadows the builtin, and winfo_width() is queried
        # before the geometry manager has laid the button out — likely returns 1;
        # the computed width is never used afterwards in this block.
        max = 0
        for i in range(0, len(tags_array)):  # Rows
            # '0' appears to be the sentinel for "no custom ingredients stored" — TODO confirm
            if ingredients_array[i] != '0':
                ahoy = partial(self.printIngredients, self.subcanvas, ingredients_array[i], i)
                self.itemList[i] = tk.Button(self.subcanvas, text=tags_array[i], borderwidth=2, relief="solid", height = 2, font=('helvetica', 15), command=ahoy)
                self.itemList[i].grid(row=i, column=0, padx=10, sticky="W")
                if self.itemList[i].winfo_width() > max:
                    max = self.itemList[i].winfo_width()
        userList = database.Get_Personal_List(username)
        # get the matching array
        matchingArr = googleVision.getMatchingArr(ingredients_array, userList)
        self.printIntersection("ingredients matching your personal list", matchingArr)

    # check out all the general harmful substances for store regular items
    def CheckHarmfulOCR(self):
        if self.noImg():
            return
        responseOCR = googleVision.requestOCR(objectImg)
        harmfulList = database.Get_Harmful_List()
        matchingArr = googleVision.getMatchingArr(responseOCR, harmfulList)
        self.printIntersection("generally harmful ingredients", matchingArr)

    # check out all the general harmful substances for store custom items
    def CheckHarmfulRecognition(self):
        if self.noImg():
            return
        responseRec = googleVision.requestRecognition(objectImg)
        responseRec = database.Get_Custom_Ingredients(responseRec)
        harmfulList = database.Get_Harmful_List()
        matchingArr = googleVision.getMatchingArr(responseRec, harmfulList)
        self.printIntersection("generally harmful ingredients", matchingArr)

    # NOTE: unused. originally planned for user to manually refresh image
    # final implementation it automatically refreshes. Therefore acceptNextImage never becomes false
    # accept incoming cropped image
    def MakeAcceptNextImage(self):
        global acceptNextImage
        acceptNextImage = True
        actualPoll()

    # alert users that no items detected; returns True (and shows an alert) when
    # no cropped image is available yet, False otherwise.
    def noImg(self):
        if objectImg is None:
            self.alert = tk.Label(self, text="No object detected, or image has not loaded yet. \n PLease wait for image of object to show up before attempting scan", font=('helvetica', 15))
            self.alert.pack()
            return True
        return False


# class RegularItems is the page in charge of scanning store Regular Items
class RegularItems(tk.Frame, CommonDisplay):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        CommonDisplay.__init__(self, message="Scan regular items here", scanFunction=self.CheckIngredientsOCR, parent=parent, controller=controller)


# class CustomItems is the page in charge of scanning store Custom Items
class CustomItems(tk.Frame, CommonDisplay):
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        CommonDisplay.__init__(self, message="Scan store custom items here", scanFunction=self.CheckIngredientsRecognition, parent=parent, controller=controller)


# this class provides an exit button for the app
class MainMenu:
    def __init__(self, master):
        menubar = tk.Menu(master)
        filemenu = tk.Menu(menubar, tearoff=0)
        filemenu.add_command(label="Exit", command=master.quit)
        menubar.add_cascade(label="File", menu=filemenu)
        master.config(menu=menubar)


app = App()


# this function will check whether the image has been generated and cropped
# it will display the newly processed image to the user
def loadProcessedImage(frame):
    # tell users to make google vision call or place an item based on the status of the image
    global app
    renderingUtil.refresh(app.frames[frame].instruction)
    try:
        # Opening the file is only used as an existence/readability probe.
        tryOpen = Image.open(workingDir + objectImg)
        app.frames[frame].instruction = tk.Label(app.frames[frame], text="Your item is ready to be scanned", font=('helvetica', 15))
    except OSError:
        print('cannot open')
        app.frames[frame].instruction = tk.Label(app.frames[frame], text="Please place an item in front of the camera", font=('helvetica', 15))
    app.frames[frame].instruction.pack()
    # update the image when a new image is generated and cropped
    renderingUtil.refresh(app.frames[frame].promptLabel)
    readImg = renderingUtil.resizeImage(objectImg)
    app.frames[frame].img = ImageTk.PhotoImage(readImg)
    app.frames[frame].promptLabel = tk.Label(app.frames[frame], image=app.frames[frame].img)
    app.frames[frame].promptLabel.pack()


# this function calls actualPoll() every 1 second
def pollPicture():
    actualPoll()
    app.after(1000, pollPicture)


# check if an image has been processed
# call loadProcessedImage() if an image is available
def actualPoll():
    global acceptNextImage
    global objectImg
    global imageQueue
    global ackQueue
    if not imageQueue.empty():
        print("not empty")
        # NOTE(review): the queue item is consumed but its value is discarded here;
        # objectImg is presumably set elsewhere — confirm.
        imageQueue.get()
        if acceptNextImage:
            loadProcessedImage(RegularItems)
            loadProcessedImage(CustomItems)
            # acceptNextImage = False
            ackQueue.put(True)  # tell the camera process it may send the next frame


# main function called to instantiate the app main loop
# it also runs another process besides the mainloop of the app
# this other process is the communication protocol between DE1-SoC and the Raspberry Pi
app.after(3000, pollPicture)
if __name__ == "__main__":
    producer = mp.Process(target=camera.run, args=(imageQueue, ackQueue))
    producer.start()
    ackQueue.put(True)
    # NOTE(review): mainloop() is called twice; the second call restarts the Tk
    # event loop after the window is closed — probably unintentional. Confirm.
    app.mainloop()
    app.mainloop()
light_reaper.py
# -*- coding: utf-8 -*-
# Copyright 2016-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2016-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2019-2021
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020

'''
Light Reaper is a daemon to manage temporary object/file deletion.
'''

import hashlib
import logging
import os
import random
import socket
import sys
import threading
import time
import traceback

import rucio.db.sqla.util
from rucio.common.config import config_get_bool
from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
                                    RSEAccessDenied, RSENotFound, ResourceTemporaryUnavailable,
                                    VONotFound)
from rucio.common.logging import setup_logging
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.rse_expression_parser import parse_expression
from rucio.core.temporary_did import (list_expired_temporary_dids, delete_temporary_dids)
from rucio.core.vo import list_vos
from rucio.rse import rsemanager as rsemgr

logging.getLogger("requests").setLevel(logging.CRITICAL)

GRACEFUL_STOP = threading.Event()


def reaper(rses=None, worker_number=0, total_workers=1, chunk_size=100, once=False, scheme=None):
    """
    Main loop to select and delete files.

    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs.
    :param worker_number: The worker number.
    :param total_workers: The total number of workers.
    :param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param scheme: Force the reaper to use a particular protocol, e.g., mock.
    """
    # Avoid a mutable default argument; None behaves like the old [] default.
    rses = rses or []
    logging.info('Starting Light Reaper %s-%s: Will work on RSEs: %s',
                 worker_number, total_workers, ', '.join([rse['rse'] for rse in rses]))

    pid = os.getpid()
    thread = threading.current_thread()
    hostname = socket.gethostname()
    executable = ' '.join(sys.argv)
    # BUGFIX: hashlib.sha256() requires bytes; passing a str raises TypeError on
    # Python 3, so the string is encoded before hashing.
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join([rse['rse'] for rse in rses])).encode()).hexdigest()
    sanity_check(executable=None, hostname=hostname)

    while not GRACEFUL_STOP.is_set():
        try:
            # heartbeat
            heartbeat = live(executable=executable, hostname=hostname, pid=pid,
                             thread=thread, hash_executable=hash_executable)
            logging.info('Light Reaper({0[worker_number]}/{0[total_workers]}): Live gives {0[heartbeat]}'.format(locals()))
            nothing_to_do = True

            # Shuffle so concurrent workers do not all hit the same RSE first.
            random.shuffle(rses)
            for rse in rses:
                rse_id = rse['id']
                rse = rse['rse']
                replicas = list_expired_temporary_dids(rse_id=rse_id,
                                                       limit=chunk_size,
                                                       worker_number=worker_number,
                                                       total_workers=total_workers)

                rse_info = rsemgr.get_rse_info(rse_id=rse_id)
                # NOTE(review): result unused, but the call performs a DB read; kept
                # to preserve behavior — confirm whether it can be dropped.
                rse_protocol = rse_core.get_rse_protocols(rse_id=rse_id)
                prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
                deleted_replicas = []
                try:
                    prot.connect()
                    for replica in replicas:
                        nothing_to_do = False
                        try:
                            # pfn = str(rsemgr.lfns2pfns(rse_settings=rse_info,
                            #                            lfns=[{'scope': replica['scope'].external, 'name': replica['name'], 'path': replica['path']}],
                            #                            operation='delete', scheme=scheme).values()[0])
                            pfn = 's3://%s%s%s' % (prot.attributes['hostname'], prot.attributes['prefix'], replica['name'])
                            # logging.debug('Light Reaper %s-%s: Deletion ATTEMPT of %s:%s as %s on %s', worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
                            start = time.time()
                            prot.delete(pfn)
                            duration = time.time() - start
                            logging.info('Light Reaper %s-%s: Deletion SUCCESS of %s:%s as %s on %s in %s seconds',
                                         worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, duration)
                            payload = {'scope': replica['scope'].external,
                                       'name': replica['name'],
                                       'rse': rse,
                                       'rse_id': rse_id,
                                       'file-size': replica.get('bytes') or 0,
                                       'bytes': replica.get('bytes') or 0,
                                       'url': pfn,
                                       'duration': duration,
                                       'protocol': prot.attributes['scheme']}
                            if replica['scope'].vo != 'def':
                                payload['vo'] = replica['scope'].vo
                            add_message('deletion-done', payload)
                            deleted_replicas.append(replica)
                        except SourceNotFound:
                            # Already gone on storage: treat as deleted so the DID row is cleaned up.
                            err_msg = 'Light Reaper %s-%s: Deletion NOTFOUND of %s:%s as %s on %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse)
                            logging.warning(err_msg)
                            deleted_replicas.append(replica)
                        except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                            # Transient/storage errors: report the failure, keep the DID for a retry.
                            err_msg = 'Light Reaper %s-%s: Deletion NOACCESS of %s:%s as %s on %s: %s' % (worker_number, total_workers, replica['scope'], replica['name'], pfn, rse, str(error))
                            logging.warning(err_msg)
                            payload = {'scope': replica['scope'].external,
                                       'name': replica['name'],
                                       'rse': rse,
                                       'rse_id': rse_id,
                                       'file-size': replica['bytes'] or 0,
                                       'bytes': replica['bytes'] or 0,
                                       'url': pfn,
                                       'reason': str(error),
                                       'protocol': prot.attributes['scheme']}
                            if replica['scope'].vo != 'def':
                                payload['vo'] = replica['scope'].vo
                            add_message('deletion-failed', payload)
                        except Exception:
                            # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
                            logging.critical(traceback.format_exc())
                finally:
                    prot.close()

                delete_temporary_dids(dids=deleted_replicas)
                if once:
                    break
            if once:
                break
            if nothing_to_do:
                logging.info('Light Reaper %s-%s: Nothing to do. I will sleep for 60s', worker_number, total_workers)
                time.sleep(60)
        except DatabaseException as error:
            logging.warning('Reaper: %s', str(error))
        except Exception:
            logging.critical(traceback.format_exc())

    die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
    logging.info('Graceful stop requested')
    logging.info('Graceful stop done')
    return


def stop(signum=None, frame=None):
    """
    Graceful exit.
    """
    GRACEFUL_STOP.set()


def run(total_workers=1, chunk_size=100, once=False, rses=None, scheme=None,
        exclude_rses=None, include_rses=None, vos=None, delay_seconds=0):
    """
    Starts up the reaper threads.

    :param total_workers: The total number of workers.
    :param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs. (Single-VO only)
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
    :param include_rses: RSE expression to include RSEs.
    :param vos: VOs on which to look for RSEs. Only used in multi-VO mode.
                If None, we either use all VOs if run from "def", or the current VO otherwise.
    """
    setup_logging()

    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')

    logging.info('main: starting processes')

    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    if not multi_vo:
        if vos:
            logging.warning('Ignoring argument vos, this is only applicable in a multi-VO setup.')
        vos = ['def']
    else:
        if vos:
            invalid = set(vos) - set([v['vo'] for v in list_vos()])
            if invalid:
                msg = 'VO{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
                                                       ', '.join([repr(v) for v in invalid]))
                raise VONotFound(msg)
        else:
            vos = [v['vo'] for v in list_vos()]
        logging.info('Light Reaper: This instance will work on VO%s: %s' % ('s' if len(vos) > 1 else '', ', '.join([v for v in vos])))

    all_rses = []
    for vo in vos:
        all_rses.extend(rse_core.list_rses(filters={'vo': vo}))

    if rses:
        invalid = set(rses) - set([rse['rse'] for rse in all_rses])
        if invalid:
            msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
                                                    ', '.join([repr(rse) for rse in invalid]))
            raise RSENotFound(msg)
        rses = [rse for rse in all_rses if rse['rse'] in rses]
    else:
        rses = all_rses

    if exclude_rses:
        excluded_rses = parse_expression(exclude_rses)
        rses = [rse for rse in rses if rse not in excluded_rses]

    if include_rses:
        included_rses = parse_expression(include_rses)
        rses = [rse for rse in rses if rse in included_rses]

    if not rses:
        logging.error('Light Reaper: No RSEs found. Exiting.')
        return

    threads = []
    for worker in range(total_workers):
        kwargs = {'worker_number': worker,
                  'total_workers': total_workers,
                  'rses': rses,
                  'once': once,
                  'chunk_size': chunk_size,
                  'scheme': scheme}
        threads.append(threading.Thread(target=reaper, kwargs=kwargs,
                                        name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
    [t.start() for t in threads]
    while threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
demo.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import glob
import multiprocessing as mp
import numpy as np
import os
import queue
import threading
import time
from typing import NoReturn, Tuple

import cv2
import tqdm

from detectron2.config import get_cfg
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger

from predictor import VisualizationDemo

# constants
WINDOW_NAME = "COCO detections"


class VideoCapture():
    """Threaded wrapper around cv2.VideoCapture for live streams.

    A background reader drains the device continuously and keeps only the most
    recent frame in a size-1 queue, so read() never returns a stale frame even
    when the consumer is slower than the stream.
    """

    def __init__(self, name: str) -> None:
        self.cap = cv2.VideoCapture(name)
        self.q = queue.Queue(1)  # holds at most the newest frame
        t = threading.Thread(target=self._reader)
        t.daemon = True  # do not block interpreter exit
        t.start()

    def _reader(self) -> None:
        """Continuously read frames, dropping any frame the consumer has not taken."""
        while True:
            ret, frame = self.cap.read()
            if not ret:
                break
            if not self.q.empty():
                try:
                    self.q.get_nowait()  # discard previous (unprocessed) frame
                except queue.Empty:
                    # BUGFIX: was `queue.Queue.Empty`, which does not exist and
                    # would raise AttributeError; the exception lives on the
                    # module as `queue.Empty`. Benign race: the consumer may take
                    # the frame between empty() and get_nowait().
                    pass
            self.q.put_nowait((ret, frame))

    def isOpened(self) -> bool:
        # Duck-types cv2.VideoCapture so VisualizationDemo.run_on_video accepts us.
        return True

    def read(self) -> Tuple[bool, np.ndarray]:
        # Blocks until a frame is available; returns (ret, frame) like
        # cv2.VideoCapture.read(). (Annotation fixed: the original claimed
        # np.ndarray but the queue holds (ret, frame) tuples.)
        return self.q.get()


def setup_cfg(args):
    """Build a frozen detectron2 config from the config file and CLI overrides."""
    # load config from file and command-line arguments
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    # Set score_threshold for builtin models
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    cfg.freeze()
    return cfg


def get_parser():
    """Build the CLI argument parser for the demo."""
    parser = argparse.ArgumentParser(description="Detectron2 demo for builtin models")
    parser.add_argument(
        "--config-file",
        default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    parser.add_argument("--video-input", help="Path to video file.")
    parser.add_argument(
        "--input",
        nargs="+",
        help="A list of space separated input images; "
        "or a single glob pattern such as 'directory/*.jpg'",
    )
    parser.add_argument(
        "--output",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    parser.add_argument(
        "--confidence-threshold",
        type=float,
        default=0.5,
        help="Minimum score for instance predictions to be shown",
    )
    parser.add_argument(
        "--opts",
        help="Modify config options using the command-line 'KEY VALUE' pairs",
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == "__main__":
    mp.set_start_method("spawn", force=True)
    args = get_parser().parse_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)

    demo = VisualizationDemo(cfg)

    if args.input:
        if len(args.input) == 1:
            # a single argument may be a glob pattern
            args.input = glob.glob(os.path.expanduser(args.input[0]))
            assert args.input, "The input path(s) was not found"
        for path in tqdm.tqdm(args.input, disable=not args.output):
            # use PIL, to be consistent with evaluation
            img = read_image(path, format="BGR")
            start_time = time.time()
            predictions, visualized_output = demo.run_on_image(img)
            logger.info(
                "{}: {} in {:.2f}s".format(
                    path,
                    "detected {} instances".format(len(predictions["instances"]))
                    if "instances" in predictions
                    else "finished",
                    time.time() - start_time,
                )
            )

            if args.output:
                if os.path.isdir(args.output):
                    out_filename = os.path.join(args.output, os.path.basename(path))
                else:
                    assert len(args.input) == 1, "Please specify a directory with args.output"
                    out_filename = args.output
                visualized_output.save(out_filename)
            else:
                cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
                cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
                if cv2.waitKey(0) == 27:
                    break  # esc to quit
    elif args.webcam:
        assert args.input is None, "Cannot have both --input and --webcam!"
        # assert args.output is None, "output not yet supported with --webcam!"
        # SECURITY NOTE: the RTSP URL embeds plaintext credentials; consider
        # moving them to an environment variable or config file.
        cam = VideoCapture("rtsp://admin:admin1234@192.168.0.70:554/live")
        for vis in tqdm.tqdm(demo.run_on_video(cam)):
            cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
            cv2.imshow(WINDOW_NAME, vis)
            if cv2.waitKey(1) == 27:
                break  # esc to quit
        cv2.destroyAllWindows()
    elif args.video_input:
        video = cv2.VideoCapture(args.video_input)
        width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frames_per_second = video.get(cv2.CAP_PROP_FPS)
        num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        basename = os.path.basename(args.video_input)

        if args.output:
            if os.path.isdir(args.output):
                output_fname = os.path.join(args.output, basename)
                output_fname = os.path.splitext(output_fname)[0] + ".mkv"
            else:
                output_fname = args.output
            assert not os.path.isfile(output_fname), output_fname
            output_file = cv2.VideoWriter(
                filename=output_fname,
                # some installation of opencv may not support x264 (due to its license),
                # you can try other format (e.g. MPEG)
                fourcc=cv2.VideoWriter_fourcc(*"x264"),
                fps=float(frames_per_second),
                frameSize=(width, height),
                isColor=True,
            )
        assert os.path.isfile(args.video_input)
        for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
            if args.output:
                output_file.write(vis_frame)
            else:
                cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
                cv2.imshow(basename, vis_frame)
                if cv2.waitKey(1) == 27:
                    break  # esc to quit
        video.release()
        if args.output:
            output_file.release()
        else:
            cv2.destroyAllWindows()
test_execute.py
# pylint doesn't know about pytest fixtures
# pylint: disable=unused-argument

# Integration tests for the dagster-celery executor: pipelines are executed
# either against a live celery worker (the `dagster_celery_worker` fixture)
# or eagerly in-process (execute_eagerly_on_celery), and the emitted event
# stream is asserted on.

import os
from threading import Thread
from unittest import mock

import pytest
from dagster_celery_tests.repo import COMPOSITE_DEPTH

from dagster import (
    CompositeSolidExecutionResult,
    PipelineExecutionResult,
    SolidExecutionResult,
    execute_pipeline,
    execute_pipeline_iterator,
)
from dagster.core.definitions.reconstruct import ReconstructablePipeline
from dagster.core.errors import DagsterSubprocessError
from dagster.core.events import DagsterEventType
from dagster.utils import send_interrupt

from .utils import (  # isort:skip
    execute_eagerly_on_celery,
    execute_pipeline_on_celery,
    events_of_type,
    REPO_FILE,
)


def test_execute_on_celery_default(dagster_celery_worker):
    # Single-solid pipeline: expect exactly one of each lifecycle event.
    with execute_pipeline_on_celery("test_pipeline") as result:
        assert result.result_for_solid("simple").output_value() == 1
        assert len(result.step_event_list) == 4
        assert len(events_of_type(result, "STEP_START")) == 1
        assert len(events_of_type(result, "STEP_OUTPUT")) == 1
        assert len(events_of_type(result, "HANDLED_OUTPUT")) == 1
        assert len(events_of_type(result, "STEP_SUCCESS")) == 1


def test_execute_serial_on_celery(dagster_celery_worker):
    # Two dependent steps: one input load, two outputs.
    with execute_pipeline_on_celery("test_serial_pipeline") as result:
        assert result.result_for_solid("simple").output_value() == 1
        assert result.result_for_solid("add_one").output_value() == 2
        assert len(result.step_event_list) == 10
        assert len(events_of_type(result, "STEP_START")) == 2
        assert len(events_of_type(result, "STEP_INPUT")) == 1
        assert len(events_of_type(result, "STEP_OUTPUT")) == 2
        assert len(events_of_type(result, "HANDLED_OUTPUT")) == 2
        assert len(events_of_type(result, "LOADED_INPUT")) == 1
        assert len(events_of_type(result, "STEP_SUCCESS")) == 2


def test_execute_diamond_pipeline_on_celery(dagster_celery_worker):
    with execute_pipeline_on_celery("test_diamond_pipeline") as result:
        assert result.result_for_solid("emit_values").output_values == {
            "value_one": 1,
            "value_two": 2,
        }
        assert result.result_for_solid("add_one").output_value() == 2
        assert result.result_for_solid("renamed").output_value() == 3
        assert result.result_for_solid("subtract").output_value() == -1


def test_execute_parallel_pipeline_on_celery(dagster_celery_worker):
    with execute_pipeline_on_celery("test_parallel_pipeline") as result:
        assert len(result.solid_result_list) == 11


def test_execute_composite_pipeline_on_celery(dagster_celery_worker):
    # Walk the nested composite tree level by level: each level doubles the
    # number of composites until COMPOSITE_DEPTH, where the leaves are plain solids.
    with execute_pipeline_on_celery("composite_pipeline") as result:
        assert result.success
        assert isinstance(result, PipelineExecutionResult)
        assert len(result.solid_result_list) == 1
        composite_solid_result = result.solid_result_list[0]
        assert len(composite_solid_result.solid_result_list) == 2
        for r in composite_solid_result.solid_result_list:
            assert isinstance(r, CompositeSolidExecutionResult)
        composite_solid_results = composite_solid_result.solid_result_list
        for i in range(COMPOSITE_DEPTH):
            next_level = []
            assert len(composite_solid_results) == pow(2, i + 1)
            for res in composite_solid_results:
                assert isinstance(res, CompositeSolidExecutionResult)
                for r in res.solid_result_list:
                    next_level.append(r)
            composite_solid_results = next_level

        assert len(composite_solid_results) == pow(2, COMPOSITE_DEPTH + 1)
        assert all(
            (isinstance(r, SolidExecutionResult) and r.success for r in composite_solid_results)
        )


def test_execute_optional_outputs_pipeline_on_celery(dagster_celery_worker):
    # Half the solids skip because their upstream optional output is absent.
    with execute_pipeline_on_celery("test_optional_outputs") as result:
        assert len(result.solid_result_list) == 4
        assert sum([int(x.skipped) for x in result.solid_result_list]) == 2
        assert sum([int(x.success) for x in result.solid_result_list]) == 2


def test_execute_fails_pipeline_on_celery(dagster_celery_worker):
    with execute_pipeline_on_celery("test_fails") as result:
        assert len(result.solid_result_list) == 2  # fail & skip
        assert not result.result_for_solid("fails").success
        assert (
            "Exception: argjhgjh\n"
            in result.result_for_solid("fails").failure_data.error.cause.message
        )
        assert result.result_for_solid("should_never_execute").skipped


def test_terminate_pipeline_on_celery(dagster_celery_worker, instance, tempdir):
    # Fire a SIGINT (via send_interrupt) as soon as the first step starts and
    # verify the in-flight step finishes, queued steps are revoked, and the
    # pipeline as a whole is marked failed.
    pipeline_def = ReconstructablePipeline.for_file(REPO_FILE, "interrupt_pipeline")

    run_config = {
        "resources": {"io_manager": {"config": {"base_dir": tempdir}}},
        "execution": {"celery": {}},
    }

    results = []
    result_types = []
    interrupt_thread = None

    for result in execute_pipeline_iterator(
        pipeline=pipeline_def,
        run_config=run_config,
        instance=instance,
    ):
        # Interrupt once the first step starts
        if result.event_type == DagsterEventType.STEP_START and not interrupt_thread:
            interrupt_thread = Thread(target=send_interrupt, args=())
            interrupt_thread.start()

        results.append(result)
        result_types.append(result.event_type)

    interrupt_thread.join()

    # At least one step succeeded (the one that was running when the interrupt fired)
    assert DagsterEventType.STEP_SUCCESS in result_types

    # At least one step was revoked (and there were no step failure events)
    revoke_steps = [
        result
        for result in results
        if result.event_type == DagsterEventType.ENGINE_EVENT and "was revoked." in result.message
    ]

    assert len(revoke_steps) > 0

    # The overall pipeline failed
    assert DagsterEventType.PIPELINE_FAILURE in result_types


def test_execute_eagerly_on_celery(instance):
    with execute_eagerly_on_celery("test_pipeline", instance=instance) as result:
        assert result.result_for_solid("simple").output_value() == 1
        assert len(result.step_event_list) == 4
        assert len(events_of_type(result, "STEP_START")) == 1
        assert len(events_of_type(result, "STEP_OUTPUT")) == 1
        assert len(events_of_type(result, "HANDLED_OUTPUT")) == 1
        assert len(events_of_type(result, "STEP_SUCCESS")) == 1

        # Every engine-event start marker must have a matching end marker with
        # a non-negative elapsed time.
        events = instance.all_logs(result.run_id)
        start_markers = {}
        end_markers = {}
        for event in events:
            dagster_event = event.dagster_event
            if dagster_event and dagster_event.is_engine_event:
                if dagster_event.engine_event_data.marker_start:
                    key = "{step}.{marker}".format(
                        step=event.step_key, marker=dagster_event.engine_event_data.marker_start
                    )
                    start_markers[key] = event.timestamp
                if dagster_event.engine_event_data.marker_end:
                    key = "{step}.{marker}".format(
                        step=event.step_key, marker=dagster_event.engine_event_data.marker_end
                    )
                    end_markers[key] = event.timestamp

        seen = set()
        assert set(start_markers.keys()) == set(end_markers.keys())
        for key in end_markers:
            assert end_markers[key] - start_markers[key] > 0
            seen.add(key)


def test_execute_eagerly_serial_on_celery():
    with execute_eagerly_on_celery("test_serial_pipeline") as result:
        assert result.result_for_solid("simple").output_value() == 1
        assert result.result_for_solid("add_one").output_value() == 2
        assert len(result.step_event_list) == 10
        assert len(events_of_type(result, "STEP_START")) == 2
        assert len(events_of_type(result, "STEP_INPUT")) == 1
        assert len(events_of_type(result, "STEP_OUTPUT")) == 2
        assert len(events_of_type(result, "HANDLED_OUTPUT")) == 2
        assert len(events_of_type(result, "LOADED_INPUT")) == 1
        assert len(events_of_type(result, "STEP_SUCCESS")) == 2


def test_execute_eagerly_diamond_pipeline_on_celery():
    with execute_eagerly_on_celery("test_diamond_pipeline") as result:
        assert result.result_for_solid("emit_values").output_values == {
            "value_one": 1,
            "value_two": 2,
        }
        assert result.result_for_solid("add_one").output_value() == 2
        assert result.result_for_solid("renamed").output_value() == 3
        assert result.result_for_solid("subtract").output_value() == -1


def test_execute_eagerly_diamond_pipeline_subset_on_celery():
    with execute_eagerly_on_celery("test_diamond_pipeline", subset=["emit_values"]) as result:
        assert result.result_for_solid("emit_values").output_values == {
            "value_one": 1,
            "value_two": 2,
        }
        assert len(result.solid_result_list) == 1


def test_execute_eagerly_parallel_pipeline_on_celery():
    with execute_eagerly_on_celery("test_parallel_pipeline") as result:
        assert len(result.solid_result_list) == 11


def test_execute_eagerly_composite_pipeline_on_celery():
    # Same composite-tree walk as the worker-backed variant above.
    with execute_eagerly_on_celery("composite_pipeline") as result:
        assert result.success
        assert isinstance(result, PipelineExecutionResult)
        assert len(result.solid_result_list) == 1
        composite_solid_result = result.solid_result_list[0]
        assert len(composite_solid_result.solid_result_list) == 2
        for r in composite_solid_result.solid_result_list:
            assert isinstance(r, CompositeSolidExecutionResult)
        composite_solid_results = composite_solid_result.solid_result_list
        for i in range(COMPOSITE_DEPTH):
            next_level = []
            assert len(composite_solid_results) == pow(2, i + 1)
            for res in composite_solid_results:
                assert isinstance(res, CompositeSolidExecutionResult)
                for r in res.solid_result_list:
                    next_level.append(r)
            composite_solid_results = next_level

        assert len(composite_solid_results) == pow(2, COMPOSITE_DEPTH + 1)
        assert all(
            (isinstance(r, SolidExecutionResult) and r.success for r in composite_solid_results)
        )


def test_execute_eagerly_optional_outputs_pipeline_on_celery():
    with execute_eagerly_on_celery("test_optional_outputs") as result:
        assert len(result.solid_result_list) == 4
        assert sum([int(x.skipped) for x in result.solid_result_list]) == 2
        assert sum([int(x.success) for x in result.solid_result_list]) == 2


def test_execute_eagerly_resources_limit_pipeline_on_celery():
    with execute_eagerly_on_celery("test_resources_limit") as result:
        assert result.result_for_solid("resource_req_solid").success
        assert result.success


def test_execute_eagerly_fails_pipeline_on_celery():
    with execute_eagerly_on_celery("test_fails") as result:
        assert len(result.solid_result_list) == 2
        assert not result.result_for_solid("fails").success
        assert (
            "Exception: argjhgjh\n"
            in result.result_for_solid("fails").failure_data.error.cause.message
        )
        assert result.result_for_solid("should_never_execute").skipped


def test_execute_eagerly_retries_pipeline_on_celery():
    # One retry: start, up-for-retry, restarted, then final failure.
    with execute_eagerly_on_celery("test_retries") as result:
        assert len(events_of_type(result, "STEP_START")) == 1
        assert len(events_of_type(result, "STEP_UP_FOR_RETRY")) == 1
        assert len(events_of_type(result, "STEP_RESTARTED")) == 1
        assert len(events_of_type(result, "STEP_FAILURE")) == 1


def test_engine_error(instance, tempdir):
    # Force raise_on_error so the engine surfaces the subprocess failure
    # instead of recording it on the result.
    with mock.patch(
        "dagster.core.execution.context.system.PlanData.raise_on_error",
        return_value=True,
    ):
        with pytest.raises(DagsterSubprocessError):
            storage = os.path.join(tempdir, "flakey_storage")
            execute_pipeline(
                ReconstructablePipeline.for_file(REPO_FILE, "engine_error"),
                run_config={
                    "resources": {"io_manager": {"config": {"base_dir": storage}}},
                    "execution": {
                        "celery": {"config": {"config_source": {"task_always_eager": True}}}
                    },
                    "solids": {"destroy": {"config": storage}},
                },
                instance=instance,
            )


def test_memoization_celery_executor(instance, dagster_celery_worker):
    # Second run of the same memoized pipeline should execute zero steps.
    with execute_pipeline_on_celery(
        "bar_pipeline", instance=instance, run_config={"execution": {"celery": {}}}
    ) as result:
        assert result.success
        assert result.output_for_solid("bar_solid") == "bar"

    with execute_pipeline_on_celery(
        "bar_pipeline", instance=instance, run_config={"execution": {"celery": {}}}
    ) as result:
        assert result.success
        assert len(result.step_event_list) == 0
scylla_node.py
# ccm node from __future__ import with_statement import datetime import errno import os import signal import shutil import socket import stat import subprocess import time import threading import psutil import yaml from six import print_ from ccmlib import common from ccmlib.node import Node from ccmlib.node import NodeError def wait_for(func, timeout, first=0.0, step=1.0, text=None): """ Wait until func() evaluates to True. If func() evaluates to True before timeout expires, return the value of func(). Otherwise return None. :param func: Function that will be evaluated. :param timeout: Timeout in seconds :param first: Time to sleep before first attempt :param step: Time to sleep between attempts in seconds :param text: Text to print while waiting, for debug purposes """ start_time = time.time() end_time = time.time() + timeout time.sleep(first) while time.time() < end_time: if text: print_("%s (%f secs)" % (text, (time.time() - start_time))) output = func() if output: return output time.sleep(step) return None class ScyllaNode(Node): """ Provides interactions to a Scylla node. 
""" def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None): super(ScyllaNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface) self.__global_log_level = 'info' self.__classes_log_level = {} self.get_cassandra_version() self._process_jmx = None self._process_jmx_waiter = None self._process_scylla = None self._process_scylla_waiter = None def get_install_cassandra_root(self): return os.path.join(self.get_install_dir(), 'resources', 'cassandra') def get_node_cassandra_root(self): return os.path.join(self.get_path()) def get_conf_dir(self): """ Returns the path to the directory where Cassandra config are located """ return os.path.join(self.get_path(), 'conf') def get_tool(self, toolname): return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname) def get_tool_args(self, toolname): raise NotImplementedError('ScyllaNode.get_tool_args') def get_env(self): return common.make_cassandra_env(self.get_install_cassandra_root(), self.get_node_cassandra_root()) def get_cassandra_version(self): # TODO: Handle versioning return '2.2' def set_log_level(self, new_level, class_name=None): known_level = {'TRACE' : 'trace', 'DEBUG' : 'debug', 'INFO' : 'info', 'WARN' : 'warn', 'ERROR' : 'error', 'OFF' : 'info'} if not known_level.has_key(new_level): raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level))) new_log_level = known_level[new_level] # TODO class_name can be validated against help-loggers if class_name: self.__classes_log_level[class_name] = new_log_level else: self.__global_log_level = new_log_level return self def set_workload(self, workload): raise NotImplementedError('ScyllaNode.set_workload') def cpuset(self, id, count, cluster_id): # leaving one core for other executables 
to run allocated_cpus = psutil.cpu_count() - 1 start_id = (id * count + cluster_id) % allocated_cpus cpuset = [] for cpuid in xrange(start_id, start_id + count): cpuset.append(str(cpuid % allocated_cpus)) return cpuset def _wait_for_jmx(self): if self._process_jmx: self._process_jmx.wait() def _wait_for_scylla(self): if self._process_scylla: self._process_scylla.wait() def _start_jmx(self, data): jmx_jar_dir = os.path.join(self.get_path(), 'bin') jmx_java_bin = os.path.join(jmx_jar_dir, 'symlinks', 'scylla-jmx') jmx_jar = os.path.join(jmx_jar_dir, 'scylla-jmx-1.0.jar') args = [jmx_java_bin, '-Dapiaddress=%s' % data['listen_address'], '-Djavax.management.builder.initial=com.scylladb.jmx.utils.APIBuilder', '-Dcom.sun.management.jmxremote', '-Dcom.sun.management.jmxremote.port=%s' % self.jmx_port, '-Dcom.sun.management.jmxremote.rmi.port=%s' % self.jmx_port, '-Dcom.sun.management.jmxremote.local.only=false', '-Xmx256m', '-XX:+UseSerialGC', '-Dcom.sun.management.jmxremote.authenticate=false', '-Dcom.sun.management.jmxremote.ssl=false', '-jar', jmx_jar] log_file = os.path.join(self.get_path(), 'logs', 'system.log.jmx') jmx_log = open(log_file, 'a') env_copy = os.environ env_copy['SCYLLA_HOME'] = self.get_path() self._process_jmx = subprocess.Popen(args, stdout=jmx_log, stderr=jmx_log, close_fds=True, env=env_copy) self._process_jmx.poll() # When running on ccm standalone, the waiter thread would block # the create commands. Besides in that mode, waiting is unnecessary, # since the original popen reference is garbage collected. 
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None) if standalone is None: self._process_jmx_waiter = threading.Thread(target=self._wait_for_jmx) self._process_jmx_waiter.start() pid_filename = os.path.join(self.get_path(), 'scylla-jmx.pid') with open(pid_filename, 'w') as pid_file: pid_file.write(str(self._process_jmx.pid)) def _start_scylla(self, args, marks, update_pid, wait_other_notice, wait_for_binary_proto): log_file = os.path.join(self.get_path(), 'logs', 'system.log') # In case we are restarting a node # we risk reading the old cassandra.pid file self._delete_old_pid() scylla_log = open(log_file, 'a') env_copy = os.environ env_copy['SCYLLA_HOME'] = self.get_path() self._process_scylla = subprocess.Popen(args, stdout=scylla_log, stderr=scylla_log, close_fds=True, env=env_copy) self._process_scylla.poll() # When running on ccm standalone, the waiter thread would block # the create commands. Besides in that mode, waiting is unnecessary, # since the original popen reference is garbage collected. 
standalone = os.environ.get('SCYLLA_CCM_STANDALONE', None) if standalone is None: self._process_scylla_waiter = threading.Thread(target=self._wait_for_scylla) self._process_scylla_waiter.start() pid_filename = os.path.join(self.get_path(), 'cassandra.pid') with open(pid_filename, 'w') as pid_file: pid_file.write(str(self._process_scylla.pid)) if update_pid: self._update_pid(self._process_scylla) if not self.is_running(): raise NodeError("Error starting node %s" % self.name, self._process_scylla) if wait_other_notice: for node, mark in marks: node.watch_log_for_alive(self, from_mark=mark) if wait_for_binary_proto: self.wait_for_binary_interface(from_mark=self.mark) else: time.sleep(2) return self._process_scylla def _wait_java_up(self, data): java_up = False iteration = 0 while not java_up and iteration < 30: iteration += 1 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: s.settimeout(1.0) s.connect((data['listen_address'], int(self.jmx_port))) java_up = True except: java_up = False try: s.close() except: pass time.sleep(1) return java_up # Scylla Overload start def start(self, join_ring=True, no_wait=False, verbose=False, update_pid=True, wait_other_notice=False, replace_token=None, replace_address=None, jvm_args=None, wait_for_binary_proto=False, profile_options=None, use_jna=False, quiet_start=False): """ Start the node. Options includes: - join_ring: if false, start the node with -Dcassandra.join_ring=False - no_wait: by default, this method returns when the node is started and listening to clients. If no_wait=True, the method returns sooner. - wait_other_notice: if True, this method returns only when all other live node of the cluster have marked this node UP. - replace_token: start the node with the -Dcassandra.replace_token option. - replace_address: start the node with the -Dcassandra.replace_address option. 
""" if jvm_args is None: jvm_args = [] scylla_cassandra_mapping = {'-Dcassandra.replace_address_first_boot': '--replace-address-first-boot'} # Replace args in the form # ['-Dcassandra.foo=bar'] to ['-Dcassandra.foo', 'bar'] translated_args = [] new_jvm_args = [] for jvm_arg in jvm_args: if '=' in jvm_arg: split_option = jvm_arg.split("=") e_msg = ("Option %s not in the form '-Dcassandra.foo=bar'. " "Please check your test" % jvm_arg) assert len(split_option) == 2, e_msg option, value = split_option # If we have information on how to translate the jvm option, # translate it if option in scylla_cassandra_mapping: translated_args += [scylla_cassandra_mapping[option], value] # Otherwise, just pass it as is else: new_jvm_args.append(jvm_arg) else: new_jvm_args.append(jvm_arg) jvm_args = new_jvm_args if self.is_running(): raise NodeError("%s is already running" % self.name) for itf in list(self.network_interfaces.values()): if itf is not None and replace_address is None: common.check_socket_available(itf) marks = [] if wait_other_notice: marks = [(node, node.mark_log()) for node in list(self.cluster.nodes.values()) if node.is_running()] self.mark = self.mark_log() launch_bin = common.join_bin(self.get_path(), 'bin', 'scylla') options_file = os.path.join(self.get_path(), 'conf', 'scylla.yaml') os.chmod(launch_bin, os.stat(launch_bin).st_mode | stat.S_IEXEC) # TODO: we do not support forcing specific settings # TODO: workaround for api-address as we do not load it # from config file scylla#59 conf_file = os.path.join(self.get_conf_dir(), common.SCYLLA_CONF) with open(conf_file, 'r') as f: data = yaml.load(f) jvm_args = jvm_args + ['--api-address', data['api_address']] jvm_args = jvm_args + ['--collectd-hostname', '%s.%s' % (socket.gethostname(), self.name)] # Let's add jvm_args and the translated args args = [launch_bin, '--options-file', options_file, '--log-to-stdout', '1'] + jvm_args + translated_args # Lets search for default overrides in SCYLLA_EXT_OPTS 
scylla_ext_opts = os.getenv('SCYLLA_EXT_OPTS', "").split() opts_i = 0 orig_args = list(args) while opts_i < len(scylla_ext_opts): if scylla_ext_opts[opts_i].startswith("--scylla-manager="): opts_i += 1 elif scylla_ext_opts[opts_i].startswith('-'): add = False if scylla_ext_opts[opts_i] not in orig_args: add = True args.append(scylla_ext_opts[opts_i]) opts_i += 1 while opts_i < len(scylla_ext_opts) and not scylla_ext_opts[opts_i].startswith('-'): if add: args.append(scylla_ext_opts[opts_i]) opts_i += 1 if '--developer-mode' not in args: args += ['--developer-mode', 'true'] if '--smp' not in args: args += ['--smp', '1'] if '--memory' not in args: args += ['--memory', '512M'] if '--default-log-level' not in args: args += ['--default-log-level', self.__global_log_level] # TODO add support for classes_log_level if '--collectd' not in args: args += ['--collectd', '0'] if '--cpuset' not in args: smp = int(args[args.index('--smp') + 1]) id = int(data['listen_address'].split('.')[3]) - 1 cpuset = self.cpuset(id, smp, self.cluster.id) args += ['--cpuset', ','.join(cpuset)] if '--prometheus-address' not in args: args += ['--prometheus-address', data['api_address']] if replace_address: args += ['--replace-address', replace_address] args += ['--unsafe-bypass-fsync', '1'] scylla_process = self._start_scylla(args, marks, update_pid, wait_other_notice, wait_for_binary_proto) self._start_jmx(data) if not self._wait_java_up(data): e_msg = ("Error starting node %s: unable to connect to scylla-jmx" % self.name) raise NodeError(e_msg, scylla_process) self.is_running() return scylla_process def start_dse(self, join_ring=True, no_wait=False, verbose=False, update_pid=True, wait_other_notice=False, replace_token=None, replace_address=None, jvm_args=None, wait_for_binary_proto=False, profile_options=None, use_jna=False): """ Start the node. 
Options includes: - join_ring: if false, start the node with -Dcassandra.join_ring=False - no_wait: by default, this method returns when the node is started and listening to clients. If no_wait=True, the method returns sooner. - wait_other_notice: if True, this method returns only when all other live node of the cluster have marked this node UP. - replace_token: start the node with the -Dcassandra.replace_token option. - replace_address: start the node with the -Dcassandra.replace_address option. """ if jvm_args is None: jvm_args = [] raise NotImplementedError('ScyllaNode.start_dse') def _update_jmx_pid(self): pidfile = os.path.join(self.get_path(), 'scylla-jmx.pid') start = time.time() while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0): if time.time() - start > 30.0: print_("Timed out waiting for pidfile to be filled " "(current time is %s)" % (datetime.datetime.now())) break else: time.sleep(0.1) try: with open(pidfile, 'r') as f: self.jmx_pid = int(f.readline().strip()) except IOError as e: raise NodeError('Problem starting node %s scylla-jmx due to %s' % (self.name, e)) def stop(self, wait=True, wait_other_notice=False, gently=True): """ Stop the node. - wait: if True (the default), wait for the Scylla process to be really dead. Otherwise return after having sent the kill signal. - wait_other_notice: return only when the other live nodes of the cluster have marked this node has dead. - gently: Let Scylla and Scylla JMX clean up and shut down properly. Otherwise do a 'kill -9' which shuts down faster. 
""" marks = [] if self.is_running(): if wait_other_notice: marks = [(node, node.mark_log()) for node in list(self.cluster.nodes.values()) if node.is_live() and node is not self] self._update_jmx_pid() if self._process_jmx and self._process_scylla: if gently: try: self._process_jmx.terminate() except OSError as e: pass try: self._process_scylla.terminate() except OSError as e: pass else: try: self._process_jmx.kill() except OSError as e: pass try: self._process_scylla.kill() except OSError as e: pass else: signal_mapping = {True: signal.SIGTERM, False: signal.SIGKILL} for pid in [self.jmx_pid, self.pid]: try: os.kill(pid, signal_mapping[gently]) except OSError: pass if wait_other_notice: for node, mark in marks: node.watch_log_for_death(self, from_mark=mark) else: time.sleep(.1) still_running = self.is_running() if still_running and wait: wait_time_sec = 1 for i in xrange(0, 7): time.sleep(wait_time_sec) if not self.is_running(): return True wait_time_sec *= 2 raise NodeError("Problem stopping node %s" % self.name) else: return True else: return False def import_config_files(self): # TODO: override node - enable logging self._update_config() self.copy_config_files() self.__update_yaml() self.__copy_logback_files() def __copy_logback_files(self): shutil.copy(os.path.join(self.get_install_dir(), common.DSE_CASSANDRA_CONF_DIR, 'logback-tools.xml'), os.path.join(self.get_conf_dir(), 'logback-tools.xml')) def import_dse_config_files(self): raise NotImplementedError('ScyllaNode.import_dse_config_files') def copy_config_files_dse(self): raise NotImplementedError('ScyllaNode.copy_config_files_dse') def hard_link_or_copy(self, src, dst): try: os.link(src, dst) except OSError as oserror: if oserror.errno == errno.EXDEV or oserror.errno == errno.EMLINK: shutil.copy(src, dst) else: raise RuntimeError("Unable to create hard link from %s to %s: %s" % (src, dst, oserror)) def import_bin_files(self): # selectively copying files to reduce risk of using unintended items files = 
['cassandra.in.sh', 'nodetool'] os.makedirs(os.path.join(self.get_path(), 'resources', 'cassandra', 'bin')) for name in files: self.hard_link_or_copy(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin', name), os.path.join(self.get_path(), 'resources', 'cassandra', 'bin', name)) # selectively copying files to reduce risk of using unintended items files = ['sstabledump', 'sstablelevelreset', 'sstablemetadata', 'sstablerepairedset', 'sstablesplit'] os.makedirs(os.path.join(self.get_path(), 'resources', 'cassandra', 'tools', 'bin')) for name in files: self.hard_link_or_copy(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'tools', 'bin', name), os.path.join(self.get_path(), 'resources', 'cassandra', 'tools', 'bin', name)) # TODO: - currently no scripts only executable - copying exec scylla_mode = self.cluster.get_scylla_mode() self.hard_link_or_copy(os.path.join(self.get_install_dir(), 'build', scylla_mode, 'scylla'), os.path.join(self.get_bin_dir(), 'scylla')) self.hard_link_or_copy(os.path.join(self.get_install_dir(), '..', 'scylla-jmx', 'target', 'scylla-jmx-1.0.jar'), os.path.join(self.get_bin_dir(), 'scylla-jmx-1.0.jar')) self.hard_link_or_copy(os.path.join(self.get_install_dir(), '..', 'scylla-jmx', 'scripts', 'scylla-jmx'), os.path.join(self.get_bin_dir(), 'scylla-jmx')) os.makedirs(os.path.join(self.get_bin_dir(), 'symlinks')) os.symlink('/usr/bin/java', os.path.join(self.get_bin_dir(), 'symlinks', 'scylla-jmx')) parent_dir = os.path.dirname(os.path.realpath(__file__)) resources_bin_dir = os.path.join(parent_dir, '..', 'resources', 'bin') for name in os.listdir(resources_bin_dir): filename = os.path.join(resources_bin_dir, name) if os.path.isfile(filename): shutil.copy(filename, self.get_bin_dir()) common.add_exec_permission(self.get_bin_dir(), name) def _save(self): # TODO: - overwrite node self.__update_yaml() self._update_config() def __update_yaml(self): # TODO: copied from node.py conf_file = 
os.path.join(self.get_conf_dir(), common.SCYLLA_CONF) with open(conf_file, 'r') as f: data = yaml.load(f) data['cluster_name'] = self.cluster.name data['auto_bootstrap'] = self.auto_bootstrap data['initial_token'] = self.initial_token if (not self.cluster.use_vnodes and self.get_base_cassandra_version() >= 1.2): data['num_tokens'] = 1 if 'seeds' in data: # cassandra 0.7 data['seeds'] = self.cluster.get_seeds() else: # cassandra 0.8 data['seed_provider'][0]['parameters'][0]['seeds'] = ( ','.join(self.cluster.get_seeds())) data['listen_address'], data['storage_port'] = ( self.network_interfaces['storage']) data['rpc_address'], data['rpc_port'] = ( self.network_interfaces['thrift']) if (self.network_interfaces['binary'] is not None and self.get_base_cassandra_version() >= 1.2): _, data['native_transport_port'] = self.network_interfaces['binary'] data['data_file_directories'] = [os.path.join(self.get_path(), 'data')] data['commitlog_directory'] = os.path.join(self.get_path(), 'commitlogs') data['hints_directory'] = os.path.join(self.get_path(), 'hints') data['saved_caches_directory'] = os.path.join(self.get_path(), 'saved_caches') if self.cluster.partitioner: data['partitioner'] = self.cluster.partitioner # TODO: add scylla options data['api_address'] = data['listen_address'] # last win and we want node options to win full_options = dict(self.cluster._config_options.items() + self.get_config_options().items()) for name in full_options: value = full_options[name] if value is None: try: del data[name] except KeyError: # it is fine to remove a key not there:w pass else: try: if isinstance(data[name], dict): for option in full_options[name]: data[name][option] = full_options[name][option] else: data[name] = full_options[name] except KeyError: data[name] = full_options[name] with open(conf_file, 'w') as f: yaml.safe_dump(data, f, default_flow_style=False) # TODO: - for now create a cassandra conf file leaving only # cassandra config items - this should be removed once tools 
are # updated to remove scylla conf and use a shrunk version cassandra_conf_file = os.path.join(self.get_conf_dir(), common.CASSANDRA_CONF) cassandra_conf_items = {'authenticator': 0, 'authorizer': 0, 'auto_snapshot': 0, 'batch_size_warn_threshold_in_kb': 0, 'batchlog_replay_throttle_in_kb': 0, 'broadcast_address': 0, 'broadcast_rpc_address': 0, 'cas_contention_timeout_in_ms': 0, 'client_encryption_options': 0, 'cluster_name': 0, 'column_index_size_in_kb': 0, 'commit_failure_policy': 0, 'commitlog_directory': 0, 'commitlog_segment_size_in_mb': 0, 'commitlog_sync': 0, 'commitlog_sync_batch_window_in_ms': 0, 'commitlog_sync_period_in_ms': 0, 'commitlog_total_space_in_mb': 0, 'compaction_large_partition_warning_threshold_mb': 0, 'compaction_throughput_mb_per_sec': 0, 'concurrent_compactors': 0, 'concurrent_counter_writes': 0, 'concurrent_reads': 0, 'concurrent_writes': 0, 'counter_cache_keys_to_save': 0, 'counter_cache_save_period': 0, 'counter_cache_size_in_mb': 0, 'counter_write_request_timeout_in_ms': 0, 'cross_node_timeout': 0, 'data_file_directories': 0, 'disk_failure_policy': 0, 'dynamic_snitch_badness_threshold': 0, 'dynamic_snitch_reset_interval_in_ms': 0, 'dynamic_snitch_update_interval_in_ms': 0, 'endpoint_snitch': 0, 'file_cache_size_in_mb': 0, 'hinted_handoff_enabled': 0, 'hinted_handoff_throttle_in_kb': 0, 'incremental_backups': 0, 'index_summary_capacity_in_mb': 0, 'index_summary_resize_interval_in_minutes': 0, 'inter_dc_stream_throughput_outbound_megabits_per_sec': 0, 'inter_dc_tcp_nodelay': 0, 'internode_authenticator': 0, 'internode_compression': 0, 'key_cache_keys_to_save': 0, 'key_cache_save_period': 0, 'key_cache_size_in_mb': 0, 'listen_address': 0, 'listen_interface': 0, 'listen_interface_prefer_ipv6': 0, 'max_hint_window_in_ms': 0, 'max_hints_delivery_threads': 0, 'memory_allocator': 0, 'memtable_allocation_type': 0, 'memtable_cleanup_threshold': 0, 'memtable_flush_writers': 0, 'memtable_heap_space_in_mb': 0, 'memtable_offheap_space_in_mb': 0, 
'native_transport_max_concurrent_connections': 0, 'native_transport_max_concurrent_connections_per_ip': 0, 'native_transport_max_frame_size_in_mb': 0, 'native_transport_max_threads': 0, 'native_transport_port': 0, 'num_tokens': 0, 'partitioner': 0, 'permissions_validity_in_ms': 0, 'phi_convict_threshold': 0, 'range_request_timeout_in_ms': 0, 'read_request_timeout_in_ms': 0, 'request_scheduler': 0, 'request_scheduler_id': 0, 'request_scheduler_options': 0, 'request_timeout_in_ms': 0, 'row_cache_keys_to_save': 0, 'row_cache_save_period': 0, 'row_cache_size_in_mb': 0, 'rpc_address': 0, 'rpc_interface': 0, 'rpc_interface_prefer_ipv6': 0, 'rpc_keepalive': 0, 'rpc_max_threads': 0, 'rpc_min_threads': 0, 'rpc_port': 0, 'rpc_recv_buff_size_in_bytes': 0, 'rpc_send_buff_size_in_bytes': 0, 'rpc_server_type': 0, 'seed_provider': 0, 'server_encryption_options': 0, 'snapshot_before_compaction': 0, 'ssl_storage_port': 0, 'sstable_preemptive_open_interval_in_mb': 0, 'start_native_transport': 0, 'start_rpc': 0, 'storage_port': 0, 'stream_throughput_outbound_megabits_per_sec': 0, 'streaming_socket_timeout_in_ms': 0, 'thrift_framed_transport_size_in_mb': 0, 'tombstone_failure_threshold': 0, 'tombstone_warn_threshold': 0, 'trickle_fsync': 0, 'trickle_fsync_interval_in_kb': 0, 'truncate_request_timeout_in_ms': 0, 'write_request_timeout_in_ms': 0} cassandra_data = {} for key in data: if key in cassandra_conf_items: cassandra_data[key] = data[key] with open(cassandra_conf_file, 'w') as f: yaml.safe_dump(cassandra_data, f, default_flow_style=False) def __update_yaml_dse(self): raise NotImplementedError('ScyllaNode.__update_yaml_dse') def _update_log4j(self): raise NotImplementedError('ScyllaNode._update_log4j') def __generate_server_xml(self): raise NotImplementedError('ScyllaNode.__generate_server_xml') def _get_directories(self): dirs = {} for i in ['data', 'commitlogs', 'bin', 'conf', 'logs','hints']: dirs[i] = os.path.join(self.get_path(), i) return dirs def _copy_agent(self): raise 
NotImplementedError('ScyllaNode._copy_agent') def _start_agent(self): raise NotImplementedError('ScyllaNode._start_agent') def _stop_agent(self): raise NotImplementedError('ScyllaNode._stop_agent') def _write_agent_address_yaml(self, agent_dir): raise NotImplementedError('ScyllaNode._write_agent_address_yaml') def _write_agent_log4j_properties(self, agent_dir): raise NotImplementedError('ScyllaNode._write_agent_log4j_properties') def _wait_no_pending_flushes(self, wait_timeout=60): def no_pending_flushes(): stdout, _ = self.nodetool('cfstats') pending_flushes = False for line in stdout.splitlines(): line = line.strip() if line.startswith('Pending flushes'): _, pending_flushes_str = line.split(':') pending_flushes_count = int(pending_flushes_str.strip()) if pending_flushes_count > 0: pending_flushes = True return not pending_flushes result = wait_for(no_pending_flushes, timeout=wait_timeout, step=1.0) if result is None: raise NodeError("Node %s still has pending flushes after " "%s seconds" % (self.name, wait_timeout)) def flush(self): self.nodetool("flush") self._wait_no_pending_flushes()
foo.py
# Python 3.3.3 and 2.7.6 # python fo.py from threading import Thread, Lock # Potentially useful thing: # In Python you "import" a global variable, instead of "export"ing it when you declare it # (This is probably an effort to make you feel bad about typing the word "global") i = 0 mutex = Lock() def incrementingFunction(): global i # TODO: increment i 1_000_000 times for j in range(1000000): mutex.acquire(1) i = i + 1 mutex.release() def decrementingFunction(): global i # TODO: decrement i 1_000_000 times for j in range(1000000): mutex.acquire(1) i = i - 1 mutex.release() def main(): global i incrementing = Thread(target = incrementingFunction, args = (),) decrementing = Thread(target = decrementingFunction, args = (),) # TODO: Start both threads incrementing.start() decrementing.start() incrementing.join() decrementing.join() print("The magic number is %d" % (i)) main()
monitor.py
#!/usr/bin/env python import sys import os import hashlib import md5 import mmap import datetime import time import threading import Queue import logging from watchdog.observers import Observer from watchdog.events import LoggingEventHandler import watchdog from libs.comictaggerlib.comicarchive import * from libs.comictaggerlib.issuestring import * import utils from database import * class MonitorEventHandler(watchdog.events.FileSystemEventHandler): def __init__(self, monitor): self.monitor = monitor self.ignore_directories = True def on_any_event(self,event): if event.is_directory: return self.monitor.handleSingleEvent(event) class Monitor(): def __init__(self, dm, paths): self.dm = dm self.style = MetaDataStyle.CIX self.queue = Queue.Queue(0) self.paths = paths self.eventList = [] self.mutex = threading.Lock() self.eventProcessingTimer = None self.quit_when_done = False # for debugging/testing self.status = "IDLE" self.statusdetail = "" self.scancomplete_ts = "" def start(self): self.thread = threading.Thread(target=self.mainLoop) self.thread.daemon = True self.quit = False self.thread.start() def stop(self): self.quit = True self.thread.join() def mainLoop(self): logging.debug("Monitor: started main loop.") self.session = self.dm.Session() observer = Observer() self.eventHandler = MonitorEventHandler(self) for path in self.paths: if os.path.exists(path): observer.schedule(self.eventHandler, path, recursive=True) observer.start() while True: try: (msg, args) = self.queue.get(block=True, timeout=1) except: msg = None #dispatch messages if msg == "scan": self.dofullScan(self.paths) if msg == "events": self.doEventProcessing(args) #time.sleep(1) if self.quit: break self.session.close() self.session = None observer.stop() logging.debug("Monitor: stopped main loop.") def scan(self): self.queue.put(("scan", None)) def handleSingleEvent(self, event): # events may happen in clumps. start a timer # to defer processing. 
if the timer is already going, # it will be canceled # in the future there can be more smarts about # granular file events. for now this will be # good enough to just get a a trigger that *something* # changed self.mutex.acquire() if self.eventProcessingTimer is not None: self.eventProcessingTimer.cancel() self.eventProcessingTimer = threading.Timer(30, self.handleEventProcessing) self.eventProcessingTimer.start() self.mutex.release() def handleEventProcessing(self): # trigger a full rescan self.mutex.acquire() self.scan() # remove the timer if self.eventProcessingTimer is not None: self.eventProcessingTimer = None self.mutex.release() def checkIfRemovedOrModified(self, comic, pathlist): remove = False def inFolderlist(filepath, pathlist): for p in pathlist: if p in filepath: return True return False if not (os.path.exists(comic.path)): # file is missing, remove it from the comic table, add it to deleted table logging.debug(u"Removing missing {0}".format(comic.path)) remove = True elif not inFolderlist(comic.path, pathlist): logging.debug(u"Removing unwanted {0}".format(comic.path)) remove = True else: # file exists. check the mod date. # if it's been modified, remove it, and it'll be re-added #curr = datetime.datetime.fromtimestamp(os.path.getmtime(comic.path)) curr = datetime.utcfromtimestamp(os.path.getmtime(comic.path)) prev = comic.mod_ts if curr != prev: logging.debug(u"Removed modifed {0}".format(comic.path)) remove = True if remove: self.removeComic(comic) self.remove_count += 1 def getComicMetadata(self, path): #print time.time() - start_time, "seconds" ca = ComicArchive(path, default_image_path=AppFolders.imagePath("default.jpg")) if ca.seemsToBeAComicArchive(): #print >> sys.stdout, u"Adding {0}... 
        \r".format(count),
        logging.debug(u"Reading in {0} {1}\r".format(self.read_count, path))
        sys.stdout.flush()
        self.read_count += 1

        # Prefer ComicRack (CIX) metadata, fall back to ComicBookInfo (CBI).
        if ca.hasMetadata( MetaDataStyle.CIX ):
            style = MetaDataStyle.CIX
        elif ca.hasMetadata( MetaDataStyle.CBI ):
            style = MetaDataStyle.CBI
        else:
            style = None

        if style is not None:
            md = ca.readMetadata(style)
        else:
            # No metadata in comic. make some guesses from the filename
            md = ca.metadataFromFilename()

        # Fill in the filesystem-derived fields regardless of metadata source.
        md.path = ca.path
        md.page_count = ca.page_count
        md.mod_ts = datetime.utcfromtimestamp(os.path.getmtime(ca.path))
        md.filesize = os.path.getsize(md.path)
        md.hash = ""
        #logging.debug("before hash")
        #md5 = hashlib.md5()
        #md5.update(open(md.path, 'r').read())
        #md.hash = unicode(md5.hexdigest())
        #logging.debug("after hash")

            return md

        # NOTE(review): the original indentation of this chunk was lost;
        # `return None` is presumed the fall-through for files that are not
        # readable comic archives -- confirm against upstream source.
        return None

    def removeComic(self, comic):
        """Remove *comic* from the library, recording a DeletedComic tombstone
        so clients can learn about the removal."""
        deleted = DeletedComic()
        deleted.comic_id = comic.id
        self.session.add(deleted)
        self.session.delete(comic)

    def fetchObjByName(self, obj_dict, instance_name,):
        """Look up a named child object (Character, Team, ...) in *obj_dict*.

        Returns None (after printing the error) if the name is missing.
        """
        try:
            #logging.debug( u"FETCH:= {0} {1} {2}".format(obj.name, obj.id, type(obj)))
            obj = None
            obj = obj_dict[instance_name]
        except Exception as e:
            print "-------->", e, instance_name
        return obj

    def addComicFromMetadata(self, md ):
        """Create a Comic row from GenericMetadata *md* and attach all of its
        named child objects (characters, teams, credits, ...).

        Assumes createChildDicts() has already populated the *_dict lookup
        tables for every name referenced by *md*.
        """
        #logging.debug(u"Adding {0} {1}\r".format(self.add_count, md.path))
        #sys.stdout.flush()
        self.add_count += 1

        comic = Comic()
        # store full path, and filename and folder separately, for search efficiency,
        # at the cost of redundant storage
        comic.folder, comic.file = os.path.split(md.path)
        comic.path = md.path
        comic.page_count = md.page_count
        comic.mod_ts = md.mod_ts
        comic.hash = md.hash
        comic.filesize = md.filesize

        if not md.isEmpty:
            if md.series is not None:
                comic.series = unicode(md.series)
            if md.issue is not None:
                comic.issue = unicode(md.issue)
                # issue_num is the sortable float form of the issue string
                comic.issue_num = IssueString(unicode(comic.issue)).asFloat()

            #Alt Series
            if md.alternateNumber is not None:
                comic.alternateIssue = unicode(md.alternateNumber)
                comic.alternateNumber = IssueString(unicode(comic.alternateIssue)).asFloat()

            if md.year is not None:
                # Build a real date from possibly-partial year/month/day;
                # missing month/day default to 1, bad values are ignored.
                try:
                    day = 1
                    month = 1
                    if md.month is not None:
                        month = int(md.month)
                    if md.day is not None:
                        day = int(md.day)
                    year = int(md.year)
                    comic.date = datetime(year,month,day)
                except:
                    pass

            comic.year = md.year
            comic.month = md.month
            comic.day = md.day

            if md.volume is not None:
                comic.volume = int(md.volume)
            if md.publisher is not None:
                comic.publisher = unicode(md.publisher)
            if md.title is not None:
                comic.title = unicode(md.title)
            if md.comments is not None:
                comic.comments = unicode(md.comments)
            if md.imprint is not None:
                comic.imprint = unicode(md.imprint)
            if md.webLink is not None:
                comic.weblink = unicode(md.webLink)

        self.session.add(comic)

        # Attach the many-to-many child objects.  Each comma-separated field is
        # de-duplicated via set() before lookup.
        if md.characters is not None:
            for c in list(set(md.characters.split(","))):
                character = self.fetchObjByName( self.character_dict, c.strip())
                comic.characters_raw.append(character)
            #comic.characters_raw.append(self.character_objs[0])
        if md.teams is not None:
            for t in list(set(md.teams.split(","))):
                team = self.fetchObjByName( self.team_dict, t.strip())
                comic.teams_raw.append(team)
        if md.locations is not None:
            for l in list(set(md.locations.split(","))):
                location = self.fetchObjByName( self.location_dict, l.strip())
                comic.locations_raw.append(location)
        if md.storyArc is not None:
            for sa in list(set(md.storyArc.split(","))):
                storyarc = self.fetchObjByName( self.storyarc_dict, sa.strip())
                comic.storyarcs_raw.append(storyarc)
            pass
        #series
        if md.alternateSeries is not None:
            for alt in list(set(md.alternateSeries.split(","))):
                alternateseries = self.fetchObjByName(self.alternateseries_dict, alt.strip())
                comic.alternateseries_raw.append(alternateseries)
        if md.genre is not None:
            for g in list(set(md.genre.split(","))):
                genre = self.fetchObjByName( self.genre_dict, g.strip())
                comic.genres_raw.append(genre)
            pass
        if md.tags is not None:
            for gt in list(set(md.tags)):
                generictag = self.fetchObjByName( self.generictag_dict, gt.strip())
                comic.generictags_raw.append(generictag)
            pass
        if md.credits is not None:
            for credit in md.credits:
                role = self.fetchObjByName( self.role_dict, credit['role'].lower().strip())
                person = self.fetchObjByName( self.person_dict, credit['person'].strip())
                comic.credits_raw.append(Credit(person, role))
            #comic.credits_raw.append(Credit(self.person_objs[0], self.role_objs[0]))
            pass

    def buildChildSets(self, md):
        """Accumulate the named entities referenced by *md* into the
        instance-level *_names sets (characters, teams, credits, ...)."""
        if md.characters is not None:
            for n in list(set(md.characters.split(","))):
                self.character_names.add(n.strip())
        if md.teams is not None:
            for n in list(set(md.teams.split(","))):
                self.team_names.add(n.strip())
        if md.locations is not None:
            for n in list(set(md.locations.split(","))):
                self.location_names.add(n.strip())
        if md.storyArc is not None:
            for n in list(set(md.storyArc.split(","))):
                self.storyarc_names.add(n.strip())
        if md.alternateSeries is not None:
            for n in list(set(md.alternateSeries.split(","))):
                self.alternateseries_names.add(n.strip())
        if md.genre is not None:
            for n in list(set(md.genre.split(","))):
                self.genre_names.add(n.strip())
        if md.tags is not None:
            for n in list(set(md.tags)):
                self.generictag_names.add(n.strip())
        if md.credits is not None:
            for credit in md.credits:
                self.person_names.add(credit['person'].strip())
                self.role_names.add(credit['role'].lower().strip())

    def saveChildInfoToDB(self, md_list):
        """Insert every *new* named entity referenced by the metadata in
        *md_list* into the DB, then commit.  Existing names are skipped."""
        character_names = set()
        team_names = set()
        location_names = set()
        storyarc_names = set()
        alternateseries_names = set()
        genre_names = set()
        person_names = set()
        role_names = set()
        generictag_names = set()

        # First pass: collect the distinct names of every kind.
        for md in md_list:
            if md.characters is not None:
                for n in list(set(md.characters.split(","))):
                    character_names.add(n.strip())
            if md.teams is not None:
                for n in list(set(md.teams.split(","))):
                    team_names.add(n.strip())
            if md.locations is not None:
                for n in list(set(md.locations.split(","))):
                    location_names.add(n.strip())
            if md.storyArc is not None:
                for n in list(set(md.storyArc.split(","))):
                    storyarc_names.add(n.strip())
            if md.alternateSeries is not None:
                for n in list(set(md.alternateSeries.split(","))):
                    alternateseries_names.add(n.strip())
            if md.genre is not None:
                for n in list(set(md.genre.split(","))):
                    genre_names.add(n.strip())
            if md.tags is not None:
                for n in list(set(md.tags)):
                    generictag_names.add(n.strip())
            if md.credits is not None:
                for credit in md.credits:
                    person_names.add(credit['person'].strip())
                    role_names.add(credit['role'].lower().strip())

        def addNamedObjects(cls, nameset):
            # Insert only the names not already present for this class.
            q = self.session.query(cls.name)
            existing_set = set([i[0] for i in list(q)])
            nameset = nameset - existing_set
            #logging.debug( "new {0} size = {1}".format( cls, len(nameset )))
            for n in nameset:
                obj = cls(name=n)
                #print cls, n
                self.session.add(obj)

        # For each set, get the existing set of names in the DB,
        # and get the difference set. With the set of only new names,
        # insert them all
        addNamedObjects(Character, character_names)
        addNamedObjects(Team, team_names)
        addNamedObjects(Location, location_names)
        addNamedObjects(StoryArc, storyarc_names)
        addNamedObjects(AlternateSeries, alternateseries_names)
        addNamedObjects(Genre, genre_names)
        addNamedObjects(Person, person_names)
        addNamedObjects(Role, role_names)
        addNamedObjects(GenericTag, generictag_names)

        self.session.commit()

    def createChildDicts(self):
        """Load every named child object from the DB and build name->object
        lookup dicts, so addComicFromMetadata() never has to query."""
        # read back all theose objects with their keys
        character_objs = self.session.query(Character).all()
        team_objs = self.session.query(Team).all()
        location_objs = self.session.query(Location).all()
        storyarc_objs = self.session.query(StoryArc).all()
        alternateseries_objs = self.session.query(AlternateSeries).all()
        genre_objs = self.session.query(Genre).all()
        person_objs = self.session.query(Person).all()
        role_objs = self.session.query(Role).all()
        generictag_objs = self.session.query(GenericTag).all()

        def buildDict(obj_list, objdict):
            # Key each object by its (unique) name.
            for o in obj_list:
                objdict[o.name] = o

        self.character_dict = dict()
        self.team_dict = dict()
        self.location_dict = dict()
        self.storyarc_dict = dict()
        self.alternateseries_dict = dict()
        self.genre_dict = dict()
        self.person_dict = dict()
        self.role_dict = dict()
        self.generictag_dict = dict()

        buildDict(character_objs, self.character_dict)
        buildDict(team_objs, self.team_dict)
        buildDict(location_objs, self.location_dict)
        buildDict(storyarc_objs, self.storyarc_dict)
        buildDict(alternateseries_objs, self.alternateseries_dict)
        buildDict(genre_objs, self.genre_dict)
        buildDict(person_objs, self.person_dict)
        buildDict(role_objs, self.role_dict)
        buildDict(generictag_objs, self.generictag_dict)

    def setStatusDetail(self, detail, level=logging.DEBUG):
        """Publish *detail* as the current status string and log it."""
        self.statusdetail = detail
        if level == logging.DEBUG:
            logging.debug(detail)
        else:
            logging.info(detail)

    def setStatusDetailOnly(self, detail):
        """Publish *detail* without logging (for high-frequency updates)."""
        self.statusdetail = detail

    def commitMetadataList(self, md_list):
        """Persist a batch of scanned metadata: first the named child entities,
        then the Comic rows themselves, committing once at the end."""
        # now that we have a chunk of metadata is read in, make up lists of all the "named" entities to
        # add to the DB before the actual comics are added
        #self.setStatusDetailOnly(u"Monitor: Adding {0} files to library...".format(len(md_list)))
        self.saveChildInfoToDB(md_list)
        #logging.debug(u"Monitor: finish adding child sets")

        # create dictionarys of all those objects, so we don't have to query the database
        self.createChildDicts()

        for md in md_list:
            self.addComicFromMetadata( md )
            if self.quit:
                self.setStatusDetail(u"Monitor: halting scan!")
                return
            # periodically commit
            #if self.add_count % 1000 == 0:
            #    self.session.commit()
            #    self.setStatusDetail(u"Monitor: {0} of {1} added...".format(self.add_count,len(md_list)), logging.INFO)
        if self.add_count > 0:
            self.session.commit()

    def dofullScan(self, dirs):
        """Full library scan of *dirs*: drop missing/changed files from the DB,
        then read metadata from new files and add them in batches of ~100."""
        self.status = "SCANNING"

        logging.info(u"Monitor: Beginning file scan...")
        self.setStatusDetail(u"Monitor: Making a list of all files in the folders...")
        filelist = utils.get_recursive_filelist( dirs )
        self.setStatusDetail(u"Monitor: sorting recursive file list ({0} items)".format(len(filelist)))
        # Oldest files first, so interrupted scans resume sensibly.
        filelist = sorted(filelist, key=os.path.getmtime)
        self.setStatusDetail(u"Monitor: done listing files.")

        self.add_count = 0
        self.remove_count = 0

        # get the entire comic table into memory
        query = list(self.session.query(Comic))

        # look for missing or changed files
        self.setStatusDetail(u"Monitor: Removing missing or modified files from DB...")
        #start_time = time.time()
        for comic in query:
            self.checkIfRemovedOrModified( comic, self.paths )
            if self.quit:
                self.setStatusDetail(u"Monitor: halting scan!")
                return
        #print time.time() - start_time, "seconds"
        self.setStatusDetail(u"Monitor: Done removing files.")

        if self.remove_count > 0:
            # echo toggling makes the delete SQL visible in the log
            self.dm.engine.echo = True
            self.session.commit()
            self.dm.engine.echo = False

        self.setStatusDetail(u"Monitor: found {0} files to inspect...".format(len(filelist)))

        # make a list of all path strings in comic table
        db_pathlist = [i[0] for i in list(self.session.query(Comic.path))]

        self.setStatusDetail(u"Monitor: removing already scanned files from file list")
        for f in db_pathlist:
            if f in filelist:
                filelist.remove(f)
        db_pathlist = None

        self.setStatusDetail(u"Monitor: {0} new files to scan...".format(len(filelist)), logging.INFO)

        md_list = []
        self.read_count = 0
        for filename in filelist:
            md = self.getComicMetadata( filename )
            if md is not None:
                md_list.append(md)
            self.setStatusDetailOnly(u"Monitor: {0} files: {1} scanned, {2} added to library...".format(len(filelist), self.read_count,self.add_count))
            if self.quit:
                self.setStatusDetail(u"Monitor: halting scan!")
                return

            #every so often, commit to DB
            if self.read_count % 100 == 0 and self.read_count != 0:
                if len(md_list) > 0:
                    self.commitMetadataList(md_list)
                    md_list = []

        if len(md_list) > 0:
            self.commitMetadataList(md_list)

        self.setStatusDetail(u"Monitor: finished scanning metadata in {0} of {1} files".format(self.read_count,len(filelist)), logging.INFO)
        self.status = "IDLE"
        self.statusdetail = ""
        # timestamp in milliseconds for client change detection
        self.scancomplete_ts = int(time.mktime(datetime.utcnow().timetuple()) * 1000)

        logging.info("Monitor: Added {0} comics".format(self.add_count))
        logging.info("Monitor: Removed {0} comics".format(self.remove_count))

        if self.remove_count > 0 or self.add_count > 0:
            self.session.query(DatabaseInfo).first().last_updated = datetime.utcnow()
            self.session.commit()

        if self.quit_when_done:
            self.quit = True

    def doEventProcessing(self, eventList):
        """Handle filesystem watcher events (currently just logs them)."""
        logging.debug(u"Monitor: event_list:{0}".format(eventList))


if __name__ == '__main__':

    if len(sys.argv) < 2:
        print >> sys.stderr, "usage: {0} comic_folder ".format(sys.argv[0])
        sys.exit(-1)

    utils.fix_output_encoding()

    dm = DataManager()
    dm.create()

    m = Monitor(dm, sys.argv[1:])
    m.quit_when_done = True
    m.start()
    m.scan()
    #while True:
    #    time.sleep(10)
    m.stop()
node.py
import time
import queue
from queue import Queue
from threading import Thread
from random import randint

from . import config
from constellations.socket_transport import SocketTransport


class Data:
    """Mutable bag of node state shared with handlers and actions."""

    def __init__(self):
        self.me = {}        # this node's own info; 'address' is [host, port]
        self.peers = {}     # known peers, keyed by peer id
        self.peer_set = []  # peer bookkeeping used by the constellation logic


class Node:
    """
    A Node combines an input server with a queue and a list of handlers
    """

    def __init__(self):
        # TODO pick another port if the first one is already bound
        # TODO add the handler and acts lists implementations (think about the semantics and abstractions)
        # TODO make Node be a thread or use its own thread

        # A queue to place incoming messages, decouples the server from the handling
        self.message_queue = Queue()

        # Add the put method of the message_queue as callback to the server
        self.transport = SocketTransport()

        self.data = Data()
        #if(self.transport.host == ""):
        #    self.data.me['address'] = ["localhost", self.transport.port]
        #else:
        self.data.me['address'] = [self.transport.host, self.transport.port]

        # Handlers handle the incoming messages
        self.handlers = []
        # Acts are functions run in separate threads, associated with this node and its data
        self.acts = []

        self.running = True
        t = Thread(target=self.queue_consumer)
        t.daemon = True
        t.start()

        self.transport.receive(self.message_queue.put)

    def add_handler(self, func):
        """Register *func*; it will be invoked as func(node, message) for
        every incoming message."""
        # TODO check if func supports the message argument (is this possible?)
        self.handlers.append(func)

    def add_action(self, func):
        """Run *func(node)* in a daemon thread and remember the thread."""
        # TODO check if func supports the data argument (is this possible?)
        t = Thread(target=func, args=(self,))
        t.daemon = True
        t.start()
        self.acts.append(t)

    def stop(self):
        """Ask the consumer loop to exit (it polls `running` every <=3 s)."""
        # TODO safely stop server and all threads
        self.running = False

    def queue_consumer(self):
        """Gets one message at a time from message_queue and passes it to the
        registered handlers"""
        while self.running:
            try:
                # Waits for 3 seconds, otherwise throws `queue.Empty`
                next_item = self.message_queue.get(True, 3)
            except queue.Empty:
                next_item = None

            # Passes the next queue item to all the registered handlers (TODO should I clone the message?)
            if next_item is not None:
                for h in self.handlers:
                    h(self, next_item)


if __name__ == "__main__":

    # BUGFIX: queue_consumer invokes handlers as handler(node, message), so
    # the demo handlers must accept both arguments (they previously took one
    # and would have raised TypeError on the first message).
    def myhandler1(node, message):
        print("My handler1 received: " + message)

    def myhandler2(node, message):
        print("My handler2 received: " + message)

    def send_greetings(node):
        data = node.data
        i = 0
        while True:
            # NOTE(review): `SocketClient` is not defined or imported in this
            # module, so this action raises NameError when it runs.  It
            # presumably should use the node's SocketTransport -- confirm.
            SocketClient.send("localhost", data.me['address'][1], "lodpsdppsdpsdf" + str(i))
            i += 1
            time.sleep(2)

    # BUGFIX: Node.__init__ takes no `port` keyword; `Node(port=5003)` raised
    # TypeError before any networking happened.
    n = Node()
    n.add_handler(myhandler1)
    n.add_handler(myhandler2)
    n.add_action(send_greetings)

    import sys
    time.sleep(1)
    input("Press Enter to continue...")
    sys.exit()
trait_notifiers.py
# ------------------------------------------------------------------------------
#
#  Copyright (c) 2005-2013, Enthought, Inc.
#  All rights reserved.
#
#  This software is provided without warranty under the terms of the BSD
#  license included in enthought/LICENSE.txt and may be redistributed only
#  under the conditions described in the aforementioned license.  The license
#  is also available online at http://www.enthought.com/licenses/BSD.txt
#
#  Thanks for using Enthought open source!
#
#  Author: David C. Morrill
#  Original Date: 06/21/2002
#
# ------------------------------------------------------------------------------

""" Classes that implement and support the Traits change notification mechanism
"""

# -------------------------------------------------------------------------------
#  Imports:
# -------------------------------------------------------------------------------

from __future__ import absolute_import, print_function

import contextlib
import six
import threading
from threading import local as thread_local
from threading import Thread
import traceback
from types import MethodType
import weakref
import sys

from .trait_base import Uninitialized
from .trait_errors import TraitNotificationError

# -------------------------------------------------------------------------------
#  Global Data:
# -------------------------------------------------------------------------------

# The thread ID for the user interface thread
ui_thread = -1

# The handler for notifications that must be run on the UI thread
ui_handler = None

# -------------------------------------------------------------------------------
#  Sets up the user interface thread handler:
# -------------------------------------------------------------------------------


def set_ui_handler(handler):
    """ Sets up the user interface thread handler.
    """
    global ui_handler, ui_thread

    ui_handler = handler
    ui_thread = threading.current_thread().ident


def ui_dispatch(handler, *args, **kw):
    """ Call *handler* directly when already on the UI thread; otherwise
    forward the call through the registered ui_handler. """
    if threading.current_thread().ident == ui_thread:
        handler(*args, **kw)
    else:
        ui_handler(handler, *args, **kw)


# -------------------------------------------------------------------------------
#  'NotificationExceptionHandlerState' class:
# -------------------------------------------------------------------------------


class NotificationExceptionHandlerState(object):
    """ One entry on a per-thread exception-handler stack. """

    def __init__(self, handler, reraise_exceptions, locked):
        self.handler = handler
        self.reraise_exceptions = reraise_exceptions
        self.locked = locked


# -------------------------------------------------------------------------------
#  'NotificationExceptionHandler' class:
# -------------------------------------------------------------------------------


class NotificationExceptionHandler(object):
    """ Manages per-thread stacks of notification exception handlers. """

    def __init__(self):
        # Lazily-created logger used by the default handler
        self.traits_logger = None
        # Handler stack of the main thread, used as the template for new threads
        self.main_thread = None
        # threading.local holding each thread's handler stack
        self.thread_local = thread_local()

    # -- Private Methods ------------------------------------------------------------

    def _push_handler(
        self, handler=None, reraise_exceptions=False, main=False, locked=False
    ):
        """ Pushes a new traits notification exception handler onto the stack,
            making it the new exception handler. Returns a
            NotificationExceptionHandlerState object describing the previous
            exception handler.

            Parameters
            ----------
            handler : handler
                The new exception handler, which should be a callable or
                None. If None (the default), then the default traits
                notification exception handler is used. If *handler* is not
                None, then it must be a callable which can accept four
                arguments: object, trait_name, old_value, new_value.
            reraise_exceptions : bool
                Indicates whether exceptions should be reraised after the
                exception handler has executed. If True, exceptions will be
                re-raised after the specified handler has been executed.
                The default value is False.
            main : bool
                Indicates whether the caller represents the main application
                thread. If True, then the caller's exception handler is made
                the default handler for any other threads that are created.
                Note that a thread can explicitly set its own exception
                handler if desired. The *main* flag is provided to make it
                easier to set a global application policy without having to
                explicitly set it for each thread. The default value is
                False.
            locked : bool
                Indicates whether further changes to the Traits notification
                exception handler state should be allowed. If True, then
                any subsequent calls to _push_handler() or _pop_handler() for
                that thread will raise a TraitNotificationError. The default
                value is False.
        """
        handlers = self._get_handlers()
        self._check_lock(handlers)
        if handler is None:
            handler = self._log_exception
        handlers.append(
            NotificationExceptionHandlerState(
                handler, reraise_exceptions, locked
            )
        )
        if main:
            self.main_thread = handlers
        return handlers[-2]

    def _pop_handler(self):
        """ Pops the traits notification exception handler stack, restoring
            the exception handler in effect prior to the most recent
            _push_handler() call. If the stack is empty or locked, a
            TraitNotificationError exception is raised.

            Note that each thread has its own independent stack. See the
            description of the _push_handler() method for more information on
            this.
        """
        handlers = self._get_handlers()
        self._check_lock(handlers)
        if len(handlers) > 1:
            handlers.pop()
        else:
            raise TraitNotificationError(
                "Attempted to pop an empty traits notification exception "
                "handler stack."
            )

    def _handle_exception(self, object, trait_name, old, new):
        """ Handles a traits notification exception using the handler defined
            by the topmost stack entry for the corresponding thread.
        """
        excp_class, excp = sys.exc_info()[:2]
        handler_info = self._get_handlers()[-1]
        handler_info.handler(object, trait_name, old, new)
        if handler_info.reraise_exceptions or isinstance(
            excp, TraitNotificationError
        ):
            raise excp

    def _get_handlers(self):
        """ Returns the handler stack associated with the currently executing
            thread.
        """
        thread_local = self.thread_local
        # Support a plain dict standing in for threading.local (keyed by
        # thread ident) as well as a real threading.local instance.
        if isinstance(thread_local, dict):
            id = threading.current_thread().ident
            handlers = thread_local.get(id)
        else:
            handlers = getattr(thread_local, "handlers", None)

        if handlers is None:
            # First access from this thread: seed the stack from the main
            # thread's handler, or from the default logging handler.
            if self.main_thread is not None:
                handler = self.main_thread[-1]
            else:
                handler = NotificationExceptionHandlerState(
                    self._log_exception, False, False
                )
            handlers = [handler]
            if isinstance(thread_local, dict):
                thread_local[id] = handlers
            else:
                thread_local.handlers = handlers

        return handlers

    def _check_lock(self, handlers):
        """ Raises an exception if the specified handler stack is locked.
        """
        if handlers[-1].locked:
            raise TraitNotificationError(
                "The traits notification exception handler is locked. "
                "No changes are allowed."
            )

    # ---------------------------------------------------------------------------
    #  This method defines the default notification exception handling
    #  behavior of traits. However, it can be completely overridden by pushing
    #  a new handler using the '_push_handler' method.
    #
    #  It logs any exceptions generated in a trait notification handler.
    # ---------------------------------------------------------------------------

    def _log_exception(self, object, trait_name, old, new):
        """ Logs any exceptions generated in a trait notification handler.
        """
        # When the stack depth is too great, the logger can't always log the
        # message. Make sure that it goes to the console at a minimum:
        excp_class, excp = sys.exc_info()[:2]
        if (
            (excp_class is RuntimeError)
            and (len(excp.args) > 0)
            and (excp.args[0] == "maximum recursion depth exceeded")
        ):
            sys.__stderr__.write(
                "Exception occurred in traits notification "
                "handler for object: %s, trait: %s, old value: %s, "
                "new value: %s.\n%s\n"
                % (
                    object,
                    trait_name,
                    old,
                    new,
                    "".join(traceback.format_exception(*sys.exc_info())),
                )
            )

        logger = self.traits_logger
        if logger is None:
            import logging

            self.traits_logger = logger = logging.getLogger("traits")
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter("%(message)s"))
            logger.addHandler(handler)
            print(
                "Exception occurred in traits notification handler.\n"
                "Please check the log file for details."
            )

        try:
            logger.exception(
                "Exception occurred in traits notification handler for "
                "object: %s, trait: %s, old value: %s, new value: %s"
                % (object, trait_name, old, new)
            )
        except Exception:
            # Ignore anything we can't log the above way:
            pass


# -------------------------------------------------------------------------------
#  Traits global notification exception handler:
# -------------------------------------------------------------------------------

notification_exception_handler = NotificationExceptionHandler()

push_exception_handler = notification_exception_handler._push_handler
pop_exception_handler = notification_exception_handler._pop_handler
handle_exception = notification_exception_handler._handle_exception

# -------------------------------------------------------------------------------
#  Traits global notification event tracer:
# -------------------------------------------------------------------------------

_pre_change_event_tracer = None
_post_change_event_tracer = None


def set_change_event_tracers(pre_tracer=None, post_tracer=None):
    """ Set the global trait change event tracers.

    The global tracers are called whenever a trait change event is dispatched.
    There are two tracers: `pre_tracer` is called before the notification is
    sent; `post_tracer` is called after the notification is sent, even if the
    notification failed with an exception (in which case the `post_tracer` is
    called with a reference to the exception, then the exception is sent to
    the `notification_exception_handler`).

    The tracers should be a callable taking 5 arguments: ::

        tracer(obj, trait_name, old, new, handler)

    `obj` is the source object, on which trait `trait_name` was changed from
    value `old` to value `new`. `handler` is the function or method that will
    be notified of the change.

    The post-notification tracer also has a keyword argument, `exception`,
    that is `None` if no exception has been raised, and the a reference to the
    raise exception otherwise. ::

        post_tracer(obj, trait_name, old, new, handler, exception=None)

    Note that for static trait change listeners, `handler` is not a method,
    but rather the function before class creation, since this is the way
    Traits works at the moment.
    """
    global _pre_change_event_tracer
    global _post_change_event_tracer
    _pre_change_event_tracer = pre_tracer
    _post_change_event_tracer = post_tracer


def get_change_event_tracers():
    """ Get the currently active global trait change event tracers. """
    return _pre_change_event_tracer, _post_change_event_tracer


def clear_change_event_tracers():
    """ Clear the global trait change event tracer. """
    global _pre_change_event_tracer
    global _post_change_event_tracer
    _pre_change_event_tracer = None
    _post_change_event_tracer = None


@contextlib.contextmanager
def change_event_tracers(pre_tracer, post_tracer):
    """ Context manager to temporarily change the global event tracers.
    """
    old_pre_tracer, old_post_tracer = get_change_event_tracers()
    set_change_event_tracers(pre_tracer, post_tracer)
    try:
        yield
    finally:
        set_change_event_tracers(old_pre_tracer, old_post_tracer)


# -------------------------------------------------------------------------------
#  'AbstractStaticChangeNotifyWrapper' class:
# -------------------------------------------------------------------------------


class AbstractStaticChangeNotifyWrapper(object):
    """
    Concrete implementation must define the 'argument_transforms' class
    argument, a dictionary mapping the number of arguments in the event
    handler to a function that takes the arguments (obj, trait_name, old,
    new) and returns the arguments tuple for the actual handler.
    """

    # NOTE(review): this attribute is named `arguments_transforms` while
    # __init__ and the subclasses use `argument_transforms` (no trailing
    # "s" on "argument") -- this default therefore appears to be dead;
    # confirm before relying on it.
    arguments_transforms = {}

    def __init__(self, handler):
        arg_count = handler.__code__.co_argcount
        if arg_count > 4:
            raise TraitNotificationError(
                (
                    "Invalid number of arguments for the static anytrait change "
                    "notification handler: %s. A maximum of 4 arguments is "
                    "allowed, but %s were specified."
                )
                % (handler.__name__, arg_count)
            )
        self.argument_transform = self.argument_transforms[arg_count]

        self.handler = handler

    def __call__(self, object, trait_name, old, new):
        """ Dispatch to the appropriate handler method. """

        # Ignore the spurious event fired on first read of an uninitialized
        # trait.
        if old is not Uninitialized:
            # Extract the arguments needed from the handler.
            args = self.argument_transform(object, trait_name, old, new)

            # Send a description of the change event to the event tracer.
            if _pre_change_event_tracer is not None:
                _pre_change_event_tracer(
                    object, trait_name, old, new, self.handler
                )

            try:
                # Call the handler.
                self.handler(*args)
            except Exception as e:
                if _post_change_event_tracer is not None:
                    _post_change_event_tracer(
                        object, trait_name, old, new, self.handler, exception=e
                    )
                handle_exception(object, trait_name, old, new)
            else:
                if _post_change_event_tracer is not None:
                    _post_change_event_tracer(
                        object,
                        trait_name,
                        old,
                        new,
                        self.handler,
                        exception=None,
                    )

    def equals(self, handler):
        return False


# -------------------------------------------------------------------------------
#  'StaticAnyTraitChangeNotifyWrapper' class:
# -------------------------------------------------------------------------------


class StaticAnyTraitChangeNotifyWrapper(AbstractStaticChangeNotifyWrapper):
    """ Static notify wrapper for "anytrait" handlers. """

    # The wrapper is called with the full set of argument, and we need to
    # create a tuple with the arguments that need to be sent to the event
    # handler, depending on the number of those.
    argument_transforms = {
        0: lambda obj, name, old, new: (),
        1: lambda obj, name, old, new: (obj,),
        2: lambda obj, name, old, new: (obj, name),
        3: lambda obj, name, old, new: (obj, name, new),
        4: lambda obj, name, old, new: (obj, name, old, new),
    }


# -------------------------------------------------------------------------------
#  'StaticTraitChangeNotifyWrapper' class:
# -------------------------------------------------------------------------------


class StaticTraitChangeNotifyWrapper(AbstractStaticChangeNotifyWrapper):
    """ Static notify wrapper for single-trait handlers. """

    # The wrapper is called with the full set of argument, and we need to
    # create a tuple with the arguments that need to be sent to the event
    # handler, depending on the number of those.
    argument_transforms = {
        0: lambda obj, name, old, new: (),
        1: lambda obj, name, old, new: (obj,),
        2: lambda obj, name, old, new: (obj, new),
        3: lambda obj, name, old, new: (obj, old, new),
        4: lambda obj, name, old, new: (obj, name, old, new),
    }


# -------------------------------------------------------------------------------
#  'TraitChangeNotifyWrapper' class:
# -------------------------------------------------------------------------------


class TraitChangeNotifyWrapper(object):
    """ Dynamic change notify wrapper.

    This class is in charge to dispatch trait change events to dynamic
    listener, typically created using the `on_trait_change` method, or the
    decorator with the same name.
    """

    # The wrapper is called with the full set of argument, and we need to
    # create a tuple with the arguments that need to be sent to the event
    # handler, depending on the number of those.
    argument_transforms = {
        0: lambda obj, name, old, new: (),
        1: lambda obj, name, old, new: (new,),
        2: lambda obj, name, old, new: (name, new),
        3: lambda obj, name, old, new: (obj, name, new),
        4: lambda obj, name, old, new: (obj, name, old, new),
    }

    def __init__(self, handler, owner, target=None):
        self.init(handler, owner, target)

    def init(self, handler, owner, target=None):
        # If target is not None and handler is a function then the handler
        # will be removed when target is deleted.
        if type(handler) is MethodType:
            func = handler.__func__
            object = handler.__self__
            if object is not None:
                self.object = weakref.ref(object, self.listener_deleted)
                self.name = handler.__name__
                self.owner = owner
                arg_count = func.__code__.co_argcount - 1
                if arg_count > 4:
                    raise TraitNotificationError(
                        (
                            "Invalid number of arguments for the dynamic trait "
                            "change notification handler: %s. A maximum of 4 "
                            "arguments is allowed, but %s were specified."
                        )
                        % (func.__name__, arg_count)
                    )
                # We use the unbound method here to prevent cyclic garbage
                # (issue #100).
                self.notify_listener = type(self)._notify_method_listener
                self.argument_transform = self.argument_transforms[arg_count]

                return arg_count

        elif target is not None:
            # Set up so the handler will be removed when the target is deleted.
            self.object = weakref.ref(target, self.listener_deleted)

        # NOTE(review): this chunk's original indentation was lost; the
        # plain-function setup below is presumed to be the common fall-through
        # path (as in upstream Traits) -- confirm against upstream source.
        self.owner = owner
        arg_count = handler.__code__.co_argcount
        if arg_count > 4:
            raise TraitNotificationError(
                (
                    "Invalid number of arguments for the dynamic trait change "
                    "notification handler: %s. A maximum of 4 arguments is "
                    "allowed, but %s were specified."
                )
                % (handler.__name__, arg_count)
            )

        self.name = None
        self.handler = handler
        # We use the unbound method here to prevent cyclic garbage
        # (issue #100).
        self.notify_listener = type(self)._notify_function_listener
        self.argument_transform = self.argument_transforms[arg_count]

        return arg_count

    def __call__(self, object, trait_name, old, new):
        """ Dispatch to the appropriate method.

        We do explicit dispatch instead of assigning to the .__call__ instance
        attribute to avoid reference cycles.
        """
        # `notify_listener` is either the *unbound*
        # `_notify_method_listener` or `_notify_function_listener` to
        # prevent cyclic garbage (issue #100).
        self.notify_listener(self, object, trait_name, old, new)

    def dispatch(self, handler, *args):
        """ Dispatch the event to the listener.

        This method is normally the only one that needs to be overridden in
        a subclass to implement the subclass's dispatch mechanism.
        """
        handler(*args)

    def equals(self, handler):
        if handler is self:
            return True

        if (type(handler) is MethodType) and (handler.__self__ is not None):
            return (handler.__name__ == self.name) and (
                handler.__self__ is self.object()
            )

        return (self.name is None) and (handler == self.handler)

    def listener_deleted(self, ref):
        # In multithreaded situations, it's possible for this method to
        # be called after, or concurrently with, the dispose method.
        # Don't raise in that case.
        try:
            self.owner.remove(self)
        except ValueError:
            pass
        self.object = self.owner = None

    def dispose(self):
        self.object = None

    def _dispatch_change_event(self, object, trait_name, old, new, handler):
        """ Prepare and dispatch a trait change event to a listener. """

        # Extract the arguments needed from the handler.
        args = self.argument_transform(object, trait_name, old, new)

        # Send a description of the event to the change event tracer.
        if _pre_change_event_tracer is not None:
            _pre_change_event_tracer(object, trait_name, old, new, handler)

        # Dispatch the event to the listener.
        try:
            self.dispatch(handler, *args)
        except Exception as e:
            if _post_change_event_tracer is not None:
                _post_change_event_tracer(
                    object, trait_name, old, new, handler, exception=e
                )
            # This call needs to be made inside the `except` block in case
            # the handler wants to re-raise the exception.
            handle_exception(object, trait_name, old, new)
        else:
            if _post_change_event_tracer is not None:
                _post_change_event_tracer(
                    object, trait_name, old, new, handler, exception=None
                )

    def _notify_method_listener(self, object, trait_name, old, new):
        """ Dispatch a trait change event to a method listener. """
        obj_weak_ref = self.object
        if (obj_weak_ref is not None) and (old is not Uninitialized):
            # We make sure to hold a reference to the object before invoking
            # `getattr` so that the listener does not disappear in a
            # multi-threaded case.
            obj = obj_weak_ref()
            if obj is not None:
                # Dynamically resolve the listener by name.
                listener = getattr(obj, self.name)
                self._dispatch_change_event(
                    object, trait_name, old, new, listener
                )

    def _notify_function_listener(self, object, trait_name, old, new):
        """ Dispatch a trait change event to a function listener.
        """
        if old is not Uninitialized:
            self._dispatch_change_event(
                object, trait_name, old, new, self.handler
            )


# -------------------------------------------------------------------------------
#  'ExtendedTraitChangeNotifyWrapper' class:
# -------------------------------------------------------------------------------


class ExtendedTraitChangeNotifyWrapper(TraitChangeNotifyWrapper):
    """ Change notify wrapper for "extended" trait change events..

    The "extended notifiers" are set up internally when using extended traits,
    to add/remove traits listeners when one of the intermediate traits
    changes.

    For example, in a listener for the extended trait `a.b`, we need to
    add/remove listeners to `a:b` when `a` changes.
    """

    def _dispatch_change_event(self, object, trait_name, old, new, handler):
        """ Prepare and dispatch a trait change event to a listener. """

        # Extract the arguments needed from the handler.
        args = self.argument_transform(object, trait_name, old, new)

        # Dispatch the event to the listener.  Note: unlike the base class,
        # no event tracers are invoked here.
        try:
            self.dispatch(handler, *args)
        except Exception:
            handle_exception(object, trait_name, old, new)

    def _notify_method_listener(self, object, trait_name, old, new):
        """ Dispatch a trait change event to a method listener. """
        obj_weak_ref = self.object
        if obj_weak_ref is not None:
            # We make sure to hold a reference to the object before invoking
            # `getattr` so that the listener does not disappear in a
            # multi-threaded case.
            obj = obj_weak_ref()
            if obj is not None:
                # Dynamically resolve the listener by name.
                listener = getattr(obj, self.name)
                self._dispatch_change_event(
                    object, trait_name, old, new, listener
                )

    def _notify_function_listener(self, object, trait_name, old, new):
        """ Dispatch a trait change event to a function listener.
        """
        self._dispatch_change_event(object, trait_name, old, new, self.handler)


# -------------------------------------------------------------------------------
#  'FastUITraitChangeNotifyWrapper' class:
# -------------------------------------------------------------------------------


class FastUITraitChangeNotifyWrapper(TraitChangeNotifyWrapper):
    """ Dynamic change notify wrapper, dispatching on the UI thread.

    This class is in charge to dispatch trait change events to dynamic
    listener, typically created using the `on_trait_change` method and the
    `dispatch` parameter set to 'ui' or 'fast_ui'.
    """

    def dispatch(self, handler, *args):
        """ Run the handler inline on the UI thread, or defer via ui_handler. """
        if threading.current_thread().ident == ui_thread:
            handler(*args)
        else:
            ui_handler(handler, *args)


# -------------------------------------------------------------------------------
#  'NewTraitChangeNotifyWrapper' class:
# -------------------------------------------------------------------------------


class NewTraitChangeNotifyWrapper(TraitChangeNotifyWrapper):
    """ Dynamic change notify wrapper, dispatching on a new thread.

    This class is in charge to dispatch trait change events to dynamic
    listener, typically created using the `on_trait_change` method and the
    `dispatch` parameter set to 'new'.
    """

    def dispatch(self, handler, *args):
        """ Run the handler on a freshly-spawned (non-daemon) thread. """
        Thread(target=handler, args=args).start()
# ==== file: upd_bot.py ====
''''
regex intro post (RU): https://habr.com/ru/post/349860/
regex tester: https://regex101.com/r/
BeautifulSoup documentation: https://www.crummy.com/software/BeautifulSoup/bs4/doc/
'''

# example of using a "done" flag:
# import time
# done_flag=False
# while done_flag==False:
#     try:
#         print("stuff done")
#         done_flag=True
#     except:
#         print('stuff not done. Waiting')
#         time.sleep(1)

# autorun folder (Windows):
# C:\Users\wital\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup

import urllib.request
import urllib.request
from bs4 import BeautifulSoup
import re
import os
import time
#from threading import Thread
from multiprocessing import Process
import ast

# Telegram chat ids the bot reports to (presumably redacted placeholders).
vitalaChat = 3
nasyaChat = '3'
# url_link = 'http://mspvolga.ru/zakupki/'

# for tests
#nasyaChat = 78654
#url_link = "HTML_страница.html"


# Universal page opener: if the "url" contains Cyrillic it is treated as a
# local file path, otherwise it is fetched over the network.
def get_html(url):
    if bool(re.search('[а-яА-Я]', url)):
        response = open(url, encoding='utf-8').read()
        print("local_open")
        return response
    else:
        #headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'}
        response = urllib.request.urlopen(url)#, headers)
        print("INTERNET_OPEN!")
        return response.read()


# Parse mspvolga.ru procurement listing: returns a list of
# "title\nhttp://mspvolga.ru<link>" strings, one per <h4> entry.
def msp_volg_parser(url):
    html = get_html(url)
    soup = BeautifulSoup(html, features="html.parser")
    # print(soup.prettify())
    table1 = soup.find('div', class_="grid gap-md margin-top-md")
    offers = table1
    list = []
    # NOTE(review): r'$' matches end-of-string, so these re.sub calls only
    # append a trailing space; pattern1 collapses newlines in the title.
    pattern = r'$'
    pattern1 = r'\n'
    table1 = offers.find_all('h4')
    for each in table1:
        # print(each)
        link = each.find('a').get('href')#
        text = each.find('a').get_text()
        link =re.sub(pattern,' ', link, count=0)
        text =re.sub(pattern,' ', text, count=0)
        text =re.sub(pattern1,' ', text, count=0)
        list.append(text+'\n'+"http://mspvolga.ru"+link)
    return list


# Parse cppvlg.ru news listing: returns "title\nhttp://cppvlg.ru<link>" strings.
def cppvlg_parse(url):
    html = get_html(url)
    soup = BeautifulSoup(html, features="html.parser")
    # print(soup.prettify())
    table1 = soup.find(class_="items row")
    table2 = table1.find_all("div", class_="title")
    table3 = []
    for each in table2:
        link = each.find('a').get('href')
        text = each.find('a').get_text()
        # print(link)
        # print(text)
        table3.append(str(text+'\n'+"http://cppvlg.ru"+link))
    return table3


# Parse ciss34.ru news cards, normalize whitespace, and keep only entries
# mentioning procurement-related keywords (RU stems for "collect",
# "commercial", "proposal").
def ciss_parse(url):
    html = get_html(url)
    soup = BeautifulSoup(html, features="html.parser")
    # print(soup.prettify())
    table1 = soup.find_all("div", class_="col-lg-6 col-xl-4" )
    table3 = []
    # print(table1)
    for each in table1:
        link = each.find('a').get('href')
        # table3.append(link)
        head = each.find('h6', class_="header").get_text()
        # table3.append(head)
        text = each.find('div', class_="desc").get_text()
        table3.append(str(head)+"\n"+str(link)+"\n"+str(text))
    # Collapse newlines and runs of spaces into single spaces.
    pattern2 = r' +'
    pattern3 = r'\n'
    for each in table3:
        v1= re.sub(pattern3,r" ", each)
        table3[table3.index(each)] = re.sub(pattern2,r" ", v1)
    table4 =[]
    pattern4 = r'сбор'
    pattern5 = r'коммерч'
    pattern6 = r'предл'
    for each in table3:
        if re.search(pattern4, each, flags=re.IGNORECASE) or re.search(pattern5, each, flags=re.IGNORECASE) or re.search(pattern6, each, flags=re.IGNORECASE):
            table4.append(each)
    return table4


# Check whether enough time has passed since updates were last sent.
# Persists the timestamp of the last update in "time_file.txt".
# Returns (should_update, seconds_until_next_update).
def is_it_time():
    upd_period = 20000
    if os.path.exists("time_file.txt")==False:
        file = open("time_file.txt", "w")
        file.write(str(time.time()))
        file.close()
        print("created new time file")
        return True, upd_period
    else:
        file = open("time_file.txt", "r")
        last = float(file.read())
        now = time.time()
        file.close()
        if now-last>upd_period:
            # Enough time has passed: stamp the file with "now".
            file = open("time_file.txt", "w")
            file.write(str(now))
            file.close()
            print('it is time to get updates')
            return True, upd_period-(now-last)
        else:
            return False, upd_period-(now-last)


# Load the previously-seen offers list from the given file, creating the file
# (with a single-space placeholder entry) if it does not exist yet.
def get_prev_list(name):
    if os.path.exists(name)==False:
        file = open(name, "w")
        file.write(' ')
        file.close()
        print("created new offers file")
        return [' ']
    else:
        file = open(name, "r")
        LList = file.read()
        file.close()
        LList = ast.literal_eval(LList)# converts the list-formatted string back into a list
        return LList


# Overwrite the given offers file with the string form of the list.
def upd_last_offers(LList,which):
    file = open(which, "w")
    t = str(LList)
    file.write(t)
    file.close()


# Telegram-bot process. Connection errors are checked here, because this is
# the only place they can be caught; in main they are not treated as errors.
def sender(id, text_list,waiting_time):
    import telebot
    sent = False# flag
    def aa(id, text_list,waiting_time):
        try:
            for each in text_list:
                # NOTE(review): hard-coded bot token should live in an
                # env var / config file, and the TeleBot could be created
                # once outside the loop — TODO confirm before changing.
                bot = telebot.TeleBot("1349683616:AAGOPlMak-DrzUzrtUo_Szt1CBLecRhmuxM")
                print(each)
                bot.send_message(id, each)
            return True
        except:
            print("telegram bot connection EXCEPTION. Waiting", "\n", ' ')
            time.sleep(waiting_time)
            print('retry')
            return False
    while sent==False:
        sent = aa(id, text_list,waiting_time)# the interpreter won't update the flag unless it is used afterwards
        print("success of retry?: ",sent)# hence this line is needed
    print("terminated process", os.getpid())
    sent = False


# Creates the bot connection in a process separate from the rest of the
# program, to avoid touching Telegram accidentally. Unlike a thread, a
# process can be closed; even the library import happens in isolation.
# Without this there was an error.
def process_creator(id,text,waiting_time):
    pr = Process(target=sender, args=(id,text,waiting_time))
    pr.start()
    pr.join()
    pr.terminate()


if __name__=='__main__':
    waiting_time = 300
    print("program started")
    while True:
        t, tt = is_it_time()
        if t == True:
            # Scrape one site, diff against its previously-seen offers file,
            # and send anything new to Telegram. Retries parsing up to 16
            # times before giving up.
            def _work(waiting_time, site, file):
                retry_limit = 16
                not_done_flag=False
                send_list = []
                while not_done_flag==False:
                    try:
                        list_offers = []
                        if file =="cppvlg.txt":
                            list_offers = cppvlg_parse(site)
                        if file == "mspvolga.txt":
                            list_offers = msp_volg_parser(site)
                        if file == 'ciss34.txt':
                            list_offers = ciss_parse(site)
                        old_list = get_prev_list(file)
                        send_list = list(set(list_offers) - set(old_list))
                        upd_last_offers(list_offers, file)
                        not_done_flag = True
                    except Exception as e:
                        print("site parsing exception: ", e, "\n" 'Retry in ', waiting_time / 60, " minutes")
                        time.sleep(waiting_time)
                        retry_limit-=1
                        if retry_limit<1:
                            not_done_flag = True
                if send_list == []:
                    print("notheing new at ", site)
                    pass
                else:
                    process_creator(nasyaChat, send_list, waiting_time)
                    #process_creator(vitalaChat, send_list, waiting_time)
                    print(send_list)
                    print(site, ' sent')
            # send_list = mspvolga_work(waiting_time)
            _work(waiting_time, 'http://cppvlg.ru/news-and-events/news/', 'cppvlg.txt')
            _work(waiting_time, 'http://mspvolga.ru/zakupki/', 'mspvolga.txt')
            _work(waiting_time, 'http://ciss34.ru/news', 'ciss34.txt')
            ttt = float('{:.1f}'.format(tt))
            print('main waiting ' + str(int(tt / 60)) + " minutes or ~ " + str(
                int(ttt / 60/ 60)) + " hours untill update")
        # Poll again after a long nap (placement assumed at loop level —
        # original indentation was lost in flattening; TODO confirm).
        time.sleep(waiting_time*6)
# ==== file: test_closing.py ====
from fixtures import * # noqa: F401,F403 from flaky import flaky from lightning import RpcError from utils import only_one, sync_blockheight, wait_for, DEVELOPER, TIMEOUT, VALGRIND, SLOW_MACHINE import os import queue import pytest import re import threading import unittest @unittest.skipIf(not DEVELOPER, "Too slow without --dev-bitcoind-poll") def test_closing(node_factory, bitcoind): l1, l2 = node_factory.line_graph(2) chan = l1.get_channel_scid(l2) l1.pay(l2, 200000000) assert bitcoind.rpc.getmempoolinfo()['size'] == 0 billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.'] billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] assert billboard == ['CHANNELD_NORMAL:Funding transaction locked.'] bitcoind.generate_block(5) # Only wait for the channels to activate with DEVELOPER=1, # otherwise it's going to take too long because of the missing # --dev-broadcast-interval if DEVELOPER: wait_for(lambda: len(l1.getactivechannels()) == 2) wait_for(lambda: len(l2.getactivechannels()) == 2) billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] # This may either be from a local_update or an announce, so just # check for the substring assert 'CHANNELD_NORMAL:Funding transaction locked.' in billboard[0] l1.rpc.close(chan) l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN') l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN') l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') # And should put closing into mempool. 
l1.daemon.wait_for_log('sendrawtx exit 0') l2.daemon.wait_for_log('sendrawtx exit 0') # Both nodes should have disabled the channel in their view wait_for(lambda: len(l1.getactivechannels()) == 0) wait_for(lambda: len(l2.getactivechannels()) == 0) assert bitcoind.rpc.getmempoolinfo()['size'] == 1 # Now grab the close transaction closetxid = only_one(bitcoind.rpc.getrawmempool(False)) billboard = only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] assert billboard == [ 'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi for tx:{}'.format(closetxid), ] bitcoind.generate_block(1) l1.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid) l2.daemon.wait_for_log(r'Owning output.* \(SEGWIT\).* txid %s.* CONFIRMED' % closetxid) # Make sure both nodes have grabbed their close tx funds assert closetxid in set([o['txid'] for o in l1.rpc.listfunds()['outputs']]) assert closetxid in set([o['txid'] for o in l2.rpc.listfunds()['outputs']]) wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [ 'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi for tx:{}'.format(closetxid), 'ONCHAIN:Tracking mutual close transaction', 'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel' ]) bitcoind.generate_block(9) wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] == [ 'CLOSINGD_SIGEXCHANGE:We agreed on a closing fee of 5430 satoshi for tx:{}'.format(closetxid), 'ONCHAIN:Tracking mutual close transaction', 'ONCHAIN:All outputs resolved: waiting 90 more blocks before forgetting channel' ]) # Make sure both have forgotten about it bitcoind.generate_block(90) wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0) wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0) # The entry in the channels table should still be there assert l1.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1 
assert l2.db_query("SELECT count(*) as c FROM channels;")[0]['c'] == 1 def test_closing_while_disconnected(node_factory, bitcoind, executor): l1, l2 = node_factory.line_graph(2, opts={'may_reconnect': True}) chan = l1.get_channel_scid(l2) l1.pay(l2, 200000000) l2.stop() # The close should still be triggered afterwards. fut = executor.submit(l1.rpc.close, chan, 0) l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN') l2.start() fut.result(TIMEOUT) l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') # And should put closing into mempool. l1.daemon.wait_for_log('sendrawtx exit 0') l2.daemon.wait_for_log('sendrawtx exit 0') bitcoind.generate_block(101) wait_for(lambda: len(l1.rpc.listchannels()['channels']) == 0) wait_for(lambda: len(l2.rpc.listchannels()['channels']) == 0) def test_closing_id(node_factory): """Test closing using peer ID and full channel ID """ l1, l2 = node_factory.get_nodes(2) # Close by full channel ID. l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) cid = l2.rpc.listpeers()['peers'][0]['channels'][0]['channel_id'] l2.rpc.close(cid) wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']) wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected']) # Close by peer ID. l2.rpc.connect(l1.info['id'], 'localhost', l1.port) l1.daemon.wait_for_log("Handed peer, entering loop") l2.fund_channel(l1, 10**6) pid = l1.info['id'] l2.rpc.close(pid) wait_for(lambda: not only_one(l1.rpc.listpeers(l2.info['id'])['peers'])['connected']) wait_for(lambda: not only_one(l2.rpc.listpeers(l1.info['id'])['peers'])['connected']) def test_closing_torture(node_factory, executor, bitcoind): # We set up N-to-N fully-connected mesh, then try # closing them all at once. 
amount = 10**6 num_nodes = 10 # => 55 channels (36 seconds on my laptop) if VALGRIND: num_nodes -= 4 # => 21 (135 seconds) if SLOW_MACHINE: num_nodes -= 1 # => 45/15 (37/95 seconds) nodes = node_factory.get_nodes(num_nodes) # Make sure bitcoind has plenty of utxos bitcoind.generate_block(num_nodes) # Give them all plenty of UTXOs, make sure they see them for i in range(len(nodes)): for j in range(i + 1, len(nodes)): addr = nodes[i].rpc.newaddr()['bech32'] bitcoind.rpc.sendtoaddress(addr, (amount + 1000000) / 10**8) bitcoind.generate_block(1) sync_blockheight(bitcoind, nodes) txs = [] for i in range(len(nodes)): for j in range(i + 1, len(nodes)): nodes[i].rpc.connect(nodes[j].info['id'], 'localhost', nodes[j].port) txs.append(nodes[i].rpc.fundchannel(nodes[j].info['id'], amount)['txid']) # Make sure they're all in, then lock them in. bitcoind.generate_block(1, wait_for_mempool=txs) # Wait for them all to be CHANNELD_NORMAL for n in nodes: wait_for(lambda: all(p['channels'][0]['state'] == 'CHANNELD_NORMAL' for p in n.rpc.listpeers()['peers'])) # Start closers: can take a long time under valgrind! futures = [] for i in range(len(nodes)): for j in range(i + 1, len(nodes)): futures.append(executor.submit(nodes[i].rpc.close, nodes[j].info['id'])) futures.append(executor.submit(nodes[j].rpc.close, nodes[i].info['id'])) # Wait for close to finish close_txs = set() for f in futures: # If one side completes closing, we'll get an error here 'Peer has no active channel' try: close_txs.add(f.result(TIMEOUT)['txid']) except RpcError as err: assert err.error['message'] == 'Peer has no active channel' # Should have one close for each open. assert len(close_txs) == len(txs) # Get closes confirmed bitcoind.generate_block(100, wait_for_mempool=list(close_txs)) # And make sure they hangup. 
for n in nodes: wait_for(lambda: n.rpc.listpeers()['peers'] == []) @unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test") def test_closing_different_fees(node_factory, bitcoind, executor): l1 = node_factory.get_node() # Default feerate = 15000/7500/1000 # It will start at the second number, accepting anything above the first. feerates = [[20000, 15000, 7400], [8000, 1001, 100]] amounts = [0, 545999, 546000] num_peers = len(feerates) * len(amounts) addr = l1.rpc.newaddr()['bech32'] bitcoind.rpc.sendtoaddress(addr, 1) numfunds = len(l1.rpc.listfunds()['outputs']) bitcoind.generate_block(1) wait_for(lambda: len(l1.rpc.listfunds()['outputs']) > numfunds) # Create them in a batch, for speed! peers = [] for feerate in feerates: for amount in amounts: p = node_factory.get_node(feerates=feerate) p.feerate = feerate p.amount = amount l1.rpc.connect(p.info['id'], 'localhost', p.port) peers.append(p) for p in peers: p.channel = l1.rpc.fundchannel(p.info['id'], 10**6, minconf=0)['channel_id'] # Technically, this is async to fundchannel returning. l1.daemon.wait_for_log('sendrawtx exit 0') bitcoind.generate_block(6) # Now wait for them all to hit normal state, do payments l1.daemon.wait_for_logs(['update for channel .* now ACTIVE'] * num_peers + ['to CHANNELD_NORMAL'] * num_peers) for p in peers: if p.amount != 0: l1.pay(p, 100000000) # Now close all channels (not unilaterally!) closes = [executor.submit(l1.rpc.close, p.channel, 0) for p in peers] for c in closes: c.result(90) # close does *not* wait for the sendrawtransaction, so do that! # Note that since they disagree on the ideal fee, they may conflict # (first one in will win), so we cannot look at logs, we need to # wait for mempool. 
wait_for(lambda: bitcoind.rpc.getmempoolinfo()['size'] == num_peers) bitcoind.generate_block(1) for p in peers: p.daemon.wait_for_log(' to ONCHAIN') wait_for(lambda: 'ONCHAIN:Tracking mutual close transaction' in only_one(p.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']) l1.daemon.wait_for_logs([' to ONCHAIN'] * num_peers) @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_closing_negotiation_reconnect(node_factory, bitcoind): disconnects = ['-WIRE_CLOSING_SIGNED', '@WIRE_CLOSING_SIGNED', '+WIRE_CLOSING_SIGNED'] l1 = node_factory.get_node(disconnect=disconnects, may_reconnect=True) l2 = node_factory.get_node(may_reconnect=True) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) chan = l1.fund_channel(l2, 10**6) l1.pay(l2, 200000000) assert bitcoind.rpc.getmempoolinfo()['size'] == 0 l1.rpc.close(chan) l1.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN') l2.daemon.wait_for_log(' to CHANNELD_SHUTTING_DOWN') l1.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') l2.daemon.wait_for_log(' to CLOSINGD_SIGEXCHANGE') # And should put closing into mempool (happens async, so # CLOSINGD_COMPLETE may come first). l1.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE']) l2.daemon.wait_for_logs(['sendrawtx exit 0', ' to CLOSINGD_COMPLETE']) assert bitcoind.rpc.getmempoolinfo()['size'] == 1 @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_penalty_inhtlc(node_factory, bitcoind, executor): """Test penalty transaction with an incoming HTLC""" # We suppress each one after first commit; HTLC gets added not fulfilled. 
# Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True) l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED-nocommit']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) # Now, this will get stuck due to l1 commit being disabled.. t = executor.submit(l1.pay, l2, 100000000) assert len(l1.getactivechannels()) == 2 assert len(l2.getactivechannels()) == 2 # They should both have commitments blocked now. l1.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit') l2.daemon.wait_for_log('=WIRE_COMMITMENT_SIGNED-nocommit') # Make sure l1 got l2's commitment to the HTLC, and sent to master. l1.daemon.wait_for_log('got commitsig') # Take our snapshot. tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx'] # Let them continue l1.rpc.dev_reenable_commit(l2.info['id']) l2.rpc.dev_reenable_commit(l1.info['id']) # Should fulfill. l1.daemon.wait_for_log('peer_in WIRE_UPDATE_FULFILL_HTLC') l1.daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK') l2.daemon.wait_for_log('peer_out WIRE_UPDATE_FULFILL_HTLC') l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK') # Payment should now complete. t.result(timeout=10) # Now we really mess things up! bitcoind.rpc.sendrawtransaction(tx) bitcoind.generate_block(1) l2.daemon.wait_for_log(' to ONCHAIN') # FIXME: l1 should try to stumble along! wait_for(lambda: len(l2.getactivechannels()) == 0) # l2 should spend all of the outputs (except to-us). # Could happen in any order, depending on commitment tx. needle = l2.daemon.logsearch_start l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX', 'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM') l2.daemon.logsearch_start = needle l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX', 'THEIR_REVOKED_UNILATERAL/THEIR_HTLC') # FIXME: test HTLC tx race! # 100 blocks later, all resolved. 
bitcoind.generate_block(100) l2.daemon.wait_for_log('onchaind complete, forgetting peer') outputs = l2.rpc.listfunds()['outputs'] assert [o['status'] for o in outputs] == ['confirmed'] * 2 # Allow some lossage for fees. assert sum(o['value'] for o in outputs) < 10**6 assert sum(o['value'] for o in outputs) > 10**6 - 15000 @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_penalty_outhtlc(node_factory, bitcoind, executor): """Test penalty transaction with an outgoing HTLC""" # First we need to get funds to l2, so suppress after second. # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit'], may_fail=True, feerates=(7500, 7500, 7500), allow_broken_log=True) l2 = node_factory.get_node(disconnect=['=WIRE_COMMITMENT_SIGNED*3-nocommit']) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) # Move some across to l2. l1.pay(l2, 200000000) assert not l1.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED') assert not l2.daemon.is_in_log('=WIRE_COMMITMENT_SIGNED') # Now, this will get stuck due to l1 commit being disabled.. t = executor.submit(l2.pay, l1, 100000000) # Make sure we get signature from them. l1.daemon.wait_for_log('peer_in WIRE_UPDATE_ADD_HTLC') l1.daemon.wait_for_log('peer_in WIRE_COMMITMENT_SIGNED') # They should both have commitments blocked now. l1.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED') l2.daemon.wait_for_log('dev_disconnect: =WIRE_COMMITMENT_SIGNED') # Make sure both sides got revoke_and_ack for that commitment. l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK') l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK') # Take our snapshot. tx = l1.rpc.dev_sign_last_tx(l2.info['id'])['tx'] # Let them continue l1.rpc.dev_reenable_commit(l2.info['id']) l2.rpc.dev_reenable_commit(l1.info['id']) # Thread should complete. t.result(timeout=10) # Make sure both sides got revoke_and_ack for final. 
l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK') l2.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK') # Now we really mess things up! bitcoind.rpc.sendrawtransaction(tx) bitcoind.generate_block(1) l2.daemon.wait_for_log(' to ONCHAIN') # FIXME: l1 should try to stumble along! # l2 should spend all of the outputs (except to-us). # Could happen in any order, depending on commitment tx. needle = l2.daemon.logsearch_start l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX', 'THEIR_REVOKED_UNILATERAL/DELAYED_OUTPUT_TO_THEM') l2.daemon.logsearch_start = needle l2.wait_for_onchaind_broadcast('OUR_PENALTY_TX', 'THEIR_REVOKED_UNILATERAL/OUR_HTLC') l2.daemon.logsearch_start = needle l2.daemon.wait_for_log('Ignoring output.*: THEIR_REVOKED_UNILATERAL/OUTPUT_TO_US') # FIXME: test HTLC tx race! # 100 blocks later, all resolved. bitcoind.generate_block(100) wait_for(lambda: len(l2.rpc.listpeers()['peers']) == 0) outputs = l2.rpc.listfunds()['outputs'] assert [o['status'] for o in outputs] == ['confirmed'] * 3 # Allow some lossage for fees. assert sum(o['value'] for o in outputs) < 10**6 assert sum(o['value'] for o in outputs) > 10**6 - 15000 @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_onchain_first_commit(node_factory, bitcoind): """Onchain handling where funder immediately drops to chain""" # HTLC 1->2, 1 fails just after funding. disconnects = ['+WIRE_FUNDING_LOCKED', 'permfail'] l1 = node_factory.get_node(disconnect=disconnects) # Make locktime different, as we once had them reversed! l2 = node_factory.get_node(options={'watchtime-blocks': 10}) l1.fundwallet(10**7) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.rpc.fundchannel(l2.info['id'], 10**6) l1.daemon.wait_for_log('sendrawtx exit 0') l1.bitcoin.generate_block(1) # l1 will drop to chain. 
l1.daemon.wait_for_log('permfail') l1.daemon.wait_for_log('sendrawtx exit 0') l1.bitcoin.generate_block(1) l1.daemon.wait_for_log(' to ONCHAIN') l2.daemon.wait_for_log(' to ONCHAIN') # 10 later, l1 should collect its to-self payment. bitcoind.generate_block(10) l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET', 'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US') # 94 later, l2 is done. bitcoind.generate_block(94) l2.daemon.wait_for_log('onchaind complete, forgetting peer') # Now, 100 blocks and l1 should be done. bitcoind.generate_block(6) l1.daemon.wait_for_log('onchaind complete, forgetting peer') @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_onchain_unwatch(node_factory, bitcoind): """Onchaind should not watch random spends""" l1, l2 = node_factory.line_graph(2) l1.pay(l2, 200000000) l1.rpc.dev_fail(l2.info['id']) l1.daemon.wait_for_log('Failing due to dev-fail command') l1.wait_for_channel_onchain(l2.info['id']) l1.bitcoin.generate_block(1) l1.daemon.wait_for_log(' to ONCHAIN') l2.daemon.wait_for_log(' to ONCHAIN') # 10 later, l1 should collect its to-self payment. bitcoind.generate_block(10) l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET', 'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US') # First time it sees it, onchaind cares. bitcoind.generate_block(1) l1.daemon.wait_for_log('Resolved OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by our proposal ' 'OUR_DELAYED_RETURN_TO_WALLET') # Now test unrelated onchain churn. # Daemon gets told about wallet; says it doesn't care. l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all') bitcoind.generate_block(1) l1.daemon.wait_for_log("but we don't care") # And lightningd should respect that! assert not l1.daemon.is_in_log("Can't unwatch txid") # So these should not generate further messages for i in range(5): l1.rpc.withdraw(l1.rpc.newaddr()['bech32'], 'all') bitcoind.generate_block(1) # Make sure it digests the block sync_blockheight(bitcoind, [l1]) # We won't see this again. 
assert not l1.daemon.is_in_log("but we don't care", start=l1.daemon.logsearch_start) # Note: for this test we leave onchaind running, so we can detect # any leaks! @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_onchaind_replay(node_factory, bitcoind): disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail'] options = {'watchtime-blocks': 201, 'cltv-delta': 101} # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(options=options, disconnect=disconnects, feerates=(7500, 7500, 7500)) l2 = node_factory.get_node(options=options) l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) rhash = l2.rpc.invoice(10**8, 'onchaind_replay', 'desc')['payment_hash'] routestep = { 'msatoshi': 10**8 - 1, 'id': l2.info['id'], 'delay': 101, 'channel': '1x1x1' } l1.rpc.sendpay([routestep], rhash) l1.daemon.wait_for_log('sendrawtx exit 0') bitcoind.generate_block(1) # Wait for nodes to notice the failure, this seach needle is after the # DB commit so we're sure the tx entries in onchaindtxs have been added l1.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent") l2.daemon.wait_for_log("Deleting channel .* due to the funding outpoint being spent") # We should at least have the init tx now assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0 assert len(l2.db_query("SELECT * FROM channeltxs;")) > 0 # Generate some blocks so we restart the onchaind from DB (we rescan # last_height - 100) bitcoind.generate_block(100) sync_blockheight(bitcoind, [l1, l2]) # l1 should still have a running onchaind assert len(l1.db_query("SELECT * FROM channeltxs;")) > 0 l2.rpc.stop() l1.restart() # Can't wait for it, it's after the "Server started" wait in restart() assert l1.daemon.is_in_log(r'Restarting onchaind for channel') # l1 should still notice that the funding was spent and that we should react to it l1.daemon.wait_for_log("Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by 
OUR_DELAYED_RETURN_TO_WALLET") sync_blockheight(bitcoind, [l1]) bitcoind.generate_block(10) sync_blockheight(bitcoind, [l1]) @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_onchain_dust_out(node_factory, bitcoind, executor): """Onchain handling of outgoing dust htlcs (they should fail)""" # HTLC 1->2, 1 fails after it's irrevocably committed disconnects = ['@WIRE_REVOKE_AND_ACK', 'permfail'] # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500)) l2 = node_factory.get_node() l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) # Must be dust! rhash = l2.rpc.invoice(1, 'onchain_dust_out', 'desc')['payment_hash'] routestep = { 'msatoshi': 1, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1' } l1.rpc.sendpay([routestep], rhash) payfuture = executor.submit(l1.rpc.waitsendpay, rhash) # l1 will drop to chain. l1.daemon.wait_for_log('permfail') l1.wait_for_channel_onchain(l2.info['id']) l1.bitcoin.generate_block(1) l1.daemon.wait_for_log(' to ONCHAIN') l2.daemon.wait_for_log(' to ONCHAIN') # We use 3 blocks for "reasonable depth" bitcoind.generate_block(3) # It should fail. with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: missing in commitment tx'): payfuture.result(5) # Retry payment, this should fail (and, as a side-effect, tickle a # bug). with pytest.raises(RpcError, match=r'WIRE_UNKNOWN_NEXT_PEER'): l1.rpc.sendpay([routestep], rhash) # 6 later, l1 should collect its to-self payment. bitcoind.generate_block(6) l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET', 'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US') # 94 later, l2 is done. bitcoind.generate_block(94) l2.daemon.wait_for_log('onchaind complete, forgetting peer') # Restart l1, it should not crash! l1.restart() # Now, 100 blocks and l1 should be done. 
bitcoind.generate_block(6) l1.daemon.wait_for_log('onchaind complete, forgetting peer') # Payment failed, BTW assert only_one(l2.rpc.listinvoices('onchain_dust_out')['invoices'])['status'] == 'unpaid' @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_onchain_timeout(node_factory, bitcoind, executor): """Onchain handling of outgoing failed htlcs""" # HTLC 1->2, 1 fails just after it's irrevocably committed disconnects = ['+WIRE_REVOKE_AND_ACK*3', 'permfail'] # Feerates identical so we don't get gratuitous commit to update them l1 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500)) l2 = node_factory.get_node() l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 10**6) rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash'] # We underpay, so it fails. routestep = { 'msatoshi': 10**8 - 1, 'id': l2.info['id'], 'delay': 5, 'channel': '1x1x1' } l1.rpc.sendpay([routestep], rhash) with pytest.raises(RpcError): l1.rpc.waitsendpay(rhash) # Make sure CLTVs are different, in case it confuses onchaind. bitcoind.generate_block(1) sync_blockheight(bitcoind, [l1]) # Second one will cause drop to chain. l1.rpc.sendpay([routestep], rhash) payfuture = executor.submit(l1.rpc.waitsendpay, rhash) # l1 will drop to chain. l1.daemon.wait_for_log('permfail') l1.wait_for_channel_onchain(l2.info['id']) l1.bitcoin.generate_block(1) l1.daemon.wait_for_log(' to ONCHAIN') l2.daemon.wait_for_log(' to ONCHAIN') # Wait for timeout. 
l1.daemon.wait_for_logs(['Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks', 'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX .* after 6 blocks']) bitcoind.generate_block(4) l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET', 'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US') bitcoind.generate_block(1) l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX', 'OUR_UNILATERAL/OUR_HTLC') # We use 3 blocks for "reasonable depth" bitcoind.generate_block(3) # It should fail. with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE: timed out'): payfuture.result(5) # 2 later, l1 spends HTLC (5 blocks total). bitcoind.generate_block(2) l1.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET', 'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US') # 89 later, l2 is done. bitcoind.generate_block(89) l2.daemon.wait_for_log('onchaind complete, forgetting peer') # Now, 100 blocks and l1 should be done. bitcoind.generate_block(10) sync_blockheight(bitcoind, [l1]) assert not l1.daemon.is_in_log('onchaind complete, forgetting peer') bitcoind.generate_block(1) l1.daemon.wait_for_log('onchaind complete, forgetting peer') # Payment failed, BTW assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid' @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_onchain_middleman(node_factory, bitcoind): # HTLC 1->2->3, 1->2 goes down after 2 gets preimage from 3. disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail'] l1 = node_factory.get_node() l2 = node_factory.get_node(disconnect=disconnects) l3 = node_factory.get_node() # l2 connects to both, so l1 can't reconnect and thus l2 drops to chain l2.rpc.connect(l1.info['id'], 'localhost', l1.port) l2.rpc.connect(l3.info['id'], 'localhost', l3.port) l2.fund_channel(l1, 10**6) c23 = l2.fund_channel(l3, 10**6) # Make sure routes finalized. bitcoind.generate_block(5) l1.wait_channel_active(c23) # Give l1 some money to play with. 
l2.pay(l1, 2 * 10**8) # Must be bigger than dust! rhash = l3.rpc.invoice(10**8, 'middleman', 'desc')['payment_hash'] route = l1.rpc.getroute(l3.info['id'], 10**8, 1)["route"] assert len(route) == 2 q = queue.Queue() def try_pay(): try: l1.rpc.sendpay(route, rhash) l1.rpc.waitsendpay(rhash) q.put(None) except Exception as err: q.put(err) t = threading.Thread(target=try_pay) t.daemon = True t.start() # l2 will drop to chain. l2.daemon.wait_for_log('sendrawtx exit 0') l1.bitcoin.generate_block(1) l2.daemon.wait_for_log(' to ONCHAIN') l1.daemon.wait_for_log(' to ONCHAIN') l2.daemon.wait_for_log('OUR_UNILATERAL/THEIR_HTLC') # l2 should fulfill HTLC onchain, and spend to-us (any order) l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX', 'OUR_UNILATERAL/THEIR_HTLC') # Payment should succeed. l1.bitcoin.generate_block(1) l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage') err = q.get(timeout=10) if err: print("Got err from sendpay thread") raise err t.join(timeout=1) assert not t.is_alive() # Three more, l2 can spend to-us. bitcoind.generate_block(3) l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET', 'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US') # One more block, HTLC tx is now spendable. l1.bitcoin.generate_block(1) l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET', 'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US') # 100 blocks after last spend, l2 should be done. l1.bitcoin.generate_block(100) l2.daemon.wait_for_log('onchaind complete, forgetting peer') @unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1") def test_onchain_feechange(node_factory, bitcoind, executor): """Onchain handling when we restart with different fees""" # HTLC 1->2, 2 fails just after they're both irrevocably committed # We need 2 to drop to chain, because then 1's HTLC timeout tx # is generated on-the-fly, and is thus feerate sensitive. 
    # (continuation of test_onchain_feechange)
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    l1 = node_factory.get_node(may_reconnect=True)
    l2 = node_factory.get_node(disconnect=disconnects, may_reconnect=True)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.
    routestep = {
        'msatoshi': 10**8 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    executor.submit(l1.rpc.sendpay, [routestep], rhash)

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US .* after 6 blocks')
    bitcoind.generate_block(6)

    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    # Make sure that gets included.
    bitcoind.generate_block(1)

    # Now we restart with different feerates.
    l1.stop()

    l1.daemon.cmd_line.append('--override-fee-rates=20000/9000/2000')
    l1.start()

    # We recognize different proposal as ours.
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')

    # We use 3 blocks for "reasonable depth", so add two more
    bitcoind.generate_block(2)

    # Note that the very similar test_onchain_timeout looks for a
    # different string: that's because it sees the JSONRPC response,
    # and due to the l1 restart, there is none here.
    l1.daemon.wait_for_log('WIRE_PERMANENT_CHANNEL_FAILURE')

    # 90 later, l2 is done
    bitcoind.generate_block(89)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Now, 7 blocks and l1 should be done.
    # (tail of test_onchain_feechange: 6 blocks, assert not yet forgotten,
    # then the final block pushes l1 over the forget threshold)
    bitcoind.generate_block(6)
    sync_blockheight(bitcoind, [l1])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')

    # Payment failed, BTW
    assert only_one(l2.rpc.listinvoices('onchain_timeout')['invoices'])['status'] == 'unpaid'


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev-set-fees")
def test_onchain_all_dust(node_factory, bitcoind, executor):
    """Onchain handling when we reduce output to all dust"""
    # HTLC 1->2, 2 fails just after they're both irrevocably committed
    # We need 2 to drop to chain, because then 1's HTLC timeout tx
    # is generated on-the-fly, and is thus feerate sensitive.
    disconnects = ['-WIRE_UPDATE_FAIL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    rhash = l2.rpc.invoice(10**8, 'onchain_timeout', 'desc')['payment_hash']
    # We underpay, so it fails.  (Note: 10**7 - 1 here, smaller than the
    # similar tests above, so the output becomes dust at high feerates.)
    routestep = {
        'msatoshi': 10**7 - 1,
        'id': l2.info['id'],
        'delay': 5,
        'channel': '1x1x1'
    }

    executor.submit(l1.rpc.sendpay, [routestep], rhash)

    # l2 will drop to chain.
    l2.daemon.wait_for_log('permfail')
    l2.wait_for_channel_onchain(l1.info['id'])

    # Make l1's fees really high (and wait for it to exceed 50000)
    l1.set_feerates((100000, 100000, 100000))
    l1.daemon.wait_for_log('Feerate estimate for normal set to [56789][0-9]{4}')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Wait for timeout.
    # (continuation of test_onchain_all_dust: at high feerate the HTLC output
    # is not worth claiming, so onchaind proposes IGNORING_TINY_PAYMENT)
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by IGNORING_TINY_PAYMENT .* after 6 blocks')
    bitcoind.generate_block(5)

    l1.wait_for_onchaind_broadcast('IGNORING_TINY_PAYMENT',
                                   'THEIR_UNILATERAL/OUR_HTLC')
    l1.daemon.wait_for_log('Ignoring output 0 of .*: THEIR_UNILATERAL/OUR_HTLC')

    # 100 deep and l2 forgets.
    bitcoind.generate_block(93)
    sync_blockheight(bitcoind, [l1, l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')

    # l1 does not wait for ignored payment.
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_fail")
def test_onchain_different_fees(node_factory, bitcoind, executor):
    """Onchain handling when we've had a range of fees"""
    l1, l2 = node_factory.line_graph(2, fundchannel=True, fundamount=10**7,
                                     opts={'may_reconnect': True})

    l2.rpc.dev_ignore_htlcs(id=l1.info['id'], ignore=True)
    p1 = executor.submit(l1.pay, l2, 1000000000)
    l1.daemon.wait_for_log('htlc 0: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    l1.set_feerates((16000, 7500, 3750))
    p2 = executor.submit(l1.pay, l2, 900000000)
    l1.daemon.wait_for_log('htlc 1: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Restart with different feerate for second HTLC.
    # (continuation of test_onchain_different_fees)
    l1.set_feerates((5000, 5000, 3750))
    l1.restart()
    l1.daemon.wait_for_log('peer_out WIRE_UPDATE_FEE')

    p3 = executor.submit(l1.pay, l2, 800000000)
    l1.daemon.wait_for_log('htlc 2: RCVD_ADD_ACK_COMMIT->SENT_ADD_ACK_REVOCATION')

    # Drop to chain
    l1.rpc.dev_fail(l2.info['id'])
    l1.wait_for_channel_onchain(l2.info['id'])

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')

    # Both sides should have correct feerate
    assert l1.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 16000
    }]
    assert l2.db_query('SELECT min_possible_feerate, max_possible_feerate FROM channels;') == [{
        'min_possible_feerate': 5000,
        'max_possible_feerate': 16000
    }]

    bitcoind.generate_block(5)
    # Three HTLCs, and one for the to-us output.
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 4)

    # We use 3 blocks for "reasonable depth"
    bitcoind.generate_block(3)

    # All three payments must have failed.
    with pytest.raises(Exception):
        p1.result(10)
    with pytest.raises(Exception):
        p2.result(10)
    with pytest.raises(Exception):
        p3.result(10)

    # Two more for HTLC timeout tx to be spent.
    bitcoind.generate_block(2)
    l1.daemon.wait_for_logs(['sendrawtx exit 0'] * 3)

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_new_commit(node_factory, bitcoind, executor):
    # Test case where we have two possible commits: it will use new one.
    disconnects = ['-WIRE_REVOKE_AND_ACK', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # This will fail at l2's end.
    # (continuation of test_permfail_new_commit)
    t = executor.submit(l1.pay, l2, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, new commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # OK, time out HTLC.
    bitcoind.generate_block(5)
    l1.wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                   'THEIR_UNILATERAL/OUR_HTLC')

    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Resolved THEIR_UNILATERAL/OUR_HTLC by our proposal OUR_HTLC_TIMEOUT_TO_US')
    l2.daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')

    t.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(100)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])


def setup_multihtlc_test(node_factory, bitcoind):
    """Build a 7-node line graph with in-flight HTLCs in both directions.

    Returns (payment_hash, nodes).  Used by the multihtlc onchain tests.
    """
    # l1 -> l2 -> l3 -> l4 -> l5 -> l6 -> l7
    # l1 and l7 ignore any HTLCs they're sent.
    # For each direction, we create these HTLCs with same payment_hash:
    #   1 failed (CLTV1)
    #   1 failed (CLTV2)
    #   2 live (CLTV2)
    #   1 live (CLTV3)
    nodes = node_factory.line_graph(7, wait_for_announce=True,
                                    opts={'dev-no-reconnect': None,
                                          'may_reconnect': True})

    # Balance by pushing half the funds.
    b11 = nodes[-1].rpc.invoice(10**9 // 2, '1', 'balancer')['bolt11']
    nodes[0].rpc.pay(b11)

    nodes[0].rpc.dev_ignore_htlcs(id=nodes[1].info['id'], ignore=True)
    nodes[-1].rpc.dev_ignore_htlcs(id=nodes[-2].info['id'], ignore=True)

    # Both ends issue an invoice for the SAME preimage/hash.
    preimage = "0" * 64
    h = nodes[0].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                             preimage=preimage)['payment_hash']
    nodes[-1].rpc.invoice(msatoshi=10**8, label='x', description='desc',
                          preimage=preimage)['payment_hash']

    # First, the failed attempts (paying wrong node).
CLTV1 r = nodes[0].rpc.getroute(nodes[-2].info['id'], 10**8, 1)["route"] nodes[0].rpc.sendpay(r, h) with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'): nodes[0].rpc.waitsendpay(h) r = nodes[-1].rpc.getroute(nodes[1].info['id'], 10**8, 1)["route"] nodes[-1].rpc.sendpay(r, h) with pytest.raises(RpcError, match=r'INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'): nodes[-1].rpc.waitsendpay(h) # Now increment CLTV -> CLTV2 bitcoind.generate_block(1) sync_blockheight(bitcoind, nodes) # Now, the live attempts with CLTV2 (blackholed by end nodes) r = nodes[0].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"] nodes[0].rpc.sendpay(r, h) r = nodes[-1].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"] nodes[-1].rpc.sendpay(r, h) # We send second HTLC from different node, since they refuse to send # multiple with same hash. r = nodes[1].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"] nodes[1].rpc.sendpay(r, h) r = nodes[-2].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"] nodes[-2].rpc.sendpay(r, h) # Now increment CLTV -> CLTV3. bitcoind.generate_block(1) sync_blockheight(bitcoind, nodes) r = nodes[2].rpc.getroute(nodes[-1].info['id'], 10**8, 1)["route"] nodes[2].rpc.sendpay(r, h) r = nodes[-3].rpc.getroute(nodes[0].info['id'], 10**8, 1)["route"] nodes[-3].rpc.sendpay(r, h) # Make sure HTLCs have reached the end. 
    # (tail of setup_multihtlc_test: both end nodes must have received all
    # three inbound HTLC adds before we hand control back)
    nodes[0].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)
    nodes[-1].daemon.wait_for_logs(['peer_in WIRE_UPDATE_ADD_HTLC'] * 3)

    return h, nodes


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_our_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    mid = len(nodes) // 2

    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode goes onchain with n+1 channel.
    nodes[mid].rpc.dev_fail(nodes[mid + 1].info['id'])
    nodes[mid].wait_for_channel_onchain(nodes[mid + 1].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    # TODO Remove our reliance on HTLCs failing on startup and the need for
    #      this plugin
    nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize.
    # (continuation of test_onchain_multihtlc_our_unilateral: end nodes fail
    # their three HTLCs and commit that state)
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # At depth 5, midnode will spend its own to-self output.
    bitcoind.generate_block(4)
    nodes[mid].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                           'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # The three outgoing HTLCs time out at 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                           'OUR_UNILATERAL/OUR_HTLC')

    # And three more for us to consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                                   'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: OUR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                               'THEIR_UNILATERAL/OUR_HTLC')

    # Depth 3 to consider it settled.
    # (continuation of test_onchain_multihtlc_our_unilateral)
    bitcoind.generate_block(3)

    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 100 it's all done (we didn't bother waiting for mid+1's
    # spends, so that might still be going)
    bitcoind.generate_block(97)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1 for dev_ignore_htlcs")
@unittest.skipIf(SLOW_MACHINE and VALGRIND, "slow test")
def test_onchain_multihtlc_their_unilateral(node_factory, bitcoind):
    """Node pushes a channel onchain with multiple HTLCs with same payment_hash """
    h, nodes = setup_multihtlc_test(node_factory, bitcoind)

    mid = len(nodes) // 2

    for i in range(len(nodes) - 1):
        assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']

    # Now midnode+1 goes onchain with midnode channel.
    nodes[mid + 1].rpc.dev_fail(nodes[mid].info['id'])
    nodes[mid + 1].wait_for_channel_onchain(nodes[mid].info['id'])

    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log(' to ONCHAIN')
    nodes[mid + 1].daemon.wait_for_log(' to ONCHAIN')

    # Now, restart and manually reconnect end nodes (so they don't ignore HTLCs)
    # In fact, they'll fail them with WIRE_TEMPORARY_NODE_FAILURE.
    # TODO Remove our reliance on HTLCs failing on startup and the need for
    #      this plugin
    nodes[0].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[-1].daemon.opts['plugin'] = os.path.join(os.getcwd(), 'tests/plugins/fail_htlcs.py')
    nodes[0].restart()
    nodes[-1].restart()

    # We disabled auto-reconnect so we'd detect breakage, so manually reconnect.
    nodes[0].rpc.connect(nodes[1].info['id'], 'localhost', nodes[1].port)
    nodes[-1].rpc.connect(nodes[-2].info['id'], 'localhost', nodes[-2].port)

    # Wait for HTLCs to stabilize.
    # (continuation of test_onchain_multihtlc_their_unilateral)
    nodes[0].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[0].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[0].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')
    nodes[-1].daemon.wait_for_logs(['peer_out WIRE_UPDATE_FAIL_HTLC'] * 3)
    nodes[-1].daemon.wait_for_log('peer_out WIRE_COMMITMENT_SIGNED')
    nodes[-1].daemon.wait_for_log('peer_out WIRE_REVOKE_AND_ACK')

    # At depth 5, midnode+1 will spend its own to-self output.
    bitcoind.generate_block(4)
    # NOTE(review): this call passes no resolution argument, unlike the
    # two-argument calls elsewhere in this file — confirm the helper's
    # second parameter is optional.
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET')

    # The three outgoing HTLCs time out at depth 21, 21 and 22 blocks.
    bitcoind.generate_block(16)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TO_US',
                                           'THEIR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)

    # Now, those nodes should have correctly failed the HTLCs
    for n in nodes[:mid - 1]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # Other timeouts are at depths 27,27,28 blocks.
    bitcoind.generate_block(2)
    nodes[mid].daemon.wait_for_logs(['Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC'] * 2)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                                   'OUR_UNILATERAL/OUR_HTLC')
    bitcoind.generate_block(1)
    nodes[mid].daemon.wait_for_log('Ignoring output.*: THEIR_UNILATERAL/THEIR_HTLC')
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_HTLC_TIMEOUT_TX',
                                               'OUR_UNILATERAL/OUR_HTLC')

    # At depth 3 we consider them all settled.
    bitcoind.generate_block(3)
    for n in nodes[mid + 1:]:
        with pytest.raises(RpcError, match=r'WIRE_PERMANENT_CHANNEL_FAILURE'):
            n.rpc.waitsendpay(h, TIMEOUT)

    # At depth 5, mid+1 can spend HTLC_TIMEOUT_TX output.
    # (tail of test_onchain_multihtlc_their_unilateral)
    bitcoind.generate_block(1)
    for _ in range(2):
        nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                                   'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')
    bitcoind.generate_block(1)
    nodes[mid + 1].wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                               'OUR_HTLC_TIMEOUT_TX/DELAYED_OUTPUT_TO_US')

    # At depth 100 they're all done.
    bitcoind.generate_block(100)
    nodes[mid].daemon.wait_for_logs(['onchaind complete, forgetting peer'])
    nodes[mid + 1].daemon.wait_for_logs(['onchaind complete, forgetting peer'])

    # No other channels should have failed.
    for i in range(len(nodes) - 1):
        if i != mid:
            assert only_one(nodes[i].rpc.listpeers(nodes[i + 1].info['id'])['peers'])['connected']


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_in(node_factory, bitcoind, executor):
    # Test case where we fail with unsettled incoming HTLC.
    disconnects = ['-WIRE_UPDATE_FULFILL_HTLC', 'permfail']
    # Feerates identical so we don't get gratuitous commit to update them
    l1 = node_factory.get_node(options={'dev-no-reconnect': None}, feerates=(7500, 7500, 7500))
    l2 = node_factory.get_node(disconnect=disconnects)
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 10**6)

    # This will fail at l2's end.
    t = executor.submit(l1.pay, l2, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')
    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TO_US (.*) after 6 blocks')

    # l2 then gets preimage, uses it instead of ignoring
    l2.wait_for_onchaind_broadcast('OUR_HTLC_SUCCESS_TX',
                                   'OUR_UNILATERAL/THEIR_HTLC')
    bitcoind.generate_block(1)

    # OK, l1 sees l2 fulfill htlc.
    # (continuation of test_permfail_htlc_in)
    l1.daemon.wait_for_log('THEIR_UNILATERAL/OUR_HTLC gave us preimage')
    l2.daemon.wait_for_log('Propose handling OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks')
    bitcoind.generate_block(5)

    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_HTLC_SUCCESS_TX/DELAYED_OUTPUT_TO_US')

    t.cancel()

    # Now, 100 blocks it should be done.
    bitcoind.generate_block(95)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(5)
    l2.daemon.wait_for_log('onchaind complete, forgetting peer')


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail_htlc_out(node_factory, bitcoind, executor):
    # Test case where we fail with unsettled outgoing HTLC.
    disconnects = ['+WIRE_REVOKE_AND_ACK', 'permfail']
    l1 = node_factory.get_node(options={'dev-no-reconnect': None})
    # Feerates identical so we don't get gratuitous commit to update them
    l2 = node_factory.get_node(disconnect=disconnects, feerates=(7500, 7500, 7500))

    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l2.daemon.wait_for_log('openingd-{} chan #1: Handed peer, entering loop'.format(l1.info['id']))
    l2.fund_channel(l1, 10**6)

    # This will fail at l2's end.
    # (continuation of test_permfail_htlc_out: note the payer is l2 here)
    t = executor.submit(l2.pay, l1, 200000000)

    l2.daemon.wait_for_log('dev_disconnect permfail')
    l2.wait_for_channel_onchain(l1.info['id'])
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_logs([
        'Propose handling OUR_UNILATERAL/OUR_HTLC by OUR_HTLC_TIMEOUT_TX \\(.*\\) after 6 blocks',
        'Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET .* after 5 blocks'
    ])

    l1.daemon.wait_for_log('Propose handling THEIR_UNILATERAL/THEIR_HTLC by THEIR_HTLC_TIMEOUT_TO_THEM \\(IGNORING\\) after 6 blocks')

    # l1 then gets preimage, uses it instead of ignoring
    l1.wait_for_onchaind_broadcast('THEIR_HTLC_FULFILL_TO_US',
                                   'THEIR_UNILATERAL/THEIR_HTLC')

    # l2 sees l1 fulfill tx.
    bitcoind.generate_block(1)

    l2.daemon.wait_for_log('OUR_UNILATERAL/OUR_HTLC gave us preimage')
    t.cancel()

    # l2 can send OUR_DELAYED_RETURN_TO_WALLET after 3 more blocks.
    bitcoind.generate_block(3)
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # Now, 100 blocks they should be done.
    # (tail of test_permfail_htlc_out: walk both nodes up to their
    # respective 100-block forget thresholds)
    bitcoind.generate_block(95)
    sync_blockheight(bitcoind, [l1, l2])
    assert not l1.daemon.is_in_log('onchaind complete, forgetting peer')
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    l1.daemon.wait_for_log('onchaind complete, forgetting peer')
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(3)
    sync_blockheight(bitcoind, [l2])
    assert not l2.daemon.is_in_log('onchaind complete, forgetting peer')
    bitcoind.generate_block(1)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_permfail(node_factory, bitcoind):
    l1, l2 = node_factory.line_graph(2)

    # The funding change should be confirmed and our only output
    assert [o['status'] for o in l1.rpc.listfunds()['outputs']] == ['confirmed']
    l1.pay(l2, 200000000)

    # Make sure l2 has received sig with 0 htlcs!
    l2.daemon.wait_for_log('Received commit_sig with 1 htlc sigs')
    l2.daemon.wait_for_log('Received commit_sig with 0 htlc sigs')

    # Make sure l1 has final revocation.
    l1.daemon.wait_for_log('Sending commit_sig with 1 htlc sigs')
    l1.daemon.wait_for_log('Sending commit_sig with 0 htlc sigs')
    l1.daemon.wait_for_log('peer_in WIRE_REVOKE_AND_ACK')

    # We fail l2, so l1 will reconnect to it.
    # (continuation of test_permfail)
    l2.rpc.dev_fail(l1.info['id'])
    l2.daemon.wait_for_log('Failing due to dev-fail command')
    l2.wait_for_channel_onchain(l1.info['id'])

    assert l1.bitcoin.rpc.getmempoolinfo()['size'] == 1

    # Now grab the close transaction
    closetxid = only_one(l1.bitcoin.rpc.getrawmempool(False))

    # l2 will send out tx (l1 considers it a transient error)
    bitcoind.generate_block(1)

    l1.daemon.wait_for_log('Their unilateral tx, old commit point')
    l1.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log(' to ONCHAIN')
    l2.daemon.wait_for_log('Propose handling OUR_UNILATERAL/DELAYED_OUTPUT_TO_US by OUR_DELAYED_RETURN_TO_WALLET (.*) after 5 blocks')

    wait_for(lambda: only_one(l1.rpc.listpeers(l2.info['id'])['peers'][0]['channels'])['status'] ==
             ['ONCHAIN:Tracking their unilateral close',
              'ONCHAIN:All outputs resolved: waiting 99 more blocks before forgetting channel'])

    def check_billboard():
        # l2's billboard should show its own unilateral close pending spend.
        billboard = only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status']
        return (
            len(billboard) == 2
            and billboard[0] == 'ONCHAIN:Tracking our own unilateral close'
            and re.fullmatch(r'ONCHAIN:.* outputs unresolved: in 4 blocks will spend DELAYED_OUTPUT_TO_US \(.*:0\) using OUR_DELAYED_RETURN_TO_WALLET', billboard[1])
        )
    wait_for(check_billboard)

    # Now, mine 4 blocks so it sends out the spending tx.
    bitcoind.generate_block(4)

    # onchaind notes to-local payment immediately.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Restart, should still be confirmed (fails: unwinding blocks erases
    # the confirmation, and we don't re-make it).
    l1.restart()
    wait_for(lambda: (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']]))

    # It should send the to-wallet tx.
    l2.wait_for_onchaind_broadcast('OUR_DELAYED_RETURN_TO_WALLET',
                                   'OUR_UNILATERAL/DELAYED_OUTPUT_TO_US')

    # 100 after l1 sees tx, it should be done.
    # (tail of test_permfail)
    bitcoind.generate_block(95)
    wait_for(lambda: l1.rpc.listpeers()['peers'] == [])

    wait_for(lambda: only_one(l2.rpc.listpeers(l1.info['id'])['peers'][0]['channels'])['status'] == [
        'ONCHAIN:Tracking our own unilateral close',
        'ONCHAIN:All outputs resolved: waiting 5 more blocks before forgetting channel'
    ])

    # Now, 100 blocks l2 should be done.
    bitcoind.generate_block(5)
    wait_for(lambda: l2.rpc.listpeers()['peers'] == [])

    # Only l1 has a direct output since all of l2's outputs are respent (it
    # failed). Also the output should now be listed as confirmed since we
    # generated some more blocks.
    assert (closetxid, "confirmed") in set([(o['txid'], o['status']) for o in l1.rpc.listfunds()['outputs']])

    # Check that the all the addresses match what we generated ourselves:
    for o in l1.rpc.listfunds()['outputs']:
        txout = bitcoind.rpc.gettxout(o['txid'], o['output'])
        addr = txout['scriptPubKey']['addresses'][0]
        assert(addr == o['address'])

    addr = l1.bitcoin.rpc.getnewaddress()
    l1.rpc.withdraw(addr, "all")


@unittest.skipIf(not DEVELOPER, "needs DEVELOPER=1")
def test_shutdown(node_factory):
    # Fail, in that it will exit before cleanup.
    l1 = node_factory.get_node(may_fail=True)
    if not VALGRIND:
        leaks = l1.rpc.dev_memleak()['leaks']
        if len(leaks):
            raise Exception("Node {} has memory leaks: {}"
                            .format(l1.daemon.lightning_dir, leaks))
    l1.rpc.stop()


@flaky
@unittest.skipIf(not DEVELOPER, "needs to set upfront_shutdown_script")
def test_option_upfront_shutdown_script(node_factory, bitcoind):
    l1 = node_factory.get_node(start=False)
    # Insist on upfront script we're not going to match.
    l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = "76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac"
    l1.start()

    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.fund_channel(l2, 1000000, False)
    l1.rpc.close(l2.info['id'])

    # l2 will close unilaterally when it dislikes shutdown script.
l1.daemon.wait_for_log(r'received ERROR.*scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)') # Clear channel. wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0) bitcoind.generate_block(1) wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN']) wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN']) # Works when l2 closes channel, too. l1.rpc.connect(l2.info['id'], 'localhost', l2.port) l1.fund_channel(l2, 1000000, False) l2.rpc.close(l1.info['id']) # l2 will close unilaterally when it dislikes shutdown script. l1.daemon.wait_for_log(r'received ERROR.*scriptpubkey .* is not as agreed upfront \(76a91404b61f7dc1ea0dc99424464cc4064dc564d91e8988ac\)') # Clear channel. wait_for(lambda: len(bitcoind.rpc.getrawmempool()) != 0) bitcoind.generate_block(1) wait_for(lambda: [c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN']) wait_for(lambda: [c['state'] for c in only_one(l2.rpc.listpeers()['peers'])['channels']] == ['ONCHAIN', 'ONCHAIN']) # Figure out what address it will try to use. keyidx = int(l1.db_query("SELECT val FROM vars WHERE name='bip32_max_index';")[0]['val']) # Expect 1 for change address, 1 for the channel final address, # which are discarded as the 'scratch' tx that the fundchannel # plugin makes, plus 1 for the funding address of the actual # funding tx. addr = l1.rpc.call('dev-listaddrs', [keyidx + 3])['addresses'][-1] # Now, if we specify upfront and it's OK, all good. l1.stop() # We need to prepend the segwit version (0) and push opcode (14). 
    # (tail of test_option_upfront_shutdown_script: with a matching upfront
    # script, the close completes cooperatively - CLOSINGD_COMPLETE)
    l1.daemon.env["DEV_OPENINGD_UPFRONT_SHUTDOWN_SCRIPT"] = '0014' + addr['bech32_redeemscript']
    l1.start()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    l1.rpc.fundchannel(l2.info['id'], 1000000)
    l1.rpc.close(l2.info['id'])
    wait_for(lambda: sorted([c['state'] for c in only_one(l1.rpc.listpeers()['peers'])['channels']]) == ['CLOSINGD_COMPLETE', 'ONCHAIN', 'ONCHAIN'])
# ==== reader.py (start of a second, unrelated concatenated file) ====
# NOTE(review): this line was a bare filename marker left by whatever tool
# concatenated these sources; as a bare expression it raised NameError.
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import core import sys import six import numpy as np import threading import paddle from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places from .executor import global_scope from .data_feeder import DataFeeder, BatchedTensorProvider from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer from .unique_name import UniqueNameGenerator import logging from .dataset import DatasetBase, InMemoryDataset ### Dygraph DataLoader configs ### import multiprocessing import signal # NOTE: queue has a different name in python2 and python3 if sys.version_info[0] == 2: import Queue as queue else: import queue # NOTE: [ avoid hanging ] This value is used in getting data from another process MP_CHECK_TIMEOUT = 10 __all__ = ['PyReader', 'DataLoader'] data_loader_unique_name_generator = UniqueNameGenerator() def _convert_places(places): if not isinstance(places, (list, tuple)): places = [places] ret = [] for p in places: if not isinstance(p, core.Place): tmp = core.Place() tmp.set_place(p) p = tmp ret.append(p) return ret class DataLoaderBase(object): def __init__(self): self._places = None def __call__(self): return self def next(self): ''' Get the next item in the DataLoader object. This method should not be called by users directly. 
It is used for implementing iterator protocol of Python 2.x inside PaddlePaddle framework. ''' return self.__next__() def __iter__(self): raise NotImplementedError() def __next__(self): raise NotImplementedError() class DataLoader(object): @staticmethod def from_generator(feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False, use_multiprocess=False): """ Create a DataLoader object for loading data from Python generator. Data would be prefetched using Python thread and be pushed into a queue asynchronously. The created DataLoader object provides 3 methods to set the data source :code:`set_sample_generator` , :code:`set_sample_list_generator` and :code:`set_batch_generator` . Please see the following example codes to know their usages. If iterable = True, the created DataLoader object is a Python generator object, which is iterable using for-range loop. If iterable = False, the created DataLoader object provides :code:`start()` and :code:`reset()` method to control the data reading process. This mode is designed to be compatible with the :code:`fluid.layers.py_reader` interface. Users can migrate the codes from :code:`fluid.layers.py_reader` to :code:`fluid.io.DataLoader` easily when using iterable=False. Args: feed_list (list(Variable)|tuple(Variable)): feed variable list. The variables should be created by :code:`fluid.data()`. capacity (int): capacity of the queue maintained in DataLoader. The unit is batch number. Set larger capacity if your reader is fast. use_double_buffer (bool): whether to use double_buffer_reader. If use_double_buffer=True, the DataLoader would prefetch next batch data asynchronously, so it would speed up data feeding and occupies a little more CPU or GPU memory, i.e., the memory of one batch input data. iterable (bool): whether the created DataLoader is iterable. return_list (bool): whether the return value on each device is presented as a list. It is only valid when iterable=True. 
If return_list=False, the return value on each device would be a dict of str -> LoDTensor, where the key of the dict is the name of each feeded variables. If return_list=True, the return value on each device would be a list(LoDTensor). It is recommended to use return_list=False in static graph mode and use return_list=True in dygraph mode. use_multiprocess (bool): whether to use multi-process to speed up the data loading process in dygraph. Note: this parameter only can be used in the dygraph mode. In the static graph mode, whether this parameter is set or not has no effect. The Default value is False. Returns: loader (DataLoader): the created DataLoader object. Examples: .. code-block:: python import paddle.fluid as fluid import numpy as np BATCH_NUM = 10 BATCH_SIZE = 16 EPOCH_NUM = 4 CLASS_NUM = 10 ITERABLE = True # whether the created DataLoader object is iterable USE_GPU = False # whether to use GPU DATA_FORMAT = 'batch_generator' # data format of data source user provides def simple_net(image, label): fc_tmp = fluid.layers.fc(image, size=CLASS_NUM) cross_entropy = fluid.layers.softmax_with_cross_entropy(image, label) loss = fluid.layers.reduce_mean(cross_entropy) sgd = fluid.optimizer.SGD(learning_rate=1e-3) sgd.minimize(loss) return loss def get_random_images_and_labels(image_shape, label_shape): image = np.random.random(size=image_shape).astype('float32') label = np.random.random(size=label_shape).astype('int64') return image, label # If the data generator yields one sample each time, # use DataLoader.set_sample_generator to set the data source. def sample_generator_creator(): def __reader__(): for _ in range(BATCH_NUM * BATCH_SIZE): image, label = get_random_images_and_labels([784], [1]) yield image, label return __reader__ # If the data generator yield list of samples each time, # use DataLoader.set_sample_list_generator to set the data source. 
def sample_list_generator_creator(): def __reader__(): for _ in range(BATCH_NUM): sample_list = [] for _ in range(BATCH_SIZE): image, label = get_random_images_and_labels([784], [1]) sample_list.append([image, label]) yield sample_list return __reader__ # If the data generator yields a batch each time, # use DataLoader.set_batch_generator to set the data source. def batch_generator_creator(): def __reader__(): for _ in range(BATCH_NUM): batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1]) yield batch_image, batch_label return __reader__ # If DataLoader is iterable, use for loop to train the network def train_iterable(exe, prog, loss, loader): for _ in range(EPOCH_NUM): for data in loader(): exe.run(prog, feed=data, fetch_list=[loss]) # If DataLoader is not iterable, use start() and reset() method to control the process def train_non_iterable(exe, prog, loss, loader): for _ in range(EPOCH_NUM): loader.start() # call DataLoader.start() before each epoch starts try: while True: exe.run(prog, fetch_list=[loss]) except fluid.core.EOFException: loader.reset() # call DataLoader.reset() after catching EOFException def set_data_source(loader, places): if DATA_FORMAT == 'sample_generator': loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places) elif DATA_FORMAT == 'sample_list_generator': loader.set_sample_list_generator(sample_list_generator_creator(), places=places) elif DATA_FORMAT == 'batch_generator': loader.set_batch_generator(batch_generator_creator(), places=places) else: raise ValueError('Unsupported data format') image = fluid.data(name='image', shape=[None, 784], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') # Define DataLoader loader = fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE) # Define network loss = simple_net(image, label) # Set data source of DataLoader # # If DataLoader is iterable, 
places must be given and the number of places must be the same with device number. # - If you are using GPU, call `fluid.cuda_places()` to get all GPU places. # - If you are using CPU, call `fluid.cpu_places()` to get all CPU places. # # If DataLoader is not iterable, places can be None. places = fluid.cuda_places() if USE_GPU else fluid.cpu_places() set_data_source(loader, places) exe = fluid.Executor(places[0]) exe.run(fluid.default_startup_program()) prog = fluid.CompiledProgram(fluid.default_main_program()).with_data_parallel(loss_name=loss.name) if loader.iterable: train_iterable(exe, prog, loss, loader) else: train_non_iterable(exe, prog, loss, loader) ''' Users can use return_list = True in dygraph mode. ''' with fluid.dygraph.guard(places[0]): loader = fluid.io.DataLoader.from_generator(capacity=2, return_list=True) set_data_source(loader, places[0]) for image, label in loader(): relu = fluid.layers.relu(image) assert image.shape == [BATCH_SIZE, 784] assert label.shape == [BATCH_SIZE, 1] assert relu.shape == [BATCH_SIZE, 784] """ if in_dygraph_mode(): return DygraphGeneratorLoader(feed_list, capacity, use_double_buffer, iterable, return_list, use_multiprocess) else: return GeneratorLoader(feed_list, capacity, use_double_buffer, iterable, return_list) @staticmethod def from_dataset(dataset, places, drop_last=True): """ Create an iterable DataLoader object for loading data from Dataset. Dataset is only supported in Linux system currently. Args: dataset (InMemoryDataset|QueueDataset): the dataset object. places (list(CUDAPlace)|list(CPUPlace)): places where the result data should be converted. drop_last (bool): whether to drop the last batch whose sample number is less than batch size. If drop_last = True, they would be dropped. If drop_last = False, they would be kept. Returns: loader (DataLoader): the created DataLoader object, which can be treated as a Python generator. Examples: .. 
            code-block:: python

                import paddle.fluid as fluid

                image = fluid.data(name='image', shape=[None, 784], dtype='float32')
                label = fluid.data(name='label', shape=[None, 1], dtype='int64')

                dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
                dataset.set_batch_size(32)
                dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])
                dataset.set_use_var([image, label])
                dataset.set_pipe_command('cat')

                loader = fluid.io.DataLoader.from_dataset(dataset, fluid.cpu_places())
        """
        return DatasetLoader(dataset, places, drop_last)


class DygraphGeneratorLoader(DataLoaderBase):
    """
    The GeneratorLoader of dygraph

    The multiprocess dygraph GeneratorLoader's most functions are different from
    static graph GeneratorLoader, Separate implementation to keep code readable.
    """

    def __init__(self,
                 feed_list=None,
                 capacity=None,
                 use_double_buffer=True,
                 iterable=True,
                 return_list=False,
                 use_multiprocess=False):
        # Python generator producing whole batches; set by set_batch_generator().
        self._batch_reader = None
        self._places = None
        # NOTE: feed_list is unused in dygraph mode; kept for interface parity
        # with the static-graph GeneratorLoader.
        self._feed_list = feed_list

        if not capacity:
            raise ValueError("Please give value to capacity.")
        self._capacity = capacity
        self._use_double_buffer = use_double_buffer

        # Dygraph only supports iterable=True / return_list=True; silently
        # coerce (with a warning) rather than raising, for API compatibility.
        if not iterable:
            logging.warning(
                "Please NOTE: dygraph can support iterable mode only. Change to iterable mode."
            )
        self._iterable = True
        if not return_list:
            logging.warning(
                "Please NOTE: dygraph can support return as list only. Change to return as list."
            )
        self._return_list = True

        # NOTE: the multiprocessing in different platform is incompatible, we will solve it later
        self._use_multiprocess = use_multiprocess
        if self._use_multiprocess and (sys.platform == 'darwin' or
                                       sys.platform == 'win32'):
            logging.warning(
                "NOTE: The multiprocess mode does not currently support MacOs and Windows."
) self._use_multiprocess = False if self._use_multiprocess: # NOTE: the multiprocessing.Queue used to save loading data in self._process self._data_queue = None # NOTE: this process is used to load data asynchronously from self._batch_reader self._process = None # NOTE: the C++ LoDTensorBlockingQueue instance self._blocking_queue = None # NOTE: 1. In multiprocess mode, this thread is used to get next batch data from # self._data_queue, then push it into self._blocking_queue; 2. In singleprocess # mode, this thread is used to get next batch data from self._batch_reader, then # push it into self._blocking_queue self._thread = None @property def queue(self): return self._blocking_queue @property def iterable(self): return self._iterable def _wait_thread_ends(self): thread = self._thread if thread is not None: self._blocking_queue.close() thread.join() def _wait_process_ends(self): process = self._process if process is not None: self._data_queue.cancel_join_thread() self._data_queue.close() process.join() # erase process id core._erase_process_pid(id(self)) def _init_iterable(self): self._wait_thread_ends() if self._use_multiprocess: self._wait_process_ends() self._var_names = [] self._shapes = [] self._dtypes = [] self._need_check_feed = [] self._blocking_queue = core.init_lod_tensor_blocking_queue( core.Variable(), self._capacity) self._reader = core.create_py_reader( self.queue, self._var_names, self._shapes, self._dtypes, self._need_check_feed, self._places, self._use_double_buffer) def _start(self): if self._use_multiprocess: # Set data_queue and process self._data_queue = multiprocessing.Queue(self._capacity) self._process = multiprocessing.Process( target=self._reader_process_loop) self._process.daemon = True self._process.start() # Set child process signal handler # NOTE: [ avoiding hang ] 1. 
if the child process dies due to bus error/segfault # or just hang, the main process will hang waiting for data, so here need to deal # with SIGSEGV and SIGBUS of child process; 2. if the main process end before child # process, it shuts the all its daemonic children down with a SIGTERM (instead of # joining them without a timeout), so here nedd to deal with SIGTERM. self._set_child_signal_handler() # Set reader_thread self._thread_done_event = threading.Event() self._thread = threading.Thread( target=self._reader_thread_loop_with_process) self._thread.daemon = True self._thread.start() else: self._thread = threading.Thread(target=self._reader_thread_loop) self._thread.daemon = True self._thread.start() def _reset(self): self._reader.reset() self._wait_thread_ends() if self._use_multiprocess: self._wait_process_ends() def __iter__(self): assert self.iterable, "DataLoader is not iterable" assert self._batch_reader is not None, \ "Data source of DataLoader has not set yet" self._init_iterable() self._start() return self def __next__(self): try: return self._reader.read_next_var_list() except StopIteration: self._reset() six.reraise(*sys.exc_info()) @classmethod def _check_input_array(cls, item): arr = np.array(item) if arr.dtype == np.object: raise TypeError( "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually " "this means the input data contains nested lists with different lengths. 
" "\n\t* Check the reader function passed to 'decorate_batch_generator'" " to locate the data causes this issue.\n\t* Please consider using " "'fluid.create_lod_tensor' to convert it to a LoD-Tensor.") def _set_child_signal_handler(self): core._set_process_pid(id(self), self._process.pid) current_handler = signal.getsignal(signal.SIGCHLD) if not callable(current_handler): current_handler = None def __handler__(signum, frame): core._throw_error_if_process_failed() if current_handler is not None: current_handler(signum, frame) signal.signal(signal.SIGCHLD, __handler__) def _reader_process_loop(self): try: # set signal handler core._set_process_signal_handler() for sample in self._batch_reader(): if sample is None: raise ValueError( "Sample in reader is None. Please check whether your dataset is valid." ) self._data_queue.put(sample) self._data_queue.put(None) except KeyboardInterrupt: # NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process pass except: self._data_queue.cancel_join_thread() self._data_queue.close() six.reraise(*sys.exc_info()) def _reader_thread_loop_with_process(self): while not self._thread_done_event.is_set(): try: # NOTE: [ avoid hanging ] Even with carefully designed data dependencies # (i.e., a put() always corresponding to a get()), hanging on get() can # still happen when data in queue is corrupted (e.g., due to # Queue.cancel_join_thread or unexpected exit). 
So we set a timeout whenever # we try to get data from `data_queue` sample = self._data_queue.get(timeout=MP_CHECK_TIMEOUT) except queue.Empty: self._thread_done_event.set() logging.error("The reader has not read data for a long time.") if not self._thread_done_event.is_set(): if sample is not None: try: array = core.LoDTensorArray() for item in sample: if not isinstance(item, core.LoDTensor): self._check_input_array(item) tmp = core.LoDTensor() tmp.set(item, core.CPUPlace()) item = tmp array.append(item) if not self._blocking_queue.push(array): self._blocking_queue.close() except: self._thread_done_event.set() self._blocking_queue.kill() self._data_queue.close() logging.warning( "DygraphDataLoader reader thread raised an exception." ) six.reraise(*sys.exc_info()) else: self._thread_done_event.set() self._blocking_queue.close() self._data_queue.close() else: self._blocking_queue.kill() self._data_queue.close() def _reader_thread_loop(self): try: for sample in self._batch_reader(): array = core.LoDTensorArray() for item in sample: if not isinstance(item, core.LoDTensor): self._check_input_array(item) tmp = core.LoDTensor() tmp.set(item, core.CPUPlace()) item = tmp array.append(item) if not self._blocking_queue.push(array): break self._blocking_queue.close() self._thread = None except Exception: self._blocking_queue.kill() self._thread = None logging.warning( "DygraphDataLoader reader thread raised an exception.") six.reraise(*sys.exc_info()) def set_sample_generator(self, reader, batch_size, drop_last=True, places=None): assert batch_size > 0, "batch_size must be larger than 0" self.set_sample_list_generator( paddle.batch( reader, batch_size=batch_size, drop_last=drop_last), places=places) return self def set_sample_list_generator(self, reader, places=None): def __batch_reader_impl__(): for batch in reader(): slots = [] for items in batch: for i, item in enumerate(items): if len(slots) < len(items): slots.append([item]) else: slots[i].append(item) yield slots 
        self.set_batch_generator(__batch_reader_impl__, places)
        return self

    def set_batch_generator(self, reader, places=None):
        """Set a generator yielding whole batches; places are mandatory."""
        self._batch_reader = reader
        assert places is not None, "Places cannot be None when DataLoader is iterable"
        self._places = _convert_places(places)
        # Dygraph executes on a single device, hence exactly one place.
        assert len(self._places) == 1, \
            "Number of places must be 1 in dygraph mode"
        return self


class GeneratorLoader(DataLoaderBase):
    # Static-graph loader backing DataLoader.from_generator() / PyReader.

    def __init__(self,
                 feed_list=None,
                 capacity=None,
                 use_double_buffer=True,
                 iterable=True,
                 return_list=False):
        # Generator yielding LoDTensor batches; set by set_batch_generator().
        self._tensor_reader = None
        self._places = None
        self._thread = None
        self._queue = None
        self._feed_list = feed_list
        if not capacity:
            raise ValueError("Please give value to capacity.")
        self._iterable = iterable
        self._return_list = return_list
        if not self._feed_list:
            raise Exception("Feed list must be given under static mode.")
        self._use_double_buffer = use_double_buffer
        self._capacity = capacity
        # Non-iterable mode inserts read ops into the program right away;
        # iterable mode defers setup to _init_iterable() on each __iter__().
        if not self._iterable:
            self._init_non_iterable()

    def _wait_thread_ends(self):
        # Get self._thread first to prevent data race, because __thread_main__
        # would set self._thread be None at the end
        thread = self._thread
        if thread is not None and self._iterable:
            # Closing the queue unblocks the feeding thread so join() returns.
            self._queue.close()
            thread.join()

    def _init_iterable(self):
        """(Re)create the blocking queue and C++ py_reader for a new epoch."""
        self._wait_thread_ends()
        # Metadata of the feed variables, used by the C++ reader to validate
        # and route incoming tensors.
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        self._queue = core.init_lod_tensor_blocking_queue(core.Variable(),
                                                          self._capacity)
        self._reader = core.create_py_reader(
            self.queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_double_buffer)

    def _init_non_iterable(self):
        """Insert create_py_reader/read ops into the startup/main programs."""
        lod_levels = []
        dtypes = []
        shape_concat = []
        ranks = []
        shapes = []
        need_check_feed = []

        # Collect per-variable metadata; shape_concat flattens all shapes into
        # one list, with `ranks` recording how to split it back per variable.
        for feed_data in self._feed_list:
            dtypes.append(feed_data.dtype)
            shape_concat.extend(feed_data.shape)
            ranks.append(len(feed_data.shape))
            shapes.append(feed_data.shape)
            lod_levels.append(feed_data.lod_level)
            need_check_feed.append(int(feed_data.desc.need_check_feed()))

        # Fresh unique names so multiple loaders can coexist in one program.
        queue_name = data_loader_unique_name_generator(
            'lod_tensor_blocking_queue')
        reader_name = data_loader_unique_name_generator('create_py_reader')
        double_buffer_name = data_loader_unique_name_generator('double_buffer')

        var = global_scope().var(queue_name)
        self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity)

        startup_blk = default_startup_program().current_block()
        startup_var = startup_blk.create_var(name=reader_name)

        # Op attrs require plain ints, not dtype enums.
        dtype_int = [int(t) for t in dtypes]
        startup_blk.append_op(
            type='create_py_reader',
            inputs={'blocking_queue': [queue_name]},
            outputs={'Out': [startup_var]},
            attrs={
                'shape_concat': shape_concat,
                'lod_levels': lod_levels,
                'dtypes': dtype_int,
                'need_check_feed': need_check_feed,
                'ranks': ranks
            })

        startup_var.desc.set_dtypes(dtypes)
        startup_var.persistable = True

        # Mirror the reader variable into the main program.
        main_prog_var = _copy_reader_var_(
            default_main_program().current_block(), startup_var)

        main_prog_var.stop_gradient = True
        main_prog_var.persistable = True

        reader = monkey_patch_reader_methods(main_prog_var)
        if self._use_double_buffer:
            double_buffer_reader = double_buffer(
                reader, name=double_buffer_name)
            # we return a double buffer reader. However, the reset method comes from
            # py_reader.
double_buffer_reader.reset = reader.reset reader = double_buffer_reader self._reader = reader default_main_program().current_block().append_op( type='read', inputs={'Reader': [self._reader]}, outputs={'Out': self._feed_list}) @property def queue(self): return self._queue @property def iterable(self): return self._iterable def __iter__(self): assert self.iterable, "DataLoader is not iterable" assert self._tensor_reader is not None, \ "Data source of DataLoader has not set yet" self._init_iterable() self._start() return self def __next__(self): try: if self._return_list: return self._reader.read_next_list() else: return self._reader.read_next() except StopIteration: self._queue.close() self._reset() six.reraise(*sys.exc_info()) def start(self): assert not self._iterable, "start() cannot be called when DataLoader is iterable" self._start() def reset(self): assert not self._iterable, "reset() cannot be called when DataLoader is iterable" self._reset() @classmethod def _check_input_array(cls, item): arr = np.array(item) if arr.dtype == np.object: raise TypeError(( "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually " "this means the input data contains nested lists with different lengths. 
" "\n\t* Check the reader function passed to 'decorate_batch_generator'" " to locate the data causes this issue.\n\t* Please consider using " "'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")) def _start(self): def __thread_main__(): try: for tensors in self._tensor_reader(): array = core.LoDTensorArray() for item in tensors: if not isinstance(item, core.LoDTensor): self._check_input_array(item) tmp = core.LoDTensor() tmp.set(item, core.CPUPlace()) item = tmp array.append(item) if not self._queue.push(array): break self._queue.close() self._thread = None except Exception as ex: self._queue.kill() self._thread = None logging.warn('Your reader has raised an exception!') six.reraise(*sys.exc_info()) self._thread = threading.Thread(target=__thread_main__) self._thread.daemon = True self._thread.start() def _reset(self): self._queue.close() thread = self._thread if thread is not None: thread.join() self._reader.reset() def set_sample_generator(self, reader, batch_size, drop_last=True, places=None): assert batch_size > 0, "batch_size must be larger than 0" has_lod = False for f in self._feed_list: if f.lod_level != 0: has_lod = True break if has_lod: self.set_sample_list_generator( paddle.batch( reader, batch_size=batch_size, drop_last=drop_last), places=places) else: reader = BatchedTensorProvider( feed_list=self._feed_list, place=core.CPUPlace(), batch_size=batch_size, generator=reader, drop_last=drop_last) self.set_batch_generator(reader, places=places) return self def set_sample_list_generator(self, reader, places=None): with program_guard(Program(), Program()): feeder = DataFeeder( feed_list=self._feed_list, place=core.CPUPlace()) paddle_reader = feeder.decorate_reader(reader, multi_devices=False) def __tensor_reader_impl__(): for slots in paddle_reader(): yield [slots[var.name] for var in self._feed_list] self.set_batch_generator(__tensor_reader_impl__, places) return self def set_batch_generator(self, reader, places=None): self._tensor_reader = reader if 
self._iterable: assert places is not None, "Places cannot be None when DataLoader is iterable" self._places = _convert_places(places) else: if places is not None: logging.info( 'places would be ommited when DataLoader is not iterable') return self class PyReader(DataLoaderBase): """ Create a reader object for data feeding in Python. Data would be prefetched using Python thread and be pushed into a queue asynchronously. Data in the queue would be extracted automatically when `Executor.run(...)` is called. Args: feed_list (list(Variable)|tuple(Variable)): feed variable list. The variables should be created by :code:`fluid.layers.data()`. capacity (int): capacity of the queue maintained in PyReader. The unit is batch number. Set larger capacity if your reader is fast. use_double_buffer (bool): whether to use double_buffer_reader. If use_double_buffer=True, PyReader would prefetch next batch data asynchronously, so it would speed up data feeding and occupies a little more CPU or GPU memory, i.e., the memory of one batch input data. iterable (bool): whether the created PyReader is iterable. return_list (bool): whether the return value on each device is presented as a list. It is only valid when iterable=True. If return_list=False, the return value on each device would be a dict of str -> LoDTensor, where the key of the dict is the name of each feeded variables. If return_list=True, the return value on each device would be a list(LoDTensor). It is recommended to use return_list=False in static graph mode and use return_list=True in dygraph mode. Returns: the created reader object. Return type: reader(Reader) Examples: 1. If iterable = False, the created PyReader object is almost the same as :code:`fluid.layers.py_reader()`. Operators would be inserted into the program. User should call :code:`start()` before each epoch and catch :code:`fluid.core.EOFException` thrown by :code:`Executor.run()` when epoch ends. 
Once the exception is caught, user should call :code:`reset()` to reset the reader manually. .. code-block:: python import paddle import paddle.fluid as fluid import numpy as np EPOCH_NUM = 3 ITER_NUM = 5 BATCH_SIZE = 3 def network(image, label): # User-defined network, here is an example of softmax regression. predict = fluid.layers.fc(input=image, size=10, act='softmax') return fluid.layers.cross_entropy(input=predict, label=label) def reader_creator_random_image_and_label(height, width): def reader(): for i in range(ITER_NUM): fake_image = np.random.uniform(low=0, high=255, size=[height, width]) fake_label = np.ones([1]) yield fake_image, fake_label return reader image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=False) user_defined_reader = reader_creator_random_image_and_label(784, 784) reader.decorate_sample_list_generator( paddle.batch(user_defined_reader, batch_size=BATCH_SIZE)) loss = network(image, label) executor = fluid.Executor(fluid.CPUPlace()) executor.run(fluid.default_startup_program()) for i in range(EPOCH_NUM): reader.start() while True: try: executor.run(feed=None) except fluid.core.EOFException: reader.reset() break 2. If iterable=True, the created PyReader object is decoupled with the program. No operator would be inserted into the program. In this case, the created reader is a Python generator, which is iterable. User should feed the data yielded from PyReader object into :code:`Executor.run(feed=...)`. .. code-block:: python import paddle import paddle.fluid as fluid import numpy as np EPOCH_NUM = 3 ITER_NUM = 5 BATCH_SIZE = 10 def network(image, label): # User-defined network, here is an example of softmax regression. 
predict = fluid.layers.fc(input=image, size=10, act='softmax') return fluid.layers.cross_entropy(input=predict, label=label) def reader_creator_random_image(height, width): def reader(): for i in range(ITER_NUM): fake_image = np.random.uniform(low=0, high=255, size=[height, width]) fake_label = np.ones([1]) yield fake_image, fake_label return reader image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) user_defined_reader = reader_creator_random_image(784, 784) reader.decorate_sample_list_generator( paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), fluid.core.CPUPlace()) loss = network(image, label) executor = fluid.Executor(fluid.CPUPlace()) executor.run(fluid.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): executor.run(feed=data, fetch_list=[loss]) 3. If return_list=True, the return values would be presented as list instead of dict. This is usually used in dygraph mode. .. 
code-block:: python import paddle import paddle.fluid as fluid import numpy as np ITER_NUM = 5 BATCH_SIZE = 10 def reader_creator_random_image(height, width): def reader(): for i in range(ITER_NUM): yield np.random.uniform(low=0, high=255, size=[height, width]), \ np.random.random_integers(low=0, high=9, size=[1]) return reader place = fluid.CPUPlace() with fluid.dygraph.guard(place): py_reader = fluid.io.PyReader(capacity=2, return_list=True) user_defined_reader = reader_creator_random_image(784, 784) py_reader.decorate_sample_list_generator( paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), place) for image, label in py_reader(): relu = fluid.layers.relu(image) """ def __init__(self, feed_list=None, capacity=None, use_double_buffer=True, iterable=True, return_list=False): self._loader = DataLoader.from_generator( feed_list, capacity, use_double_buffer, iterable, return_list) @property def queue(self): return self._loader.queue @property def iterable(self): return self._loader.iterable def __iter__(self): return self._loader.__iter__() def __next__(self): return self._loader.__next__() def start(self): ''' Start the data feeding thread. Can only call when the reader object is not iterable. Example: .. code-block:: python import paddle import paddle.fluid as fluid import numpy as np BATCH_SIZE = 10 def generator(): for i in range(5): yield np.random.uniform(low=0, high=255, size=[784, 784]), image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) reader.decorate_sample_list_generator( paddle.batch(generator, batch_size=BATCH_SIZE)) executor = fluid.Executor(fluid.CPUPlace()) executor.run(fluid.default_startup_program()) for i in range(3): reader.start() while True: try: executor.run(feed=None) except fluid.core.EOFException: reader.reset() break ''' self._loader.start() def reset(self): ''' Reset the reader object when :code:`fluid.core.EOFException` raises. 
Can only call when the reader object is not iterable. Example: .. code-block:: python import paddle import paddle.fluid as fluid import numpy as np BATCH_SIZE = 10 def generator(): for i in range(5): yield np.random.uniform(low=0, high=255, size=[784, 784]), image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) reader.decorate_sample_list_generator( paddle.batch(generator, batch_size=BATCH_SIZE)) executor = fluid.Executor(fluid.CPUPlace()) executor.run(fluid.default_startup_program()) for i in range(3): reader.start() while True: try: executor.run(feed=None) except fluid.core.EOFException: reader.reset() break ''' self._loader.reset() def decorate_sample_generator(self, sample_generator, batch_size, drop_last=True, places=None): ''' Set the data source of the PyReader object. The provided :code:`sample_generator` should be a Python generator, which yields list(numpy.ndarray)-typed data of each sample. :code:`places` must be set when the PyReader object is iterable. If all inputs have no lods, this method is faster than :code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` . Args: sample_generator (generator): Python generator that yields list(numpy.ndarray)-typed sample data. batch_size (int): batch size. Must be larger than 0. drop_last (bool): Whether to drop the last batch when sample number is less than batch_size. places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must be provided when PyReader is iterable. Example: .. code-block:: python import paddle.fluid as fluid import numpy as np EPOCH_NUM = 3 ITER_NUM = 15 BATCH_SIZE = 3 def network(image, label): # User-defined network, here is an example of softmax regression. 
predict = fluid.layers.fc(input=image, size=10, act='softmax') return fluid.layers.cross_entropy(input=predict, label=label) def random_image_and_label_generator(height, width): def generator(): for i in range(ITER_NUM): fake_image = np.random.uniform(low=0, high=255, size=[height, width]) fake_label = np.array([1]) yield fake_image, fake_label return generator image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) reader.decorate_sample_generator(user_defined_generator, batch_size=BATCH_SIZE, places=[fluid.CPUPlace()]) loss = network(image, label) executor = fluid.Executor(fluid.CPUPlace()) executor.run(fluid.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): executor.run(feed=data, fetch_list=[loss]) ''' self._loader.set_sample_generator(sample_generator, batch_size, drop_last, places) def decorate_sample_list_generator(self, reader, places=None): ''' Set the data source of the PyReader object. The provided :code:`reader` should be a Python generator, which yields list(numpy.ndarray) typed batched data. :code:`places` must be set when the PyReader object is iterable. Args: reader (generator): Python generator that yields list(numpy.ndarray)-typed batched data. places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must be provided when PyReader is iterable. Example: .. code-block:: python import paddle import paddle.fluid as fluid import numpy as np EPOCH_NUM = 3 ITER_NUM = 15 BATCH_SIZE = 3 def network(image, label): # User-defined network, here is an example of softmax regression. 
predict = fluid.layers.fc(input=image, size=10, act='softmax') return fluid.layers.cross_entropy(input=predict, label=label) def random_image_and_label_generator(height, width): def generator(): for i in range(ITER_NUM): fake_image = np.random.uniform(low=0, high=255, size=[height, width]) fake_label = np.ones([1]) yield fake_image, fake_label return generator image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) reader.decorate_sample_list_generator( paddle.batch(user_defined_generator, batch_size=BATCH_SIZE), fluid.core.CPUPlace()) loss = network(image, label) executor = fluid.Executor(fluid.core.CPUPlace()) executor.run(fluid.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): executor.run(feed=data, fetch_list=[loss]) ''' self._loader.set_sample_list_generator(reader, places) def decorate_batch_generator(self, reader, places=None): ''' Set the data source of the PyReader object. The provided :code:`reader` should be a Python generator, which yields numpy.ndarray-typed or LoDTensor-typed batched data. :code:`places` must be set when the PyReader object is iterable. Args: reader (generator): Python generator that yields LoDTensor-typed batched data. places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must be provided when PyReader is iterable. Example: .. code-block:: python import paddle.fluid as fluid import numpy as np EPOCH_NUM = 3 ITER_NUM = 15 BATCH_SIZE = 3 def network(image, label): # User-defined network, here is an example of softmax regression. 
predict = fluid.layers.fc(input=image, size=10, act='softmax') return fluid.layers.cross_entropy(input=predict, label=label) def random_image_and_label_generator(height, width): def generator(): for i in range(ITER_NUM): batch_image = np.random.uniform(low=0, high=255, size=[BATCH_SIZE, height, width]) batch_label = np.ones([BATCH_SIZE, 1]) batch_image = batch_image.astype('float32') batch_label = batch_label.astype('int64') yield batch_image, batch_label return generator image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32') label = fluid.data(name='label', shape=[None, 1], dtype='int64') reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace()) loss = network(image, label) executor = fluid.Executor(fluid.CPUPlace()) executor.run(fluid.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): executor.run(feed=data, fetch_list=[loss]) ''' self._loader.set_batch_generator(reader, places) class DatasetLoader(DataLoaderBase): def __init__(self, dataset, places, drop_last): assert isinstance(dataset, DatasetBase), "dataset must be type of DatasetBase" assert not in_dygraph_mode( ), "DatasetLoader is not supported in dygraph mode yet" thread_num = len(places) assert len(dataset.filelist) >= thread_num, \ "Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num) if dataset.thread_num != 0 and dataset.thread_num != thread_num: logging.warn('thread_num {} which is set in Dataset is ignored'. format(dataset.thread_num)) dataset.set_thread(thread_num) if isinstance(dataset, InMemoryDataset) and dataset.queue_num > thread_num: logging.warn("queue_num {} which is set in Dataset is ignored". 
format(dataset.queue_num)) dataset.set_queue_num(thread_num) self._dataset = dataset use_slots = [ slot.name for slot in dataset.proto_desc.multi_slot_desc.slots if slot.is_used ] self._iterable_dataset = core.IterableDatasetWrapper( dataset.dataset, use_slots, _convert_places(places), dataset.proto_desc.batch_size, drop_last) def __iter__(self): self._dataset._finish_to_run() self._dataset._prepare_to_run() self._iterable_dataset._start() return self def __next__(self): return self._iterable_dataset._next()
chord.py
import requests
import pickle
import time
from time import time  # NOTE(review): shadows the `time` module imported above; code below only uses time() the function
import threading
import asyncio
from Node import Node
from config import *
from flask import Blueprint, request

# Single chord node instance shared by every route in this blueprint.
node = Node()
chord = Blueprint('chord', __name__)

'''
This is the single point in the application used for returning to the client and terminating actions.
'''
@chord.route("/eureka", methods=['POST'])
def eureka():
    # Terminal endpoint of a forwarded chord message: format the outcome and
    # park it in node.ready under the request's timestamp key.
    # NOTE(review): pickle.loads on network data is unsafe for untrusted peers.
    data = pickle.loads(request.get_data())
    if(data['action'] == OVERLAY):
        # data['value'] selects the output format (list vs chain rendering).
        if(data['value']):
            node.ready[data['time']] = '\n'.join(data['node_list'])
        else:
            node.ready[data['time']] = ' -> '.join(data['node_list'])
        return node.ready[data['time']]
    elif(data['action'] == SEARCH):
        node.ready[data['time']] = '{}:{} -> {}'.format(data['succ_IP'], data['succ_port'], data['value'])
        return node.ready[data['time']]
    elif(data['action'] == INSERT):
        node.ready[data['time']] = '{}:{} -> ({}, {})'.format(data['succ_IP'], data['succ_port'], data['key'], data['value'][data['key']])
        return node.ready[data['time']]
    elif(data['action'] == DELETE):
        # Empty data['value'] means the key was not found on the owner node.
        if(not data['value']):
            node.ready[data['time']] = '{}:{} -> The requested key was not found.'.format(data['succ_IP'], data['succ_port'])
        else:
            node.ready[data['time']] = 'Record ({}, {}) deleted from {}:{} successfully!'.format(data['key'], data['value'][data['key']], data['succ_IP'], data['succ_port'])
        return node.ready[data['time']]
    elif(data['action'] == JOIN):
        # Join reply: adopt cluster parameters and our neighbours as reported.
        node.k = data['value']['k']
        node.consistency = data['value']['consistency']
        node.succ['ID'] = data['succ_ID']
        node.succ['IP'] = data['succ_IP']
        node.succ['port'] = data['succ_port']
        node.pred['ID'] = data['pred_ID']
        node.pred['IP'] = data['pred_IP']
        node.pred['port'] = data['pred_port']
        print("Now I know who my successor is, I shall claim what is righteously mine!")
        '''
        -Receive replicas from predecessor
        -Receive records for which I am responsible (k==1) from successor (request->receive)
        -Replicate my whole storage and forward
        * Sequence initiated with notify_predecessor needs to be completed first!
        '''
        timestamp = str(time())
        node.ready[timestamp] = ""
        if node.k == 1:
            # No replication: plain notify then pull our keys.
            node.notify_predecessor(timestamp)
            node.request_items(timestamp)
        else:
            # NOTE(review): barrier() busy-waits on node.ready[timestamp] being
            # filled in by a concurrent request handler — CPU spin, and it
            # relies on the flask server running multi-threaded; confirm.
            async def barrier():
                while(not node.ready[timestamp]):
                    pass
                return node.ready[timestamp]
            async def req():
                node.notify_predecessor(timestamp)
                return "Notified..."
            async def do():
                res2 = loop.create_task(req())
                res1 = loop.create_task(barrier())
                await asyncio.wait([res1, res2])
                return res1
            asyncio.set_event_loop(asyncio.new_event_loop())
            loop = asyncio.get_event_loop()
            loop.run_until_complete(do())
            loop.close()
            print("NOW I CAN TALK TO MY SUCCESSOR AT LAST.")
            node.request_items(timestamp)
        # Synchronisation slot is no longer needed once the join completed.
        node.ready.pop(timestamp)
    return "200"


@chord.route('/join', methods=['POST'])
def join():
    # Entry point for a joining node: stamp the cluster parameters onto the
    # message and route it towards the joiner's successor.
    data = pickle.loads(request.get_data())
    data['value']['k'] = node.k
    data['value']['consistency'] = node.consistency
    data['consistency'] = node.consistency
    node.find_successor(data)
    print("Forwarded query...")
    return "200"


'''
Most actions pass through /query. This is the entry point for find_successor.
Any replication action gets dealt with here.
'''
@chord.route('/query', methods=['POST'])
def query():
    data = pickle.loads(request.get_data())
    repl = {}
    # Replication-related messages get local storage handling before being
    # forwarded; everything else falls straight through to find_successor.
    condition = (data['action'] == INS_REPL) or (data['action'] == DEL_REPL) or (data['action'] == REPL)
    if(condition and (node.k > 1)):
        # data['value'] maps key -> (value, replica_num); empty tuple () is a
        # deletion marker for trailing replicas.
        for t in data['value'].items():
            '''
            - If repl_num < k we need to increase and forward.
            * When forwarding we update the key with the successor's ID
            - If repl_num == k we need to stop creating new replicas. However, there is a chance
            a new node has been assigned keys for which their predecessor used to hold the last
            replica. In this case, we need to notify the previous tail, so as to delete any
            trailing replicas.
            - If the replication tail is found (either the node with repl_num == k or its
            successor who identifies an overlap):
            * We need to update 'action' so that it matches no other action, hence going
            straight to /eureka.
            * No further action is needed at this point, dest_ID/IP/port already hold the
            desired values.
            '''
            overlap = 0
            if(not t[1]):
                '''
                If there's a new tail, delete local replica:
                - ONLY IF THERE IS NO OVERLAP (i.e. the current node doesn't hold the record
                with replica_num == 1)
                '''
                if((data['action'] == INS_REPL) or (data['action'] == REPL)):
                    # NOTE(review): `get` shadows the dict.get builtin name.
                    try:
                        get = node.storage[t[0]]
                        if(get[1] == node.k):
                            node.storage.pop(t[0])
                    except:
                        pass
                elif(data['action'] == DEL_REPL):
                    try:
                        value = node.storage.pop(t[0])
                        if(value[1] < node.k):
                            # Not the tail yet: propagate the deletion marker.
                            repl[t[0]] = ()
                            data['key'] = node.succ['ID']
                        elif (data['consistency'] == LINEARIZABILITY):
                            data['action'] = STOP_DEL
                    except:
                        # Key already absent: treat as overlap so no node_list
                        # entry (which reads `value`) is attempted below.
                        if (data['consistency'] == LINEARIZABILITY):
                            data['action'] = STOP_DEL
                        # pass
                        overlap = 1
                    '''
                    For the case of STOP_DEL:
                    A tuple containing:
                    0: a dict with the node's ID/IP/port
                    1: a dict with the deleted key-value pair
                    '''
                    if not overlap:
                        data['node_list'].append(({
                            'ID': node.ID,
                            'IP': node.IP,
                            'port': node.port
                        }, {
                            'key': t[0],
                            'value': value
                        }))
            else:
                '''
                Overwite replica only if abs(new_replica_num - prev_replica_num) <= 1:
                We need to avoid overlaps (possible on joining).
                '''
                try:
                    gotIt = node.storage[t[0]]
                    if((abs(gotIt[1] - t[1][1]) <= 1)): # and (node.succ['ID'] != node.ID)) or ((node.succ['ID'] == node.ID) and (gotIt[1] == t[1][1])) and (not fwd_to_self) gotIt[1] >= t[1][1]
                        node.storage[t[0]] = t[1]
                    elif ((data['consistency'] == LINEARIZABILITY) and (data['action'] == INS_REPL)):
                        data['action'] = STOP_INS
                        overlap = 1
                except:
                    node.storage[t[0]] = t[1]  # just insert to storage if no older copy was found
                if((t[1][1] < node.k)):
                    repl[t[0]] = (t[1][0], t[1][1] + 1)  # prepare to forward
                    data['key'] = node.succ['ID']
                else:
                    repl[t[0]] = ()  # forward replica deletion message
                    '''
                    The current node has inserted a replica with replica_num == k.
                    In the case of linearizability:
                    - If we are in the middle of an insert operation:
                    * We need to return to the client by hitting /eureka.
                    * The deletion message to the successor will be sent by a different thread
                    inside find_successor
                    In any other case (eventual consistency or basic replication), we are simply
                    preparing to forward (data['key'] = node.succ['ID'])
                    '''
                    if ((data['consistency'] == LINEARIZABILITY) and (data['action'] == INS_REPL)):
                        data['action'] = STOP_INS
                    else:
                        data['key'] = node.succ['ID']
                    '''
                    In the case of STOP_INS:
                    A tuple containing:
                    0: a dict with the node's ID/IP/port
                    1: a dict with the inserted key-value pair
                    '''
                    if not overlap:
                        data['node_list'].append(({
                            'ID': node.ID,
                            'IP': node.IP,
                            'port': node.port
                        }, {
                            'key': t[0],
                            'value': t[1]
                        }))
    # 'indlovu'-suffixed timestamps mark internal synchronisation requests.
    wena = data['time'].split('indlovu')
    if(len(wena) == 1):
        node.ready[wena[0]] = "Synchronised notify predecessor-receive from successor for replication purposes..."
    data['value'] = repl.copy()
    node.find_successor(data)
    return "200"


@chord.route('/notify', methods=['POST'])
def notify():
    # A node joined in front of us: adopt it as successor and push it one
    # extra replica hop of everything we hold below replica_num k.
    data = pickle.loads(request.get_data())
    node.succ['ID'] = data['ID']
    node.succ['IP'] = data['IP']
    node.succ['port'] = data['port']
    print("Noted...")
    '''
    Someone entered in front of me:
    -Send everything to be replicated!
    '''
    repl = {}
    if(node.k > 1 and (node.ID != node.succ['ID'])):
        for item in node.storage.items():
            if(item[1][1] < node.k):
                repl[item[0]] = (item[1][0], item[1][1] + 1)
        args = {
            'dest_ID': node.ID,
            'dest_IP': node.IP,
            'dest_port': node.port,
            'key': node.succ['ID'],
            'action': REPL,
            'consistency': node.consistency,
            'node_list': [],
            'value': repl,
            'time': data['time']
        }
        endpoint = 'http://' + node.succ['IP'] + ":" + str(node.succ['port']) + "/query"
        # Fire-and-forget: post on a worker thread so this handler returns
        # immediately and cannot deadlock with the caller.
        def thread_function():
            requests.post(endpoint, data=pickle.dumps(args))
        req = threading.Thread(target=thread_function, args=())
        req.start()
    return "Forwarded..."


'''
Gets run by the successor of a newly inserted node.
''' @chord.route('/requestItems', methods=['POST']) def requestItems(): data = pickle.loads(request.get_data()) node.pred['ID'] = data['ID'] node.pred['IP'] = data['IP'] node.pred['port'] = data['port'] node.send_items(data['time']) return "200" ''' Gets run by a newly inserted node, after its successor has dispatched any items for which the former is now responsible, inserting them into the local storage and initiating any replication specific actions. ''' @chord.route('/receiveItems', methods=['POST']) def receiveItems(): data = pickle.loads(request.get_data()) for t in data['storage'].items(): node.storage[t[0]] = t[1] print("Got them...") repl = {} if(node.k > 1): ''' Iterating over my whole storage, initiating replication where necessary. ''' for item in node.storage.items(): if(item[1][1] < node.k): repl[item[0]] = (item[1][0], item[1][1] + 1) if repl: args = { 'dest_ID': node.ID, 'dest_IP': node.IP, 'dest_port': node.port, 'key': node.pred['ID'], 'action': REPL, 'consistency': node.consistency, 'node_list': [], 'value': repl, 'time': data['time'] } endpoint = 'http://' + node.succ['IP'] + ":" + str(node.succ['port']) + "/query" response = requests.post(endpoint, data=pickle.dumps(args)) return response.text else: return "200" ''' My predecessor has just left, leaving all these items with me. I should update my predecessor info, notify the latter accordingly and start a replication process wherever needed. 
''' @chord.route('/departure', methods=['POST']) def departure(): print("Ta pame.") data = pickle.loads(request.get_data()) node.pred['ID'] = data['ID'] node.pred['IP'] = data['IP'] node.pred['port'] = data['port'] node.notify_predecessor(data['time']) repl = {} if(node.k > 1): for t in data['storage'].items(): try: got = node.storage[t[0]] except: got = () if((not got) or (got[1] > t[1][1])): node.storage[t[0]] = t[1] if(t[1][1] < node.k): repl[t[0]] = (t[1][0], t[1][1] + 1) if repl: data['dest_ID'] = node.ID data['dest_IP'] = node.IP data['dest_port'] = node.port data['key'] = node.pred['ID'] # this could probably be succ['ID'], but it's ok data['action'] = REPL data['consistency'] = node.consistency, data['node_list'] = [] data['value'] = repl node.find_successor(data) return "200"
Thread_Communication_Queue.py
# Thread communication Queue from threading import Thread from queue import Queue from time import sleep class Producer: def __init__(self): self.q = Queue() def produce(self): for i in range(1, 6): print("Item Produce", i) self.q.put(i) sleep(1) class Consumer: def __init__(self, prod): self.prod = prod def consume(self): for i in range(1, 6): print("Item Recevied : ", self.prod.q.get(i)) p = Producer() c = Consumer(p) t1 = Thread(target=p.produce) t2 = Thread(target=c.consume) t1.start() t2.start()
discoverer.py
import sys
import struct
import socket
from threading import Thread
from logging import getLogger

from . constants import MAGIC
from . utils import str_to_bytes

LOG = getLogger(__name__)


class Discoverer:
    '''
    Emulates a SSDP-like discovery protocol on a specified discovery port.

    Each Discoverer has a (preferably unique) ID. Upon startup, a Discoverer
    announces its ID, then begins listening for other announcements. Upon
    hearing an announcement from an ID it does not recognize, it adds the ID
    to a set and reannounces its own ID.
    '''

    def __init__(self, id, port=50000, callback=None):
        """Start announcing and listening immediately.

        Args:
            id: identifier for this node; converted to bytes for the wire.
            port: UDP port used for broadcast discovery.
            callback: optional function called with the integer form of each
                newly discovered ID.
        """
        self.id = str_to_bytes(id)
        self.port = port
        # optional function that will be called upon new ID discovery.
        self.callback = callback
        # set that holds previously seen IDs; seeded with our own ID so we
        # never treat our own broadcasts as a discovery.
        self.friends = set()
        self.friends.add(self.id)
        # listener thread (daemon: must not block interpreter shutdown)
        self.listening = True
        self.listener = Thread(target=self.listen)
        self.listener.daemon = True
        self.listener.start()

    def announce(self):
        '''Broadcasts our ID.'''
        self.socket.sendto(MAGIC + self.id, ('<broadcast>', self.port))
        LOG.debug(f'Discoverer {self.id} announcing.')

    def initialize_socket(self):
        """Build a UDP broadcast socket that multiple clients can bind to."""
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.join = False
        # Address reuse is best-effort and platform-dependent.
        # Bug fix: the original used bare `except:` clauses here and in
        # listen(), which also swallow KeyboardInterrupt/SystemExit.
        try:
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        except OSError:
            pass
        try:
            # SO_REUSEPORT does not exist on every platform (AttributeError).
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        except (OSError, AttributeError):
            pass
        # 1-second receive timeout, packed as a struct timeval whose field
        # width depends on the platform word size.
        if sys.maxsize > 2**32:
            time = struct.pack("ll", 1, 0)
        else:
            time = struct.pack("ii", 1, 0)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO, time)
        self.socket.bind(('', self.port))

    def listen(self):
        '''
        Listens for broadcasts on a loop.

        Upon hearing a broadcast, check to see if the ID isn't recognized.
        If it isn't, add it to our list of heard IDs, announce ourselves,
        and call our callback.
        '''
        self.initialize_socket()
        self.announce()
        while self.listening:
            try:
                data, addr = self.socket.recvfrom(1024)
            except OSError:
                # Receive timeout (or transient socket error): loop again so
                # self.listening is re-checked about once per second.
                continue
            try:
                if data.startswith(MAGIC):
                    recv_id = data[len(MAGIC):]
                    if recv_id not in self.friends:
                        self.friends.add(recv_id)
                        LOG.debug(f'Discoverer {self.id} found {recv_id}.')
                        if self.callback:
                            self.callback(int(recv_id))
                        self.announce()
            except Exception:
                # A malformed packet or a failing callback must not kill the
                # listener thread; log it instead of silently dropping it.
                LOG.exception(f'Discoverer {self.id} failed to handle packet.')
        LOG.debug(f'Discoverer {self.id} closed.')

    def close(self):
        """Ask the listener loop to stop after its current timeout expires."""
        self.listening = False
utils.py
# -*- coding:utf-8 -*- """ Author: Weichen Shen,weichenswc@163.com """ import json import logging from threading import Thread import requests try: from packaging.version import parse except ImportError: from pip._vendor.packaging.version import parse def check_version(version): """Return version of package on pypi.python.org using json.""" def check(version): try: url_pattern = 'https://pypi.python.org/pypi/deepctr/json' req = requests.get(url_pattern) latest_version = parse('0') version = parse(version) if req.status_code == requests.codes.ok: j = json.loads(req.text.encode('utf-8')) releases = j.get('releases', []) for release in releases: ver = parse(release) if ver.is_prerelease or ver.is_postrelease: continue latest_version = max(latest_version, ver) if latest_version > version: logging.warning( '\nDeepCTR version {0} detected. Your version is {1}.\nUse `pip install -U deepctr` to upgrade.Changelog: https://github.com/shenweichen/DeepCTR/releases/tag/v{0}'.format( latest_version, version)) except: print("Please check the latest version manually on https://pypi.org/project/deepctr/#history") return Thread(target=check, args=(version,)).start()
app.py
# Bug fix: the original did `from flask import ... Session` (flask exports no
# `Session`; that import raises ImportError) and then used the class itself as
# a subscriptable store for a multiprocessing.Process. A server-side Process
# handle cannot live in a client session anyway; keep it in module state.
from flask import Flask, jsonify, render_template, request
import os
from time import sleep
from multiprocessing import Process
import configparser
from spotify_background_color import SpotifyBackgroundColor
from current_spotify_playback import CurrentSpotifyPlayback, NoArtworkException
from led_controller import LEDController

app = Flask(__name__)

CLIENT_ID = os.environ.get('SPOTIPY_CLIENT_ID')
CLIENT_SECRET = os.environ.get('SPOTIPY_CLIENT_SECRET')
REDIRECT_URI = os.environ.get('SPOTIPY_REDIRECT_URI')
REFRESH_TOKEN = os.environ.get('SPOTIPY_REFRESH_TOKEN')

# Module-level holder for the background Spotify-sync process.
_state = {'process': None}


def _terminate_process():
    """Stop the background Spotify-sync process if it is running."""
    p = _state['process']
    if p is not None and p.is_alive():
        p.terminate()


@app.route('/')
def main():
    return render_template('index.html')


@app.route('/spotify')
def spotify():
    # NOTE: this view-function name is shadowed by the module-level `spotify`
    # playback object assigned under __main__; the route keeps working because
    # Flask holds its own reference to the function.
    p = _state['process']
    if p is None or not p.is_alive():
        p = Process(target=main_spotify, args=())
        p.start()
        # Bug fix: the original never stored the restarted process back, so
        # every later request spawned yet another process.
        _state['process'] = p
    return render_template('spotify.html')


@app.route('/manual')
def manual():
    _terminate_process()
    return render_template('manual.html')


@app.route('/color', methods=['GET', 'POST'])
def color():
    if request.method == 'POST':
        data = request.json
        r = data['r']
        g = data['g']
        b = data['b']
        led.set_color(r, g, b, delay=0)
        return jsonify(status='updating', data=data)
    else:
        curr_r, curr_g, curr_b = led.get_color()
        return jsonify(status='current',
                       data={'r': curr_r, 'g': curr_g, 'b': curr_b})


@app.route('/off')
def off():
    _terminate_process()
    led.set_color(0, 0, 0)
    return render_template('off.html')


def main_spotify():
    """Poll Spotify playback every 2s and drive the LEDs to match artwork."""
    old_song_id = ''
    while True:
        spotify.update_current_playback()
        if spotify.connected_to_chromecast(name):
            if spotify.new_song(old_song_id):
                try:
                    artwork = spotify.get_artwork()
                    background_color = SpotifyBackgroundColor(
                        img=artwork, image_processing_size=(100, 100))
                    r, g, b = background_color.best_color(k=8, color_tol=0)
                except NoArtworkException:
                    # Fall back to plain white when the track has no artwork.
                    r, g, b = 255, 255, 255
                led.set_color(r, g, b)
                old_song_id = spotify.get_current_song_id()
        else:
            # Not casting: forget the song and switch the LEDs off once.
            old_song_id = ''
            r, g, b = led.get_color()
            if r != 0 or g != 0 or b != 0:
                led.set_color(0, 0, 0)
        sleep(2)


if __name__ == '__main__':
    config = configparser.ConfigParser()
    config.read('config.ini')
    GPIO_PINS = config['GPIO PINS']
    red_pin = int(GPIO_PINS['red_pin'])
    green_pin = int(GPIO_PINS['green_pin'])
    blue_pin = int(GPIO_PINS['blue_pin'])
    name = config['CHROMECAST']['name']
    led = LEDController(red_pin, green_pin, blue_pin)
    spotify = CurrentSpotifyPlayback(CLIENT_ID, CLIENT_SECRET, REDIRECT_URI,
                                     REFRESH_TOKEN)
    app.run(host='0.0.0.0')
dispatcher.py
#!/usr/bin/env python3 # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Script to run on the dispatcher. Builds each benchmark with each fuzzing configuration, spawns a runner VM for each benchmark-fuzzer combo, and then records coverage data received from the runner VMs.""" import datetime import multiprocessing import os import sys import threading import time from typing import List from common import experiment_path as exp_path from common import experiment_utils from common import logs from common import yaml_utils from database import models from database import utils as db_utils from experiment.build import builder from experiment.measurer import measure_manager from experiment import reporter from experiment import scheduler from experiment import stop_experiment LOOP_WAIT_SECONDS = 5 * 60 # TODO(metzman): Convert more uses of os.path.join to exp_path.path. 
def _get_config_dir():
    """Return config directory."""
    return exp_path.path(experiment_utils.CONFIG_DIR)


def create_work_subdirs(subdirs: List[str]):
    """Create |subdirs| in work directory."""
    for subdir in subdirs:
        os.mkdir(os.path.join(experiment_utils.get_work_dir(), subdir))


def _initialize_experiment_in_db(experiment_config: dict):
    """Initializes |experiment| in the database by creating the experiment
    entity.

    Raises:
        Exception: if an experiment with this name is already recorded.
    """
    with db_utils.session_scope() as session:
        experiment_exists = session.query(models.Experiment).filter(
            models.Experiment.name == experiment_config['experiment']).first()
        if experiment_exists:
            raise Exception('Experiment already exists in database.')

    db_utils.add_all([
        db_utils.get_or_create(
            models.Experiment,
            name=experiment_config['experiment'],
            git_hash=experiment_config['git_hash'],
            # Experiments default to private unless explicitly overridden.
            private=experiment_config.get('private', True),
            experiment_filestore=experiment_config['experiment_filestore'],
            description=experiment_config['description']),
    ])


def _record_experiment_time_ended(experiment_name: str):
    """Record |experiment| end time in the database."""
    with db_utils.session_scope() as session:
        experiment = session.query(models.Experiment).filter(
            models.Experiment.name == experiment_name).one()
    experiment.time_ended = datetime.datetime.utcnow()
    db_utils.add_all([experiment])


def _initialize_trials_in_db(trials: List[models.Trial]):
    """Initializes entities for each trial in the experiment."""
    # TODO(metzman): Consider doing this without sqlalchemy. This can get
    # slow with SQLalchemy (it's much worse with add_all).
    db_utils.bulk_save(trials)


class Experiment:  # pylint: disable=too-many-instance-attributes
    """Class representing an experiment."""

    def __init__(self, experiment_config_filepath: str):
        # All attributes are read straight from the experiment YAML config.
        self.config = yaml_utils.read(experiment_config_filepath)

        self.benchmarks = self.config['benchmarks']
        self.fuzzers = self.config['fuzzers']
        self.num_trials = self.config['trials']
        self.experiment_name = self.config['experiment']
        self.git_hash = self.config['git_hash']
        # Optional key: None when preemptible runners are not configured.
        self.preemptible = self.config.get('preemptible_runners')


def build_images_for_trials(fuzzers: List[str], benchmarks: List[str],
                            num_trials: int,
                            preemptible: bool) -> List[models.Trial]:
    """Builds the images needed to run |experiment| and returns a list of
    trials that can be run for experiment. This is the number of trials
    specified in experiment times each pair of fuzzer+benchmark that builds
    successfully."""
    # This call will raise an exception if the images can't be built which will
    # halt the experiment.
    builder.build_base_images()

    # Only build fuzzers for benchmarks whose measurers built successfully.
    benchmarks = builder.build_all_measurers(benchmarks)
    build_successes = builder.build_all_fuzzer_benchmarks(fuzzers, benchmarks)
    experiment_name = experiment_utils.get_experiment_name()
    trials = []
    # One Trial row per requested repetition of each successful pair.
    for fuzzer, benchmark in build_successes:
        fuzzer_benchmark_trials = [
            models.Trial(fuzzer=fuzzer,
                         experiment=experiment_name,
                         benchmark=benchmark,
                         preemptible=preemptible) for _ in range(num_trials)
        ]
        trials.extend(fuzzer_benchmark_trials)
    return trials


def dispatcher_main():
    """Do the experiment and report results."""
    logs.info('Starting experiment.')

    # Set this here because we get failures if we do it in measurer for some
    # reason.
    multiprocessing.set_start_method('spawn')

    db_utils.initialize()
    if experiment_utils.is_local_experiment():
        # Local runs have no pre-provisioned database; create the schema here.
        models.Base.metadata.create_all(db_utils.engine)

    experiment_config_file_path = os.path.join(_get_config_dir(),
                                               'experiment.yaml')
    experiment = Experiment(experiment_config_file_path)

    _initialize_experiment_in_db(experiment.config)

    trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
                                     experiment.num_trials,
                                     experiment.preemptible)
    _initialize_trials_in_db(trials)

    create_work_subdirs(['experiment-folders', 'measurement-folders'])

    # Start measurer and scheduler in seperate threads/processes.
    scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
                                             args=(experiment.config,))
    scheduler_loop_thread.start()

    measurer_main_process = multiprocessing.Process(
        target=measure_manager.measure_main, args=(experiment.config,))
    measurer_main_process.start()

    is_complete = False
    while True:
        time.sleep(LOOP_WAIT_SECONDS)
        # The experiment is only complete once scheduling has stopped AND the
        # measurer has finished draining its work.
        if not scheduler_loop_thread.is_alive():
            is_complete = not measurer_main_process.is_alive()

        # Generate periodic output reports.
        reporter.output_report(experiment.config,
                               in_progress=not is_complete,
                               coverage_report=is_complete)

        if is_complete:
            # Experiment is complete, bail out.
            break

    scheduler_loop_thread.join()
    measurer_main_process.join()

    _record_experiment_time_ended(experiment.experiment_name)
    logs.info('Experiment ended.')


def main():
    """Do the experiment and report results."""
    logs.initialize(default_extras={
        'component': 'dispatcher',
    })

    try:
        dispatcher_main()
    except Exception as error:
        # NOTE(review): re-raising here means the stop_experiment cleanup
        # below is skipped on failure — confirm this is intentional.
        logs.error('Error conducting experiment.')
        raise error

    experiment_config_file_path = os.path.join(_get_config_dir(),
                                               'experiment.yaml')
    if experiment_utils.is_local_experiment():
        return 0
    # Shell-style exit status: 0 on successful teardown, 1 otherwise.
    if stop_experiment.stop_experiment(experiment_utils.get_experiment_name(),
                                       experiment_config_file_path):
        return 0
    return 1


if __name__ == '__main__':
    sys.exit(main())